diff --git a/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e4287b96e21bdfd97a218800a4999863dd5c2883
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:034eb6229e324eae8b6ba583cef0515886e17efad4ecb6622d7f4edcf4bda173
+size 9372
diff --git a/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fe3902f2b63552b7a81e7b007eb1ce78aebf56fb
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aba2f8eafba5fc5f06752423ad2672ff0d38bd5064e8b6362fd056b265c9d2c9
+size 9387
diff --git a/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5555e090afcb5bfbc62005e305dfd8bf948b1f47
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/17.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4eaebbcd702fced689bf99534651a846025526e6fc6a69bf4f3be730352a4ac7
+size 9293
diff --git a/ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..acf9bc8e1f779ca39470924c7a32c0d226093a2c
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea969c245f6822c4445ec535599b5b48779f685530f767f52d3b3cb4dc6e0976
+size 16778396
diff --git a/ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5a8fa459275702a846ab8a415a1e90a1f09fd8aa
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/20.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ddaffb0f2b394af82c16248ec2d29dd3e47bf951f2f99441b2c42141f04f5bb
+size 16778411
diff --git a/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..421fdb4108f12464ced5cfd6c5740bf198767ab0
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24b99a47225182c23b15b88365a94e9c7a413b05942e83b6511947d202d9c60c
+size 33555612
diff --git a/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..773425f98d526ab0e63673fe51096662a2eebbb9
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1 +oid sha256:27d345015bef2e0f42c7750c3a883b57cf4bbf71f86176a14e9093ed13c3ef1a +size 33555627 diff --git a/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..affcf2025b16592e27da6ff0d2617c4b322ab62b --- /dev/null +++ b/ckpts/universal/global_step80/zero/23.mlp.dense_4h_to_h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c97edf776f663b8ddaac4a76e4950e2886468499d3980f37cea6ac01f433b0f +size 33555533 diff --git a/ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..719e4b55eb102e9480329db7502bf07cff62c4ac --- /dev/null +++ b/ckpts/universal/global_step80/zero/25.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f561df8938128ed96348c642a340b36d54ec20b3f24e1f8682e5ed27d014d7d +size 33555627 diff --git a/venv/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5b3486186675654121a3958dd4351c93d60dcaea Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/_acero.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/_azurefs.pyx b/venv/lib/python3.10/site-packages/pyarrow/_azurefs.pyx new file mode 100644 index 0000000000000000000000000000000000000000..5cd6300c18c6a83e7036d84724666ba85396b530 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/_azurefs.pyx @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +from cython cimport binding + + +from pyarrow.lib import frombytes, tobytes +from pyarrow.includes.libarrow_fs cimport * +from pyarrow._fs cimport FileSystem + + +cdef class AzureFileSystem(FileSystem): + """ + Azure Blob Storage backed FileSystem implementation + + This implementation supports flat namespace and hierarchical namespace (HNS) a.k.a. + Data Lake Gen2 storage accounts. HNS will be automatically detected and HNS specific + features will be used when they provide a performance advantage. Azurite emulator is + also supported. Note: `/` is the only supported delimiter. + + The storage account is considered the root of the filesystem. When enabled, containers + will be created or deleted during relevant directory operations. 
Obviously, this also + requires authentication with the additional permissions. + + By default `DefaultAzureCredential `__ + is used for authentication. This means it will try several types of authentication + and go with the first one that works. If any authentication parameters are provided when + initialising the FileSystem, they will be used instead of the default credential. + + Parameters + ---------- + account_name : str + Azure Blob Storage account name. This is the globally unique identifier for the + storage account. + account_key : str, default None + Account key of the storage account. Pass None to use default credential. + blob_storage_authority : str, default None + hostname[:port] of the Blob Service. Defaults to `.blob.core.windows.net`. Useful + for connecting to a local emulator, like Azurite. + dfs_storage_authority : str, default None + hostname[:port] of the Data Lake Gen 2 Service. Defaults to + `.dfs.core.windows.net`. Useful for connecting to a local emulator, like Azurite. + blob_storage_scheme : str, default None + Either `http` or `https`. Defaults to `https`. Useful for connecting to a local + emulator, like Azurite. + dfs_storage_scheme : str, default None + Either `http` or `https`. Defaults to `https`. Useful for connecting to a local + emulator, like Azurite. + + Examples + -------- + >>> from pyarrow import fs + >>> azure_fs = fs.AzureFileSystem(account_name='myaccount') + >>> azurite_fs = fs.AzureFileSystem( + ... account_name='devstoreaccount1', + ... account_key='Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + ... blob_storage_authority='127.0.0.1:10000', + ... dfs_storage_authority='127.0.0.1:10000', + ... blob_storage_scheme='http', + ... dfs_storage_scheme='http', + ... ) + + For usage of the methods see examples for :func:`~pyarrow.fs.LocalFileSystem`. + """ + cdef: + CAzureFileSystem* azurefs + c_string account_key + + def __init__(self, account_name, *, account_key=None, blob_storage_authority=None, + dfs_storage_authority=None, blob_storage_scheme=None, + dfs_storage_scheme=None): + cdef: + CAzureOptions options + shared_ptr[CAzureFileSystem] wrapped + + options.account_name = tobytes(account_name) + if blob_storage_authority: + options.blob_storage_authority = tobytes(blob_storage_authority) + if dfs_storage_authority: + options.dfs_storage_authority = tobytes(dfs_storage_authority) + if blob_storage_scheme: + options.blob_storage_scheme = tobytes(blob_storage_scheme) + if dfs_storage_scheme: + options.dfs_storage_scheme = tobytes(dfs_storage_scheme) + + if account_key: + options.ConfigureAccountKeyCredential(tobytes(account_key)) + self.account_key = tobytes(account_key) + else: + options.ConfigureDefaultCredential() + + with nogil: + wrapped = GetResultValue(CAzureFileSystem.Make(options)) + + self.init( wrapped) + + cdef init(self, const shared_ptr[CFileSystem]& wrapped): + FileSystem.init(self, wrapped) + self.azurefs = wrapped.get() + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(kwargs): + # __reduce__ doesn't allow passing named arguments directly to the + # reconstructor, hence this wrapper. 
+ return AzureFileSystem(**kwargs) + + def __reduce__(self): + cdef CAzureOptions opts = self.azurefs.options() + return ( + AzureFileSystem._reconstruct, (dict( + account_name=frombytes(opts.account_name), + account_key=frombytes(self.account_key), + blob_storage_authority=frombytes(opts.blob_storage_authority), + dfs_storage_authority=frombytes(opts.dfs_storage_authority), + blob_storage_scheme=frombytes(opts.blob_storage_scheme), + dfs_storage_scheme=frombytes(opts.dfs_storage_scheme) + ),)) diff --git a/venv/lib/python3.10/site-packages/pyarrow/_csv.pyx b/venv/lib/python3.10/site-packages/pyarrow/_csv.pyx new file mode 100644 index 0000000000000000000000000000000000000000..508488c0c3b3c3bcd2d2157f57f625b1e5b92c2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/_csv.pyx @@ -0,0 +1,1542 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from cython.operator cimport dereference as deref + +from collections import namedtuple +from collections.abc import Mapping + +from pyarrow.includes.common cimport * +from pyarrow.includes.libarrow cimport * +from pyarrow.includes.libarrow_python cimport * +from pyarrow.lib cimport (check_status, Field, MemoryPool, Schema, + RecordBatchReader, ensure_type, + maybe_unbox_memory_pool, get_input_stream, + get_writer, native_transcoding_input_stream, + pyarrow_unwrap_batch, pyarrow_unwrap_schema, + pyarrow_unwrap_table, pyarrow_wrap_schema, + pyarrow_wrap_table, pyarrow_wrap_data_type, + pyarrow_unwrap_data_type, Table, RecordBatch, + StopToken, _CRecordBatchWriter) +from pyarrow.lib import frombytes, tobytes, SignalStopHandler + + +cdef unsigned char _single_char(s) except 0: + val = ord(s) + if val == 0 or val > 127: + raise ValueError("Expecting an ASCII character") + return val + + +_InvalidRow = namedtuple( + "_InvalidRow", ("expected_columns", "actual_columns", "number", "text"), + module=__name__) + + +class InvalidRow(_InvalidRow): + """ + Description of an invalid row in a CSV file. + + Parameters + ---------- + expected_columns : int + The expected number of columns in the row. + actual_columns : int + The actual number of columns in the row. + number : int or None + The physical row number if known, otherwise None. + text : str + The contents of the row. 
+ """ + __slots__ = () + + +cdef CInvalidRowResult _handle_invalid_row( + handler, const CCSVInvalidRow& c_row) except CInvalidRowResult_Error: + # A negative row number means undetermined (because of parallel reading) + row_number = c_row.number if c_row.number >= 0 else None + row = InvalidRow(c_row.expected_columns, c_row.actual_columns, + row_number, frombytes( c_row.text)) + result = handler(row) + if result == 'error': + return CInvalidRowResult_Error + elif result == 'skip': + return CInvalidRowResult_Skip + else: + raise ValueError("Invalid return value for invalid row handler: " + f"expected 'error' or 'skip', got {result!r}") + + +cdef class ReadOptions(_Weakrefable): + """ + Options for reading CSV files. + + Parameters + ---------- + use_threads : bool, optional (default True) + Whether to use multiple threads to accelerate reading + block_size : int, optional + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual record batches or table chunks. + Minimum valid value for block size is 1 + skip_rows : int, optional (default 0) + The number of rows to skip before the column names (if any) + and the CSV data. + skip_rows_after_names : int, optional (default 0) + The number of rows to skip after the column names. + This number can be larger than the number of rows in one + block, and empty rows are counted. + The order of application is as follows: + - `skip_rows` is applied (if non-zero); + - column names are read (unless `column_names` is set); + - `skip_rows_after_names` is applied (if non-zero). + column_names : list, optional + The column names of the target table. If empty, fall back on + `autogenerate_column_names`. + autogenerate_column_names : bool, optional (default False) + Whether to autogenerate column names if `column_names` is empty. + If true, column names will be of the form "f0", "f1"... + If false, column names will be read from the first CSV row + after `skip_rows`. + encoding : str, optional (default 'utf8') + The character encoding of the CSV data. Columns that cannot + decode using this encoding can still be read as Binary. + + Examples + -------- + + Defining an example data: + + >>> import io + >>> s = "1,2,3\\nFlamingo,2,2022-03-01\\nHorse,4,2022-03-02\\nBrittle stars,5,2022-03-03\\nCentipede,100,2022-03-04" + >>> print(s) + 1,2,3 + Flamingo,2,2022-03-01 + Horse,4,2022-03-02 + Brittle stars,5,2022-03-03 + Centipede,100,2022-03-04 + + Ignore the first numbered row and substitute it with defined + or autogenerated column names: + + >>> from pyarrow import csv + >>> read_options = csv.ReadOptions( + ... column_names=["animals", "n_legs", "entry"], + ... skip_rows=1) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + + >>> read_options = csv.ReadOptions(autogenerate_column_names=True, + ... 
skip_rows=1) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + f0: string + f1: int64 + f2: date32[day] + ---- + f0: [["Flamingo","Horse","Brittle stars","Centipede"]] + f1: [[2,4,5,100]] + f2: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + + Remove the first 2 rows of the data: + + >>> read_options = csv.ReadOptions(skip_rows_after_names=2) + >>> csv.read_csv(io.BytesIO(s.encode()), read_options=read_options) + pyarrow.Table + 1: string + 2: int64 + 3: date32[day] + ---- + 1: [["Brittle stars","Centipede"]] + 2: [[5,100]] + 3: [[2022-03-03,2022-03-04]] + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + # __init__() is not called when unpickling, initialize storage here + def __cinit__(self, *argw, **kwargs): + self.options.reset(new CCSVReadOptions(CCSVReadOptions.Defaults())) + + def __init__(self, *, use_threads=None, block_size=None, skip_rows=None, + skip_rows_after_names=None, column_names=None, + autogenerate_column_names=None, encoding='utf8'): + if use_threads is not None: + self.use_threads = use_threads + if block_size is not None: + self.block_size = block_size + if skip_rows is not None: + self.skip_rows = skip_rows + if skip_rows_after_names is not None: + self.skip_rows_after_names = skip_rows_after_names + if column_names is not None: + self.column_names = column_names + if autogenerate_column_names is not None: + self.autogenerate_column_names= autogenerate_column_names + # Python-specific option + self.encoding = encoding + + @property + def use_threads(self): + """ + Whether to use multiple threads to accelerate reading. + """ + return deref(self.options).use_threads + + @use_threads.setter + def use_threads(self, value): + deref(self.options).use_threads = value + + @property + def block_size(self): + """ + How much bytes to process at a time from the input stream. + This will determine multi-threading granularity as well as + the size of individual record batches or table chunks. + """ + return deref(self.options).block_size + + @block_size.setter + def block_size(self, value): + deref(self.options).block_size = value + + @property + def skip_rows(self): + """ + The number of rows to skip before the column names (if any) + and the CSV data. + See `skip_rows_after_names` for interaction description + """ + return deref(self.options).skip_rows + + @skip_rows.setter + def skip_rows(self, value): + deref(self.options).skip_rows = value + + @property + def skip_rows_after_names(self): + """ + The number of rows to skip after the column names. + This number can be larger than the number of rows in one + block, and empty rows are counted. + The order of application is as follows: + - `skip_rows` is applied (if non-zero); + - column names are read (unless `column_names` is set); + - `skip_rows_after_names` is applied (if non-zero). + """ + return deref(self.options).skip_rows_after_names + + @skip_rows_after_names.setter + def skip_rows_after_names(self, value): + deref(self.options).skip_rows_after_names = value + + @property + def column_names(self): + """ + The column names of the target table. If empty, fall back on + `autogenerate_column_names`. 
+ """ + return [frombytes(s) for s in deref(self.options).column_names] + + @column_names.setter + def column_names(self, value): + deref(self.options).column_names.clear() + for item in value: + deref(self.options).column_names.push_back(tobytes(item)) + + @property + def autogenerate_column_names(self): + """ + Whether to autogenerate column names if `column_names` is empty. + If true, column names will be of the form "f0", "f1"... + If false, column names will be read from the first CSV row + after `skip_rows`. + """ + return deref(self.options).autogenerate_column_names + + @autogenerate_column_names.setter + def autogenerate_column_names(self, value): + deref(self.options).autogenerate_column_names = value + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ReadOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ReadOptions + + Returns + ------- + bool + """ + return ( + self.use_threads == other.use_threads and + self.block_size == other.block_size and + self.skip_rows == other.skip_rows and + self.skip_rows_after_names == other.skip_rows_after_names and + self.column_names == other.column_names and + self.autogenerate_column_names == + other.autogenerate_column_names and + self.encoding == other.encoding + ) + + @staticmethod + cdef ReadOptions wrap(CCSVReadOptions options): + out = ReadOptions() + out.options.reset(new CCSVReadOptions(move(options))) + out.encoding = 'utf8' # No way to know this + return out + + def __getstate__(self): + return (self.use_threads, self.block_size, self.skip_rows, + self.column_names, self.autogenerate_column_names, + self.encoding, self.skip_rows_after_names) + + def __setstate__(self, state): + (self.use_threads, self.block_size, self.skip_rows, + self.column_names, self.autogenerate_column_names, + self.encoding, self.skip_rows_after_names) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class ParseOptions(_Weakrefable): + """ + Options for parsing CSV files. + + Parameters + ---------- + delimiter : 1-character string, optional (default ',') + The character delimiting individual cells in the CSV data. + quote_char : 1-character string or False, optional (default '"') + The character used optionally for quoting CSV values + (False if quoting is not allowed). + double_quote : bool, optional (default True) + Whether two quotes in a quoted CSV value denote a single quote + in the data. + escape_char : 1-character string or False, optional (default False) + The character used optionally for escaping special characters + (False if escaping is not allowed). + newlines_in_values : bool, optional (default False) + Whether newline characters are allowed in CSV values. + Setting this to True reduces the performance of multi-threaded + CSV reading. + ignore_empty_lines : bool, optional (default True) + Whether empty lines are ignored in CSV input. + If False, an empty line is interpreted as containing a single empty + value (assuming a one-column CSV file). + invalid_row_handler : callable, optional (default None) + If not None, this object is called for each CSV row that fails + parsing (because of a mismatching number of columns). + It should accept a single InvalidRow argument and return either + "skip" or "error" depending on the desired outcome. + + Examples + -------- + + Defining an example file from bytes object: + + >>> import io + >>> s = ( + ... "animals;n_legs;entry\\n" + ... "Flamingo;2;2022-03-01\\n" + ... 
"# Comment here:\\n" + ... "Horse;4;2022-03-02\\n" + ... "Brittle stars;5;2022-03-03\\n" + ... "Centipede;100;2022-03-04" + ... ) + >>> print(s) + animals;n_legs;entry + Flamingo;2;2022-03-01 + # Comment here: + Horse;4;2022-03-02 + Brittle stars;5;2022-03-03 + Centipede;100;2022-03-04 + >>> source = io.BytesIO(s.encode()) + + Read the data from a file skipping rows with comments + and defining the delimiter: + + >>> from pyarrow import csv + >>> def skip_comment(row): + ... if row.text.startswith("# "): + ... return 'skip' + ... else: + ... return 'error' + ... + >>> parse_options = csv.ParseOptions(delimiter=";", invalid_row_handler=skip_comment) + >>> csv.read_csv(source, parse_options=parse_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + """ + __slots__ = () + + def __cinit__(self, *argw, **kwargs): + self._invalid_row_handler = None + self.options.reset(new CCSVParseOptions(CCSVParseOptions.Defaults())) + + def __init__(self, *, delimiter=None, quote_char=None, double_quote=None, + escape_char=None, newlines_in_values=None, + ignore_empty_lines=None, invalid_row_handler=None): + if delimiter is not None: + self.delimiter = delimiter + if quote_char is not None: + self.quote_char = quote_char + if double_quote is not None: + self.double_quote = double_quote + if escape_char is not None: + self.escape_char = escape_char + if newlines_in_values is not None: + self.newlines_in_values = newlines_in_values + if ignore_empty_lines is not None: + self.ignore_empty_lines = ignore_empty_lines + if invalid_row_handler is not None: + self.invalid_row_handler = invalid_row_handler + + @property + def delimiter(self): + """ + The character delimiting individual cells in the CSV data. + """ + return chr(deref(self.options).delimiter) + + @delimiter.setter + def delimiter(self, value): + deref(self.options).delimiter = _single_char(value) + + @property + def quote_char(self): + """ + The character used optionally for quoting CSV values + (False if quoting is not allowed). + """ + if deref(self.options).quoting: + return chr(deref(self.options).quote_char) + else: + return False + + @quote_char.setter + def quote_char(self, value): + if value is False: + deref(self.options).quoting = False + else: + deref(self.options).quote_char = _single_char(value) + deref(self.options).quoting = True + + @property + def double_quote(self): + """ + Whether two quotes in a quoted CSV value denote a single quote + in the data. + """ + return deref(self.options).double_quote + + @double_quote.setter + def double_quote(self, value): + deref(self.options).double_quote = value + + @property + def escape_char(self): + """ + The character used optionally for escaping special characters + (False if escaping is not allowed). + """ + if deref(self.options).escaping: + return chr(deref(self.options).escape_char) + else: + return False + + @escape_char.setter + def escape_char(self, value): + if value is False: + deref(self.options).escaping = False + else: + deref(self.options).escape_char = _single_char(value) + deref(self.options).escaping = True + + @property + def newlines_in_values(self): + """ + Whether newline characters are allowed in CSV values. + Setting this to True reduces the performance of multi-threaded + CSV reading. 
+ """ + return deref(self.options).newlines_in_values + + @newlines_in_values.setter + def newlines_in_values(self, value): + deref(self.options).newlines_in_values = value + + @property + def ignore_empty_lines(self): + """ + Whether empty lines are ignored in CSV input. + If False, an empty line is interpreted as containing a single empty + value (assuming a one-column CSV file). + """ + return deref(self.options).ignore_empty_lines + + @property + def invalid_row_handler(self): + """ + Optional handler for invalid rows. + + If not None, this object is called for each CSV row that fails + parsing (because of a mismatching number of columns). + It should accept a single InvalidRow argument and return either + "skip" or "error" depending on the desired outcome. + """ + return self._invalid_row_handler + + @invalid_row_handler.setter + def invalid_row_handler(self, value): + if value is not None and not callable(value): + raise TypeError("Expected callable or None, " + f"got instance of {type(value)!r}") + self._invalid_row_handler = value + deref(self.options).invalid_row_handler = MakeInvalidRowHandler( + &_handle_invalid_row, value) + + @ignore_empty_lines.setter + def ignore_empty_lines(self, value): + deref(self.options).ignore_empty_lines = value + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ParseOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ParseOptions + + Returns + ------- + bool + """ + return ( + self.delimiter == other.delimiter and + self.quote_char == other.quote_char and + self.double_quote == other.double_quote and + self.escape_char == other.escape_char and + self.newlines_in_values == other.newlines_in_values and + self.ignore_empty_lines == other.ignore_empty_lines and + self._invalid_row_handler == other._invalid_row_handler + ) + + @staticmethod + cdef ParseOptions wrap(CCSVParseOptions options): + out = ParseOptions() + out.options.reset(new CCSVParseOptions(move(options))) + return out + + def __getstate__(self): + return (self.delimiter, self.quote_char, self.double_quote, + self.escape_char, self.newlines_in_values, + self.ignore_empty_lines, self.invalid_row_handler) + + def __setstate__(self, state): + (self.delimiter, self.quote_char, self.double_quote, + self.escape_char, self.newlines_in_values, + self.ignore_empty_lines, self.invalid_row_handler) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef class _ISO8601(_Weakrefable): + """ + A special object indicating ISO-8601 parsing. + """ + __slots__ = () + + def __str__(self): + return 'ISO8601' + + def __eq__(self, other): + return isinstance(other, _ISO8601) + + +ISO8601 = _ISO8601() + + +cdef class ConvertOptions(_Weakrefable): + """ + Options for converting CSV data. + + Parameters + ---------- + check_utf8 : bool, optional (default True) + Whether to check UTF8 validity of string columns. + column_types : pyarrow.Schema or dict, optional + Explicitly map column names to column types. Passing this argument + disables type inference on the defined columns. + null_values : list, optional + A sequence of strings that denote nulls in the data + (defaults are appropriate in most cases). Note that by default, + string columns are not checked for null values. To enable + null checking for those, specify ``strings_can_be_null=True``. + true_values : list, optional + A sequence of strings that denote true booleans in the data + (defaults are appropriate in most cases). 
+ false_values : list, optional + A sequence of strings that denote false booleans in the data + (defaults are appropriate in most cases). + decimal_point : 1-character string, optional (default '.') + The character used as decimal point in floating-point and decimal + data. + strings_can_be_null : bool, optional (default False) + Whether string / binary columns can have null values. + If true, then strings in null_values are considered null for + string columns. + If false, then all strings are valid string values. + quoted_strings_can_be_null : bool, optional (default True) + Whether quoted values can be null. + If true, then strings in "null_values" are also considered null + when they appear quoted in the CSV file. Otherwise, quoted values + are never considered null. + include_columns : list, optional + The names of columns to include in the Table. + If empty, the Table will include all columns from the CSV file. + If not empty, only these columns will be included, in this order. + include_missing_columns : bool, optional (default False) + If false, columns in `include_columns` but not in the CSV file will + error out. + If true, columns in `include_columns` but not in the CSV file will + produce a column of nulls (whose type is selected using + `column_types`, or null by default). + This option is ignored if `include_columns` is empty. + auto_dict_encode : bool, optional (default False) + Whether to try to automatically dict-encode string / binary data. + If true, then when type inference detects a string or binary column, + it it dict-encoded up to `auto_dict_max_cardinality` distinct values + (per chunk), after which it switches to regular encoding. + This setting is ignored for non-inferred columns (those in + `column_types`). + auto_dict_max_cardinality : int, optional + The maximum dictionary cardinality for `auto_dict_encode`. + This value is per chunk. + timestamp_parsers : list, optional + A sequence of strptime()-compatible format strings, tried in order + when attempting to infer or convert timestamp values (the special + value ISO8601() can also be given). By default, a fast built-in + ISO-8601 parser is used. + + Examples + -------- + + Defining an example data: + + >>> import io + >>> s = ( + ... "animals,n_legs,entry,fast\\n" + ... "Flamingo,2,01/03/2022,Yes\\n" + ... "Horse,4,02/03/2022,Yes\\n" + ... "Brittle stars,5,03/03/2022,No\\n" + ... "Centipede,100,04/03/2022,No\\n" + ... ",6,05/03/2022," + ... ) + >>> print(s) + animals,n_legs,entry,fast + Flamingo,2,01/03/2022,Yes + Horse,4,02/03/2022,Yes + Brittle stars,5,03/03/2022,No + Centipede,100,04/03/2022,No + ,6,05/03/2022, + + Change the type of a column: + + >>> import pyarrow as pa + >>> from pyarrow import csv + >>> convert_options = csv.ConvertOptions(column_types={"n_legs": pa.float64()}) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: double + entry: string + fast: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + entry: [["01/03/2022","02/03/2022","03/03/2022","04/03/2022","05/03/2022"]] + fast: [["Yes","Yes","No","No",""]] + + Define a date parsing format to get a timestamp type column + (in case dates are not in ISO format and not converted by default): + + >>> convert_options = csv.ConvertOptions( + ... 
timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + entry: timestamp[s] + fast: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]] + fast: [["Yes","Yes","No","No",""]] + + Specify a subset of columns to be read: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals", "n_legs"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + + List additional column to be included as a null typed column: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals", "n_legs", "location"], + ... include_missing_columns=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + location: null + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + n_legs: [[2,4,5,100,6]] + location: [5 nulls] + + Define columns as dictionary type (by default only the + string/binary columns are dictionary encoded): + + >>> convert_options = csv.ConvertOptions( + ... timestamp_parsers=["%m/%d/%Y", "%m-%d-%Y"], + ... auto_dict_encode=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: dictionary + n_legs: int64 + entry: timestamp[s] + fast: dictionary + ---- + animals: [ -- dictionary: + ["Flamingo","Horse","Brittle stars","Centipede",""] -- indices: + [0,1,2,3,4]] + n_legs: [[2,4,5,100,6]] + entry: [[2022-01-03 00:00:00,2022-02-03 00:00:00,2022-03-03 00:00:00,2022-04-03 00:00:00,2022-05-03 00:00:00]] + fast: [ -- dictionary: + ["Yes","No",""] -- indices: + [0,0,1,1,2]] + + Set upper limit for the number of categories. If the categories + is more than the limit, the conversion to dictionary will not + happen: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["animals"], + ... auto_dict_encode=True, + ... auto_dict_max_cardinality=2) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",""]] + + Set empty strings to missing values: + + >>> convert_options = csv.ConvertOptions(include_columns=["animals", "n_legs"], + ... strings_can_be_null=True) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + animals: string + n_legs: int64 + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede",null]] + n_legs: [[2,4,5,100,6]] + + Define values to be True and False when converting a column + into a bool type: + + >>> convert_options = csv.ConvertOptions( + ... include_columns=["fast"], + ... false_values=["No"], + ... 
true_values=["Yes"]) + >>> csv.read_csv(io.BytesIO(s.encode()), convert_options=convert_options) + pyarrow.Table + fast: bool + ---- + fast: [[true,true,false,false,null]] + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __cinit__(self, *argw, **kwargs): + self.options.reset( + new CCSVConvertOptions(CCSVConvertOptions.Defaults())) + + def __init__(self, *, check_utf8=None, column_types=None, null_values=None, + true_values=None, false_values=None, decimal_point=None, + strings_can_be_null=None, quoted_strings_can_be_null=None, + include_columns=None, include_missing_columns=None, + auto_dict_encode=None, auto_dict_max_cardinality=None, + timestamp_parsers=None): + if check_utf8 is not None: + self.check_utf8 = check_utf8 + if column_types is not None: + self.column_types = column_types + if null_values is not None: + self.null_values = null_values + if true_values is not None: + self.true_values = true_values + if false_values is not None: + self.false_values = false_values + if decimal_point is not None: + self.decimal_point = decimal_point + if strings_can_be_null is not None: + self.strings_can_be_null = strings_can_be_null + if quoted_strings_can_be_null is not None: + self.quoted_strings_can_be_null = quoted_strings_can_be_null + if include_columns is not None: + self.include_columns = include_columns + if include_missing_columns is not None: + self.include_missing_columns = include_missing_columns + if auto_dict_encode is not None: + self.auto_dict_encode = auto_dict_encode + if auto_dict_max_cardinality is not None: + self.auto_dict_max_cardinality = auto_dict_max_cardinality + if timestamp_parsers is not None: + self.timestamp_parsers = timestamp_parsers + + @property + def check_utf8(self): + """ + Whether to check UTF8 validity of string columns. + """ + return deref(self.options).check_utf8 + + @check_utf8.setter + def check_utf8(self, value): + deref(self.options).check_utf8 = value + + @property + def strings_can_be_null(self): + """ + Whether string / binary columns can have null values. + """ + return deref(self.options).strings_can_be_null + + @strings_can_be_null.setter + def strings_can_be_null(self, value): + deref(self.options).strings_can_be_null = value + + @property + def quoted_strings_can_be_null(self): + """ + Whether quoted values can be null. + """ + return deref(self.options).quoted_strings_can_be_null + + @quoted_strings_can_be_null.setter + def quoted_strings_can_be_null(self, value): + deref(self.options).quoted_strings_can_be_null = value + + @property + def column_types(self): + """ + Explicitly map column names to column types. + """ + d = {frombytes(item.first): pyarrow_wrap_data_type(item.second) + for item in deref(self.options).column_types} + return d + + @column_types.setter + def column_types(self, value): + cdef: + shared_ptr[CDataType] typ + + if isinstance(value, Mapping): + value = value.items() + + deref(self.options).column_types.clear() + for item in value: + if isinstance(item, Field): + k = item.name + v = item.type + else: + k, v = item + typ = pyarrow_unwrap_data_type(ensure_type(v)) + assert typ != NULL + deref(self.options).column_types[tobytes(k)] = typ + + @property + def null_values(self): + """ + A sequence of strings that denote nulls in the data. 
+ """ + return [frombytes(x) for x in deref(self.options).null_values] + + @null_values.setter + def null_values(self, value): + deref(self.options).null_values = [tobytes(x) for x in value] + + @property + def true_values(self): + """ + A sequence of strings that denote true booleans in the data. + """ + return [frombytes(x) for x in deref(self.options).true_values] + + @true_values.setter + def true_values(self, value): + deref(self.options).true_values = [tobytes(x) for x in value] + + @property + def false_values(self): + """ + A sequence of strings that denote false booleans in the data. + """ + return [frombytes(x) for x in deref(self.options).false_values] + + @false_values.setter + def false_values(self, value): + deref(self.options).false_values = [tobytes(x) for x in value] + + @property + def decimal_point(self): + """ + The character used as decimal point in floating-point and decimal + data. + """ + return chr(deref(self.options).decimal_point) + + @decimal_point.setter + def decimal_point(self, value): + deref(self.options).decimal_point = _single_char(value) + + @property + def auto_dict_encode(self): + """ + Whether to try to automatically dict-encode string / binary data. + """ + return deref(self.options).auto_dict_encode + + @auto_dict_encode.setter + def auto_dict_encode(self, value): + deref(self.options).auto_dict_encode = value + + @property + def auto_dict_max_cardinality(self): + """ + The maximum dictionary cardinality for `auto_dict_encode`. + + This value is per chunk. + """ + return deref(self.options).auto_dict_max_cardinality + + @auto_dict_max_cardinality.setter + def auto_dict_max_cardinality(self, value): + deref(self.options).auto_dict_max_cardinality = value + + @property + def include_columns(self): + """ + The names of columns to include in the Table. + + If empty, the Table will include all columns from the CSV file. + If not empty, only these columns will be included, in this order. + """ + return [frombytes(s) for s in deref(self.options).include_columns] + + @include_columns.setter + def include_columns(self, value): + deref(self.options).include_columns.clear() + for item in value: + deref(self.options).include_columns.push_back(tobytes(item)) + + @property + def include_missing_columns(self): + """ + If false, columns in `include_columns` but not in the CSV file will + error out. + If true, columns in `include_columns` but not in the CSV file will + produce a null column (whose type is selected using `column_types`, + or null by default). + This option is ignored if `include_columns` is empty. + """ + return deref(self.options).include_missing_columns + + @include_missing_columns.setter + def include_missing_columns(self, value): + deref(self.options).include_missing_columns = value + + @property + def timestamp_parsers(self): + """ + A sequence of strptime()-compatible format strings, tried in order + when attempting to infer or convert timestamp values (the special + value ISO8601() can also be given). By default, a fast built-in + ISO-8601 parser is used. 
+ """ + cdef: + shared_ptr[CTimestampParser] c_parser + c_string kind + + parsers = [] + for c_parser in deref(self.options).timestamp_parsers: + kind = deref(c_parser).kind() + if kind == b'strptime': + parsers.append(frombytes(deref(c_parser).format())) + else: + assert kind == b'iso8601' + parsers.append(ISO8601) + + return parsers + + @timestamp_parsers.setter + def timestamp_parsers(self, value): + cdef: + vector[shared_ptr[CTimestampParser]] c_parsers + + for v in value: + if isinstance(v, str): + c_parsers.push_back(CTimestampParser.MakeStrptime(tobytes(v))) + elif v == ISO8601: + c_parsers.push_back(CTimestampParser.MakeISO8601()) + else: + raise TypeError("Expected list of str or ISO8601 objects") + + deref(self.options).timestamp_parsers = move(c_parsers) + + @staticmethod + cdef ConvertOptions wrap(CCSVConvertOptions options): + out = ConvertOptions() + out.options.reset(new CCSVConvertOptions(move(options))) + return out + + def validate(self): + check_status(deref(self.options).Validate()) + + def equals(self, ConvertOptions other): + """ + Parameters + ---------- + other : pyarrow.csv.ConvertOptions + + Returns + ------- + bool + """ + return ( + self.check_utf8 == other.check_utf8 and + self.column_types == other.column_types and + self.null_values == other.null_values and + self.true_values == other.true_values and + self.false_values == other.false_values and + self.decimal_point == other.decimal_point and + self.timestamp_parsers == other.timestamp_parsers and + self.strings_can_be_null == other.strings_can_be_null and + self.quoted_strings_can_be_null == + other.quoted_strings_can_be_null and + self.auto_dict_encode == other.auto_dict_encode and + self.auto_dict_max_cardinality == + other.auto_dict_max_cardinality and + self.include_columns == other.include_columns and + self.include_missing_columns == other.include_missing_columns + ) + + def __getstate__(self): + return (self.check_utf8, self.column_types, self.null_values, + self.true_values, self.false_values, self.decimal_point, + self.timestamp_parsers, self.strings_can_be_null, + self.quoted_strings_can_be_null, self.auto_dict_encode, + self.auto_dict_max_cardinality, self.include_columns, + self.include_missing_columns) + + def __setstate__(self, state): + (self.check_utf8, self.column_types, self.null_values, + self.true_values, self.false_values, self.decimal_point, + self.timestamp_parsers, self.strings_can_be_null, + self.quoted_strings_can_be_null, self.auto_dict_encode, + self.auto_dict_max_cardinality, self.include_columns, + self.include_missing_columns) = state + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return False + + +cdef _get_reader(input_file, ReadOptions read_options, + shared_ptr[CInputStream]* out): + use_memory_map = False + get_input_stream(input_file, use_memory_map, out) + if read_options is not None: + out[0] = native_transcoding_input_stream(out[0], + read_options.encoding, + 'utf8') + + +cdef _get_read_options(ReadOptions read_options, CCSVReadOptions* out): + if read_options is None: + out[0] = CCSVReadOptions.Defaults() + else: + out[0] = deref(read_options.options) + + +cdef _get_parse_options(ParseOptions parse_options, CCSVParseOptions* out): + if parse_options is None: + out[0] = CCSVParseOptions.Defaults() + else: + out[0] = deref(parse_options.options) + + +cdef _get_convert_options(ConvertOptions convert_options, + CCSVConvertOptions* out): + if convert_options is None: + out[0] = CCSVConvertOptions.Defaults() + else: + out[0] = 
deref(convert_options.options) + + +cdef class CSVStreamingReader(RecordBatchReader): + """An object that reads record batches incrementally from a CSV file. + + Should not be instantiated directly by user code. + """ + cdef readonly: + Schema schema + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, " + "use pyarrow.csv.open_csv() instead." + .format(self.__class__.__name__)) + + # Note about cancellation: we cannot create a SignalStopHandler + # by default here, as several CSVStreamingReader instances may be + # created (including by the same thread). Handling cancellation + # would require having the user pass the SignalStopHandler. + # (in addition to solving ARROW-11853) + + cdef _open(self, shared_ptr[CInputStream] stream, + CCSVReadOptions c_read_options, + CCSVParseOptions c_parse_options, + CCSVConvertOptions c_convert_options, + MemoryPool memory_pool): + cdef: + shared_ptr[CSchema] c_schema + CIOContext io_context + + io_context = CIOContext(maybe_unbox_memory_pool(memory_pool)) + + with nogil: + self.reader = GetResultValue( + CCSVStreamingReader.Make( + io_context, stream, + move(c_read_options), move(c_parse_options), + move(c_convert_options))) + c_schema = self.reader.get().schema() + + self.schema = pyarrow_wrap_schema(c_schema) + + +def read_csv(input_file, read_options=None, parse_options=None, + convert_options=None, MemoryPool memory_pool=None): + """ + Read a Table from a stream of CSV data. + + Parameters + ---------- + input_file : string, path or file-like object + The location of CSV data. If a string or path, and if it ends + with a recognized compressed file extension (e.g. ".gz" or ".bz2"), + the data is automatically decompressed when reading. + read_options : pyarrow.csv.ReadOptions, optional + Options for the CSV reader (see pyarrow.csv.ReadOptions constructor + for defaults) + parse_options : pyarrow.csv.ParseOptions, optional + Options for the CSV parser + (see pyarrow.csv.ParseOptions constructor for defaults) + convert_options : pyarrow.csv.ConvertOptions, optional + Options for converting CSV data + (see pyarrow.csv.ConvertOptions constructor for defaults) + memory_pool : MemoryPool, optional + Pool to allocate Table memory from + + Returns + ------- + :class:`pyarrow.Table` + Contents of the CSV file as a in-memory table. + + Examples + -------- + + Defining an example file from bytes object: + + >>> import io + >>> s = ( + ... "animals,n_legs,entry\\n" + ... "Flamingo,2,2022-03-01\\n" + ... "Horse,4,2022-03-02\\n" + ... "Brittle stars,5,2022-03-03\\n" + ... "Centipede,100,2022-03-04" + ... 
) + >>> print(s) + animals,n_legs,entry + Flamingo,2,2022-03-01 + Horse,4,2022-03-02 + Brittle stars,5,2022-03-03 + Centipede,100,2022-03-04 + >>> source = io.BytesIO(s.encode()) + + Reading from the file + + >>> from pyarrow import csv + >>> csv.read_csv(source) + pyarrow.Table + animals: string + n_legs: int64 + entry: date32[day] + ---- + animals: [["Flamingo","Horse","Brittle stars","Centipede"]] + n_legs: [[2,4,5,100]] + entry: [[2022-03-01,2022-03-02,2022-03-03,2022-03-04]] + """ + cdef: + shared_ptr[CInputStream] stream + CCSVReadOptions c_read_options + CCSVParseOptions c_parse_options + CCSVConvertOptions c_convert_options + CIOContext io_context + SharedPtrNoGIL[CCSVReader] reader + shared_ptr[CTable] table + + _get_reader(input_file, read_options, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + _get_convert_options(convert_options, &c_convert_options) + + with SignalStopHandler() as stop_handler: + io_context = CIOContext( + maybe_unbox_memory_pool(memory_pool), + ( stop_handler.stop_token).stop_token) + reader = GetResultValue(CCSVReader.Make( + io_context, stream, + c_read_options, c_parse_options, c_convert_options)) + + with nogil: + table = GetResultValue(reader.get().Read()) + + return pyarrow_wrap_table(table) + + +def open_csv(input_file, read_options=None, parse_options=None, + convert_options=None, MemoryPool memory_pool=None): + """ + Open a streaming reader of CSV data. + + Reading using this function is always single-threaded. + + Parameters + ---------- + input_file : string, path or file-like object + The location of CSV data. If a string or path, and if it ends + with a recognized compressed file extension (e.g. ".gz" or ".bz2"), + the data is automatically decompressed when reading. 
+ read_options : pyarrow.csv.ReadOptions, optional + Options for the CSV reader (see pyarrow.csv.ReadOptions constructor + for defaults) + parse_options : pyarrow.csv.ParseOptions, optional + Options for the CSV parser + (see pyarrow.csv.ParseOptions constructor for defaults) + convert_options : pyarrow.csv.ConvertOptions, optional + Options for converting CSV data + (see pyarrow.csv.ConvertOptions constructor for defaults) + memory_pool : MemoryPool, optional + Pool to allocate Table memory from + + Returns + ------- + :class:`pyarrow.csv.CSVStreamingReader` + """ + cdef: + shared_ptr[CInputStream] stream + CCSVReadOptions c_read_options + CCSVParseOptions c_parse_options + CCSVConvertOptions c_convert_options + CSVStreamingReader reader + + _get_reader(input_file, read_options, &stream) + _get_read_options(read_options, &c_read_options) + _get_parse_options(parse_options, &c_parse_options) + _get_convert_options(convert_options, &c_convert_options) + + reader = CSVStreamingReader.__new__(CSVStreamingReader) + reader._open(stream, move(c_read_options), move(c_parse_options), + move(c_convert_options), memory_pool) + return reader + + +def _raise_invalid_function_option(value, description, *, + exception_class=ValueError): + raise exception_class(f"\"{value}\" is not a valid {description}") + + +cdef CQuotingStyle unwrap_quoting_style(quoting_style) except *: + if quoting_style == "needed": + return CQuotingStyle_Needed + elif quoting_style == "all_valid": + return CQuotingStyle_AllValid + elif quoting_style == "none": + return CQuotingStyle_None + _raise_invalid_function_option(quoting_style, "quoting style") + + +cdef wrap_quoting_style(quoting_style): + if quoting_style == CQuotingStyle_Needed: + return 'needed' + elif quoting_style == CQuotingStyle_AllValid: + return 'all_valid' + elif quoting_style == CQuotingStyle_None: + return 'none' + + +cdef class WriteOptions(_Weakrefable): + """ + Options for writing CSV files. + + Parameters + ---------- + include_header : bool, optional (default True) + Whether to write an initial header line with column names + batch_size : int, optional (default 1024) + How many rows to process together when converting and writing + CSV data + delimiter : 1-character string, optional (default ",") + The character delimiting individual cells in the CSV data. + quoting_style : str, optional (default "needed") + Whether to quote values, and if so, which quoting style to use. + The following values are accepted: + + - "needed" (default): only enclose values in quotes when needed. + - "all_valid": enclose all valid values in quotes; nulls are not quoted. + - "none": do not enclose any values in quotes; values containing + special characters (such as quotes, cell delimiters or line endings) + will raise an error. + """ + + # Avoid mistakingly creating attributes + __slots__ = () + + def __init__(self, *, include_header=None, batch_size=None, + delimiter=None, quoting_style=None): + self.options.reset(new CCSVWriteOptions(CCSVWriteOptions.Defaults())) + if include_header is not None: + self.include_header = include_header + if batch_size is not None: + self.batch_size = batch_size + if delimiter is not None: + self.delimiter = delimiter + if quoting_style is not None: + self.quoting_style = quoting_style + + @property + def include_header(self): + """ + Whether to write an initial header line with column names. 
+ """ + return deref(self.options).include_header + + @include_header.setter + def include_header(self, value): + deref(self.options).include_header = value + + @property + def batch_size(self): + """ + How many rows to process together when converting and writing + CSV data. + """ + return deref(self.options).batch_size + + @batch_size.setter + def batch_size(self, value): + deref(self.options).batch_size = value + + @property + def delimiter(self): + """ + The character delimiting individual cells in the CSV data. + """ + return chr(deref(self.options).delimiter) + + @delimiter.setter + def delimiter(self, value): + deref(self.options).delimiter = _single_char(value) + + @property + def quoting_style(self): + """ + Whether to quote values, and if so, which quoting style to use. + The following values are accepted: + + - "needed" (default): only enclose values in quotes when needed. + - "all_valid": enclose all valid values in quotes; nulls are not quoted. + - "none": do not enclose any values in quotes; values containing + special characters (such as quotes, cell delimiters or line endings) + will raise an error. + """ + return wrap_quoting_style(deref(self.options).quoting_style) + + @quoting_style.setter + def quoting_style(self, value): + deref(self.options).quoting_style = unwrap_quoting_style(value) + + @staticmethod + cdef WriteOptions wrap(CCSVWriteOptions options): + out = WriteOptions() + out.options.reset(new CCSVWriteOptions(move(options))) + return out + + def validate(self): + check_status(self.options.get().Validate()) + + +cdef _get_write_options(WriteOptions write_options, CCSVWriteOptions* out): + if write_options is None: + out[0] = CCSVWriteOptions.Defaults() + else: + out[0] = deref(write_options.options) + + +def write_csv(data, output_file, write_options=None, + MemoryPool memory_pool=None): + """ + Write record batch or table to a CSV file. + + Parameters + ---------- + data : pyarrow.RecordBatch or pyarrow.Table + The data to write. + output_file : string, path, pyarrow.NativeFile, or file-like object + The location where to write the CSV data. + write_options : pyarrow.csv.WriteOptions + Options to configure writing the CSV data. + memory_pool : MemoryPool, optional + Pool for temporary allocations. + + Examples + -------- + + >>> import pyarrow as pa + >>> from pyarrow import csv + + >>> legs = pa.array([2, 4, 5, 100]) + >>> animals = pa.array(["Flamingo", "Horse", "Brittle stars", "Centipede"]) + >>> entry_date = pa.array(["01/03/2022", "02/03/2022", + ... "03/03/2022", "04/03/2022"]) + >>> table = pa.table([animals, legs, entry_date], + ... 
names=["animals", "n_legs", "entry"]) + + >>> csv.write_csv(table, "animals.csv") + + >>> write_options = csv.WriteOptions(include_header=False) + >>> csv.write_csv(table, "animals.csv", write_options=write_options) + + >>> write_options = csv.WriteOptions(delimiter=";") + >>> csv.write_csv(table, "animals.csv", write_options=write_options) + """ + cdef: + shared_ptr[COutputStream] stream + CCSVWriteOptions c_write_options + CMemoryPool* c_memory_pool + CRecordBatch* batch + CTable* table + _get_write_options(write_options, &c_write_options) + + get_writer(output_file, &stream) + c_memory_pool = maybe_unbox_memory_pool(memory_pool) + c_write_options.io_context = CIOContext(c_memory_pool) + if isinstance(data, RecordBatch): + batch = pyarrow_unwrap_batch(data).get() + with nogil: + check_status(WriteCSV(deref(batch), c_write_options, stream.get())) + elif isinstance(data, Table): + table = pyarrow_unwrap_table(data).get() + with nogil: + check_status(WriteCSV(deref(table), c_write_options, stream.get())) + else: + raise TypeError(f"Expected Table or RecordBatch, got '{type(data)}'") + + +cdef class CSVWriter(_CRecordBatchWriter): + """ + Writer to create a CSV file. + + Parameters + ---------- + sink : str, path, pyarrow.OutputStream or file-like object + The location where to write the CSV data. + schema : pyarrow.Schema + The schema of the data to be written. + write_options : pyarrow.csv.WriteOptions + Options to configure writing the CSV data. + memory_pool : MemoryPool, optional + Pool for temporary allocations. + """ + + def __init__(self, sink, Schema schema, *, + WriteOptions write_options=None, MemoryPool memory_pool=None): + cdef: + shared_ptr[COutputStream] c_stream + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CCSVWriteOptions c_write_options + CMemoryPool* c_memory_pool = maybe_unbox_memory_pool(memory_pool) + _get_write_options(write_options, &c_write_options) + c_write_options.io_context = CIOContext(c_memory_pool) + get_writer(sink, &c_stream) + with nogil: + self.writer = GetResultValue(MakeCSVWriter( + c_stream, c_schema, c_write_options)) diff --git a/venv/lib/python3.10/site-packages/pyarrow/_cuda.pyx b/venv/lib/python3.10/site-packages/pyarrow/_cuda.pyx new file mode 100644 index 0000000000000000000000000000000000000000..ba799a105e7e15ab8414988cdefdaa4dc315cad8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/_cuda.pyx @@ -0,0 +1,1058 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +from pyarrow.lib cimport * +from pyarrow.includes.libarrow_cuda cimport * +from pyarrow.lib import allocate_buffer, as_buffer, ArrowTypeError +from pyarrow.util import get_contiguous_span +cimport cpython as cp + + +cdef class Context(_Weakrefable): + """ + CUDA driver context. 
+ """ + + def __init__(self, *args, **kwargs): + """ + Create a CUDA driver context for a particular device. + + If a CUDA context handle is passed, it is wrapped, otherwise + a default CUDA context for the given device is requested. + + Parameters + ---------- + device_number : int (default 0) + Specify the GPU device for which the CUDA driver context is + requested. + handle : int, optional + Specify CUDA handle for a shared context that has been created + by another library. + """ + # This method exposed because autodoc doesn't pick __cinit__ + + def __cinit__(self, int device_number=0, uintptr_t handle=0): + cdef CCudaDeviceManager* manager + manager = GetResultValue(CCudaDeviceManager.Instance()) + cdef int n = manager.num_devices() + if device_number >= n or device_number < 0: + self.context.reset() + raise ValueError('device_number argument must be ' + 'non-negative less than %s' % (n)) + if handle == 0: + self.context = GetResultValue(manager.GetContext(device_number)) + else: + self.context = GetResultValue(manager.GetSharedContext( + device_number, handle)) + self.device_number = device_number + + @staticmethod + def from_numba(context=None): + """ + Create a Context instance from a Numba CUDA context. + + Parameters + ---------- + context : {numba.cuda.cudadrv.driver.Context, None} + A Numba CUDA context instance. + If None, the current Numba context is used. + + Returns + ------- + shared_context : pyarrow.cuda.Context + Context instance. + """ + if context is None: + import numba.cuda + context = numba.cuda.current_context() + return Context(device_number=context.device.id, + handle=context.handle.value) + + def to_numba(self): + """ + Convert Context to a Numba CUDA context. + + Returns + ------- + context : numba.cuda.cudadrv.driver.Context + Numba CUDA context instance. + """ + import ctypes + import numba.cuda + device = numba.cuda.gpus[self.device_number] + handle = ctypes.c_void_p(self.handle) + context = numba.cuda.cudadrv.driver.Context(device, handle) + + class DummyPendingDeallocs(object): + # Context is managed by pyarrow + def add_item(self, *args, **kwargs): + pass + + context.deallocations = DummyPendingDeallocs() + return context + + @staticmethod + def get_num_devices(): + """ Return the number of GPU devices. + """ + cdef CCudaDeviceManager* manager + manager = GetResultValue(CCudaDeviceManager.Instance()) + return manager.num_devices() + + @property + def device_number(self): + """ Return context device number. + """ + return self.device_number + + @property + def handle(self): + """ Return pointer to context handle. + """ + return self.context.get().handle() + + cdef void init(self, const shared_ptr[CCudaContext]& ctx): + self.context = ctx + + def synchronize(self): + """Blocks until the device has completed all preceding requested + tasks. + """ + check_status(self.context.get().Synchronize()) + + @property + def bytes_allocated(self): + """Return the number of allocated bytes. + """ + return self.context.get().bytes_allocated() + + def get_device_address(self, uintptr_t address): + """Return the device address that is reachable from kernels running in + the context + + Parameters + ---------- + address : int + Specify memory address value + + Returns + ------- + device_address : int + Device address accessible from device context + + Notes + ----- + The device address is defined as a memory address accessible + by device. 
While it is often a device memory address but it + can be also a host memory address, for instance, when the + memory is allocated as host memory (using cudaMallocHost or + cudaHostAlloc) or as managed memory (using cudaMallocManaged) + or the host memory is page-locked (using cudaHostRegister). + """ + return GetResultValue(self.context.get().GetDeviceAddress(address)) + + def new_buffer(self, int64_t nbytes): + """Return new device buffer. + + Parameters + ---------- + nbytes : int + Specify the number of bytes to be allocated. + + Returns + ------- + buf : CudaBuffer + Allocated buffer. + """ + cdef: + shared_ptr[CCudaBuffer] cudabuf + with nogil: + cudabuf = GetResultValue(self.context.get().Allocate(nbytes)) + return pyarrow_wrap_cudabuffer(cudabuf) + + def foreign_buffer(self, address, size, base=None): + """ + Create device buffer from address and size as a view. + + The caller is responsible for allocating and freeing the + memory. When `address==size==0` then a new zero-sized buffer + is returned. + + Parameters + ---------- + address : int + Specify the starting address of the buffer. The address can + refer to both device or host memory but it must be + accessible from device after mapping it with + `get_device_address` method. + size : int + Specify the size of device buffer in bytes. + base : {None, object} + Specify object that owns the referenced memory. + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of device reachable memory. + + """ + if not address and size == 0: + return self.new_buffer(0) + cdef: + uintptr_t c_addr = self.get_device_address(address) + int64_t c_size = size + shared_ptr[CCudaBuffer] cudabuf + + cudabuf = GetResultValue(self.context.get().View( + c_addr, c_size)) + return pyarrow_wrap_cudabuffer_base(cudabuf, base) + + def open_ipc_buffer(self, ipc_handle): + """ Open existing CUDA IPC memory handle + + Parameters + ---------- + ipc_handle : IpcMemHandle + Specify opaque pointer to CUipcMemHandle (driver API). + + Returns + ------- + buf : CudaBuffer + referencing device buffer + """ + handle = pyarrow_unwrap_cudaipcmemhandle(ipc_handle) + cdef shared_ptr[CCudaBuffer] cudabuf + with nogil: + cudabuf = GetResultValue( + self.context.get().OpenIpcBuffer(handle.get()[0])) + return pyarrow_wrap_cudabuffer(cudabuf) + + def buffer_from_data(self, object data, int64_t offset=0, int64_t size=-1): + """Create device buffer and initialize with data. + + Parameters + ---------- + data : {CudaBuffer, HostBuffer, Buffer, array-like} + Specify data to be copied to device buffer. + offset : int + Specify the offset of input buffer for device data + buffering. Default: 0. + size : int + Specify the size of device buffer in bytes. Default: all + (starting from input offset) + + Returns + ------- + cbuf : CudaBuffer + Device buffer with copied data. 
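# A sketch of the two most common allocation paths described above, new_buffer
# and buffer_from_data; assumes a CUDA-enabled build and device 0.
import numpy as np
from pyarrow import cuda

ctx = cuda.Context(0)

# Fresh, uninitialized device allocation.
dbuf = ctx.new_buffer(64)

# Copy host data (anything as_buffer() accepts) onto the device.
host = np.arange(16, dtype=np.int32)
dbuf2 = ctx.buffer_from_data(host)
print(dbuf2.size)          # 64 bytes: 16 * int32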
+ """ + is_host_data = not pyarrow_is_cudabuffer(data) + buf = as_buffer(data) if is_host_data else data + + bsize = buf.size + if offset < 0 or (bsize and offset >= bsize): + raise ValueError('offset argument is out-of-range') + if size < 0: + size = bsize - offset + elif offset + size > bsize: + raise ValueError( + 'requested larger slice than available in device buffer') + + if offset != 0 or size != bsize: + buf = buf.slice(offset, size) + + result = self.new_buffer(size) + if is_host_data: + result.copy_from_host(buf, position=0, nbytes=size) + else: + result.copy_from_device(buf, position=0, nbytes=size) + return result + + def buffer_from_object(self, obj): + """Create device buffer view of arbitrary object that references + device accessible memory. + + When the object contains a non-contiguous view of device + accessible memory then the returned device buffer will contain + contiguous view of the memory, that is, including the + intermediate data that is otherwise invisible to the input + object. + + Parameters + ---------- + obj : {object, Buffer, HostBuffer, CudaBuffer, ...} + Specify an object that holds (device or host) address that + can be accessed from device. This includes objects with + types defined in pyarrow.cuda as well as arbitrary objects + that implement the CUDA array interface as defined by numba. + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of device accessible memory. + + """ + if isinstance(obj, HostBuffer): + return self.foreign_buffer(obj.address, obj.size, base=obj) + elif isinstance(obj, Buffer): + return CudaBuffer.from_buffer(obj) + elif isinstance(obj, CudaBuffer): + return obj + elif hasattr(obj, '__cuda_array_interface__'): + desc = obj.__cuda_array_interface__ + addr = desc['data'][0] + if addr is None: + return self.new_buffer(0) + import numpy as np + start, end = get_contiguous_span( + desc['shape'], desc.get('strides'), + np.dtype(desc['typestr']).itemsize) + return self.foreign_buffer(addr + start, end - start, base=obj) + raise ArrowTypeError('cannot create device buffer view from' + ' `%s` object' % (type(obj))) + + +cdef class IpcMemHandle(_Weakrefable): + """A serializable container for a CUDA IPC handle. + """ + cdef void init(self, shared_ptr[CCudaIpcMemHandle]& h): + self.handle = h + + @staticmethod + def from_buffer(Buffer opaque_handle): + """Create IpcMemHandle from opaque buffer (e.g. from another + process) + + Parameters + ---------- + opaque_handle : + a CUipcMemHandle as a const void* + + Returns + ------- + ipc_handle : IpcMemHandle + """ + c_buf = pyarrow_unwrap_buffer(opaque_handle) + cdef: + shared_ptr[CCudaIpcMemHandle] handle + + handle = GetResultValue( + CCudaIpcMemHandle.FromBuffer(c_buf.get().data())) + return pyarrow_wrap_cudaipcmemhandle(handle) + + def serialize(self, pool=None): + """Write IpcMemHandle to a Buffer + + Parameters + ---------- + pool : {MemoryPool, None} + Specify a pool to allocate memory from + + Returns + ------- + buf : Buffer + The serialized buffer. + """ + cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + cdef shared_ptr[CBuffer] buf + cdef CCudaIpcMemHandle* h = self.handle.get() + with nogil: + buf = GetResultValue(h.Serialize(pool_)) + return pyarrow_wrap_buffer(buf) + + +cdef class CudaBuffer(Buffer): + """An Arrow buffer with data located in a GPU device. + + To create a CudaBuffer instance, use Context.device_buffer(). + + The memory allocated in a CudaBuffer is freed when the buffer object + is deleted. 
+ """ + + def __init__(self): + raise TypeError("Do not call CudaBuffer's constructor directly, use " + "`.device_buffer`" + " method instead.") + + cdef void init_cuda(self, + const shared_ptr[CCudaBuffer]& buffer, + object base): + self.cuda_buffer = buffer + self.init( buffer) + self.base = base + + @staticmethod + def from_buffer(buf): + """ Convert back generic buffer into CudaBuffer + + Parameters + ---------- + buf : Buffer + Specify buffer containing CudaBuffer + + Returns + ------- + dbuf : CudaBuffer + Resulting device buffer. + """ + c_buf = pyarrow_unwrap_buffer(buf) + cuda_buffer = GetResultValue(CCudaBuffer.FromBuffer(c_buf)) + return pyarrow_wrap_cudabuffer(cuda_buffer) + + @staticmethod + def from_numba(mem): + """Create a CudaBuffer view from numba MemoryPointer instance. + + Parameters + ---------- + mem : numba.cuda.cudadrv.driver.MemoryPointer + + Returns + ------- + cbuf : CudaBuffer + Device buffer as a view of numba MemoryPointer. + """ + ctx = Context.from_numba(mem.context) + if mem.device_pointer.value is None and mem.size==0: + return ctx.new_buffer(0) + return ctx.foreign_buffer(mem.device_pointer.value, mem.size, base=mem) + + def to_numba(self): + """Return numba memory pointer of CudaBuffer instance. + """ + import ctypes + from numba.cuda.cudadrv.driver import MemoryPointer + return MemoryPointer(self.context.to_numba(), + pointer=ctypes.c_void_p(self.address), + size=self.size) + + cdef getitem(self, int64_t i): + return self.copy_to_host(position=i, nbytes=1)[0] + + def copy_to_host(self, int64_t position=0, int64_t nbytes=-1, + Buffer buf=None, + MemoryPool memory_pool=None, c_bool resizable=False): + """Copy memory from GPU device to CPU host + + Caller is responsible for ensuring that all tasks affecting + the memory are finished. Use + + `.context.synchronize()` + + when needed. + + Parameters + ---------- + position : int + Specify the starting position of the source data in GPU + device buffer. Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + the position until host buffer is full). + buf : Buffer + Specify a pre-allocated output buffer in host. Default: None + (allocate new output buffer). + memory_pool : MemoryPool + resizable : bool + Specify extra arguments to allocate_buffer. Used only when + buf is None. + + Returns + ------- + buf : Buffer + Output buffer in host. 
+ + """ + if position < 0 or (self.size and position > self.size) \ + or (self.size == 0 and position != 0): + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + if buf is None: + if nbytes < 0: + # copy all starting from position to new host buffer + c_nbytes = self.size - position + else: + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available from ' + 'device buffer') + # copy nbytes starting from position to new host buffer + c_nbytes = nbytes + buf = allocate_buffer(c_nbytes, memory_pool=memory_pool, + resizable=resizable) + else: + if nbytes < 0: + # copy all from position until given host buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested copy does not fit into host buffer') + # copy nbytes from position to given host buffer + c_nbytes = nbytes + + cdef: + shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf) + int64_t c_position = position + with nogil: + check_status(self.cuda_buffer.get() + .CopyToHost(c_position, c_nbytes, + c_buf.get().mutable_data())) + return buf + + def copy_from_host(self, data, int64_t position=0, int64_t nbytes=-1): + """Copy data from host to device. + + The device buffer must be pre-allocated. + + Parameters + ---------- + data : {Buffer, array-like} + Specify data in host. It can be array-like that is valid + argument to py_buffer + position : int + Specify the starting position of the copy in device buffer. + Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + source until device buffer, starting from position, is full) + + Returns + ------- + nbytes : int + Number of bytes copied. + """ + if position < 0 or position > self.size: + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + buf = as_buffer(data) + + if nbytes < 0: + # copy from host buffer to device buffer starting from + # position until device buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested more to copy than available from host buffer') + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available in device buffer') + # copy nbytes from host buffer to device buffer starting + # from position + c_nbytes = nbytes + + cdef: + shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(buf) + int64_t c_position = position + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromHost(c_position, c_buf.get().data(), + c_nbytes)) + return c_nbytes + + def copy_from_device(self, buf, int64_t position=0, int64_t nbytes=-1): + """Copy data from device to device. + + Parameters + ---------- + buf : CudaBuffer + Specify source device buffer. + position : int + Specify the starting position of the copy in device buffer. + Default: 0. + nbytes : int + Specify the number of bytes to copy. Default: -1 (all from + source until device buffer, starting from position, is full) + + Returns + ------- + nbytes : int + Number of bytes copied. 
+ + """ + if position < 0 or position > self.size: + raise ValueError('position argument is out-of-range') + cdef: + int64_t c_nbytes + + if nbytes < 0: + # copy from source device buffer to device buffer starting + # from position until device buffer is full + c_nbytes = min(self.size - position, buf.size) + else: + if nbytes > buf.size: + raise ValueError( + 'requested more to copy than available from device buffer') + if nbytes > self.size - position: + raise ValueError( + 'requested more to copy than available in device buffer') + # copy nbytes from source device buffer to device buffer + # starting from position + c_nbytes = nbytes + + cdef: + shared_ptr[CCudaBuffer] c_buf = pyarrow_unwrap_cudabuffer(buf) + int64_t c_position = position + shared_ptr[CCudaContext] c_src_ctx = pyarrow_unwrap_cudacontext( + buf.context) + void* c_source_data = (c_buf.get().address()) + + if self.context.handle != buf.context.handle: + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromAnotherDevice(c_src_ctx, c_position, + c_source_data, c_nbytes)) + else: + with nogil: + check_status(self.cuda_buffer.get(). + CopyFromDevice(c_position, c_source_data, + c_nbytes)) + return c_nbytes + + def export_for_ipc(self): + """ + Expose this device buffer as IPC memory which can be used in other + processes. + + After calling this function, this device memory will not be + freed when the CudaBuffer is destructed. + + Returns + ------- + ipc_handle : IpcMemHandle + The exported IPC handle + + """ + cdef shared_ptr[CCudaIpcMemHandle] handle + with nogil: + handle = GetResultValue(self.cuda_buffer.get().ExportForIpc()) + return pyarrow_wrap_cudaipcmemhandle(handle) + + @property + def context(self): + """Returns the CUDA driver context of this buffer. + """ + return pyarrow_wrap_cudacontext(self.cuda_buffer.get().context()) + + def slice(self, offset=0, length=None): + """Return slice of device buffer + + Parameters + ---------- + offset : int, default 0 + Specify offset from the start of device buffer to slice + length : int, default None + Specify the length of slice (default is until end of device + buffer starting from offset). If the length is larger than + the data available, the returned slice will have a size of + the available data starting from the offset. + + Returns + ------- + sliced : CudaBuffer + Zero-copy slice of device buffer. + + """ + if offset < 0 or (self.size and offset >= self.size): + raise ValueError('offset argument is out-of-range') + cdef int64_t offset_ = offset + cdef int64_t size + if length is None: + size = self.size - offset_ + elif offset + length <= self.size: + size = length + else: + size = self.size - offset + parent = pyarrow_unwrap_cudabuffer(self) + return pyarrow_wrap_cudabuffer(make_shared[CCudaBuffer](parent, + offset_, size)) + + def to_pybytes(self): + """Return device buffer content as Python bytes. + """ + return self.copy_to_host().to_pybytes() + + def __getbuffer__(self, cp.Py_buffer* buffer, int flags): + # Device buffer contains data pointers on the device. Hence, + # cannot support buffer protocol PEP-3118 for CudaBuffer. + raise BufferError('buffer protocol for device buffer not supported') + + +cdef class HostBuffer(Buffer): + """Device-accessible CPU memory created using cudaHostAlloc. 
+ + To create a HostBuffer instance, use + + cuda.new_host_buffer() + """ + + def __init__(self): + raise TypeError("Do not call HostBuffer's constructor directly," + " use `cuda.new_host_buffer` function instead.") + + cdef void init_host(self, const shared_ptr[CCudaHostBuffer]& buffer): + self.host_buffer = buffer + self.init( buffer) + + @property + def size(self): + return self.host_buffer.get().size() + + +cdef class BufferReader(NativeFile): + """File interface for zero-copy read from CUDA buffers. + + Note: Read methods return pointers to device memory. This means + you must be careful using this interface with any Arrow code which + may expect to be able to do anything other than pointer arithmetic + on the returned buffers. + """ + + def __cinit__(self, CudaBuffer obj): + self.buffer = obj + self.reader = new CCudaBufferReader(self.buffer.buffer) + self.set_random_access_file( + shared_ptr[CRandomAccessFile](self.reader)) + self.is_readable = True + + def read_buffer(self, nbytes=None): + """Return a slice view of the underlying device buffer. + + The slice will start at the current reader position and will + have specified size in bytes. + + Parameters + ---------- + nbytes : int, default None + Specify the number of bytes to read. Default: None (read all + remaining bytes). + + Returns + ------- + cbuf : CudaBuffer + New device buffer. + + """ + cdef: + int64_t c_nbytes + shared_ptr[CCudaBuffer] output + + if nbytes is None: + c_nbytes = self.size() - self.tell() + else: + c_nbytes = nbytes + + with nogil: + output = static_pointer_cast[CCudaBuffer, CBuffer]( + GetResultValue(self.reader.Read(c_nbytes))) + + return pyarrow_wrap_cudabuffer(output) + + +cdef class BufferWriter(NativeFile): + """File interface for writing to CUDA buffers. + + By default writes are unbuffered. Use set_buffer_size to enable + buffering. + """ + + def __cinit__(self, CudaBuffer buffer): + self.buffer = buffer + self.writer = new CCudaBufferWriter(self.buffer.cuda_buffer) + self.set_output_stream(shared_ptr[COutputStream](self.writer)) + self.is_writable = True + + def writeat(self, int64_t position, object data): + """Write data to buffer starting from position. + + Parameters + ---------- + position : int + Specify device buffer position where the data will be + written. + data : array-like + Specify data, the data instance must implement buffer + protocol. + """ + cdef: + Buffer buf = as_buffer(data) + const uint8_t* c_data = buf.buffer.get().data() + int64_t c_size = buf.buffer.get().size() + + with nogil: + check_status(self.writer.WriteAt(position, c_data, c_size)) + + def flush(self): + """ Flush the buffer stream """ + with nogil: + check_status(self.writer.Flush()) + + def seek(self, int64_t position, int whence=0): + # TODO: remove this method after NativeFile.seek supports + # writable files. + cdef int64_t offset + + with nogil: + if whence == 0: + offset = position + elif whence == 1: + offset = GetResultValue(self.writer.Tell()) + offset = offset + position + else: + with gil: + raise ValueError("Invalid value of whence: {0}" + .format(whence)) + check_status(self.writer.Seek(offset)) + return self.tell() + + @property + def buffer_size(self): + """Returns size of host (CPU) buffer, 0 for unbuffered + """ + return self.writer.buffer_size() + + @buffer_size.setter + def buffer_size(self, int64_t buffer_size): + """Set CPU buffer size to limit calls to cudaMemcpy + + Parameters + ---------- + buffer_size : int + Specify the size of CPU buffer to allocate in bytes. 
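# A sketch tying the CUDA file-like classes above together: a BufferWriter
# filling a device buffer and a BufferReader handing back zero-copy views.
# Assumes a CUDA-enabled build; sizes and payloads are illustrative.
from pyarrow import cuda

ctx = cuda.Context(0)
dbuf = ctx.new_buffer(128)

writer = cuda.BufferWriter(dbuf)
writer.write(b"header")            # unbuffered by default; goes straight to device
writer.writeat(64, b"payload")     # positioned write at byte offset 64
writer.flush()

reader = cuda.BufferReader(dbuf)
view = reader.read_buffer(6)       # zero-copy CudaBuffer slice, still on device
assert view.copy_to_host().to_pybytes() == b"header"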
+ """ + with nogil: + check_status(self.writer.SetBufferSize(buffer_size)) + + @property + def num_bytes_buffered(self): + """Returns number of bytes buffered on host + """ + return self.writer.num_bytes_buffered() + +# Functions + + +def new_host_buffer(const int64_t size, int device=0): + """Return buffer with CUDA-accessible memory on CPU host + + Parameters + ---------- + size : int + Specify the number of bytes to be allocated. + device : int + Specify GPU device number. + + Returns + ------- + dbuf : HostBuffer + Allocated host buffer + """ + cdef shared_ptr[CCudaHostBuffer] buffer + with nogil: + buffer = GetResultValue(AllocateCudaHostBuffer(device, size)) + return pyarrow_wrap_cudahostbuffer(buffer) + + +def serialize_record_batch(object batch, object ctx): + """ Write record batch message to GPU device memory + + Parameters + ---------- + batch : RecordBatch + Record batch to write + ctx : Context + CUDA Context to allocate device memory from + + Returns + ------- + dbuf : CudaBuffer + device buffer which contains the record batch message + """ + cdef shared_ptr[CCudaBuffer] buffer + cdef CRecordBatch* batch_ = pyarrow_unwrap_batch(batch).get() + cdef CCudaContext* ctx_ = pyarrow_unwrap_cudacontext(ctx).get() + with nogil: + buffer = GetResultValue(CudaSerializeRecordBatch(batch_[0], ctx_)) + return pyarrow_wrap_cudabuffer(buffer) + + +def read_message(object source, pool=None): + """ Read Arrow IPC message located on GPU device + + Parameters + ---------- + source : {CudaBuffer, cuda.BufferReader} + Device buffer or reader of device buffer. + pool : MemoryPool (optional) + Pool to allocate CPU memory for the metadata + + Returns + ------- + message : Message + The deserialized message, body still on device + """ + cdef: + Message result = Message.__new__(Message) + cdef CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + if not isinstance(source, BufferReader): + reader = BufferReader(source) + with nogil: + result.message = move( + GetResultValue(ReadMessage(reader.reader, pool_))) + return result + + +def read_record_batch(object buffer, object schema, *, + DictionaryMemo dictionary_memo=None, pool=None): + """Construct RecordBatch referencing IPC message located on CUDA device. + + While the metadata is copied to host memory for deserialization, + the record batch data remains on the device. 
+ + Parameters + ---------- + buffer : + Device buffer containing the complete IPC message + schema : Schema + The schema for the record batch + dictionary_memo : DictionaryMemo, optional + If message contains dictionaries, must pass a populated + DictionaryMemo + pool : MemoryPool (optional) + Pool to allocate metadata from + + Returns + ------- + batch : RecordBatch + Reconstructed record batch, with device pointers + + """ + cdef: + shared_ptr[CSchema] schema_ = pyarrow_unwrap_schema(schema) + shared_ptr[CCudaBuffer] buffer_ = pyarrow_unwrap_cudabuffer(buffer) + CDictionaryMemo temp_memo + CDictionaryMemo* arg_dict_memo + CMemoryPool* pool_ = maybe_unbox_memory_pool(pool) + shared_ptr[CRecordBatch] batch + + if dictionary_memo is not None: + arg_dict_memo = dictionary_memo.memo + else: + arg_dict_memo = &temp_memo + + with nogil: + batch = GetResultValue(CudaReadRecordBatch( + schema_, arg_dict_memo, buffer_, pool_)) + return pyarrow_wrap_batch(batch) + + +# Public API + + +cdef public api bint pyarrow_is_buffer(object buffer): + return isinstance(buffer, Buffer) + +# cudabuffer + +cdef public api bint pyarrow_is_cudabuffer(object buffer): + return isinstance(buffer, CudaBuffer) + + +cdef public api object \ + pyarrow_wrap_cudabuffer_base(const shared_ptr[CCudaBuffer]& buf, base): + cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer) + result.init_cuda(buf, base) + return result + + +cdef public api object \ + pyarrow_wrap_cudabuffer(const shared_ptr[CCudaBuffer]& buf): + cdef CudaBuffer result = CudaBuffer.__new__(CudaBuffer) + result.init_cuda(buf, None) + return result + + +cdef public api shared_ptr[CCudaBuffer] pyarrow_unwrap_cudabuffer(object obj): + if pyarrow_is_cudabuffer(obj): + return (obj).cuda_buffer + raise TypeError('expected CudaBuffer instance, got %s' + % (type(obj).__name__)) + +# cudahostbuffer + +cdef public api bint pyarrow_is_cudahostbuffer(object buffer): + return isinstance(buffer, HostBuffer) + + +cdef public api object \ + pyarrow_wrap_cudahostbuffer(const shared_ptr[CCudaHostBuffer]& buf): + cdef HostBuffer result = HostBuffer.__new__(HostBuffer) + result.init_host(buf) + return result + + +cdef public api shared_ptr[CCudaHostBuffer] \ + pyarrow_unwrap_cudahostbuffer(object obj): + if pyarrow_is_cudahostbuffer(obj): + return (obj).host_buffer + raise TypeError('expected HostBuffer instance, got %s' + % (type(obj).__name__)) + +# cudacontext + +cdef public api bint pyarrow_is_cudacontext(object ctx): + return isinstance(ctx, Context) + + +cdef public api object \ + pyarrow_wrap_cudacontext(const shared_ptr[CCudaContext]& ctx): + cdef Context result = Context.__new__(Context) + result.init(ctx) + return result + + +cdef public api shared_ptr[CCudaContext] \ + pyarrow_unwrap_cudacontext(object obj): + if pyarrow_is_cudacontext(obj): + return (obj).context + raise TypeError('expected Context instance, got %s' + % (type(obj).__name__)) + +# cudaipcmemhandle + +cdef public api bint pyarrow_is_cudaipcmemhandle(object handle): + return isinstance(handle, IpcMemHandle) + + +cdef public api object \ + pyarrow_wrap_cudaipcmemhandle(shared_ptr[CCudaIpcMemHandle]& h): + cdef IpcMemHandle result = IpcMemHandle.__new__(IpcMemHandle) + result.init(h) + return result + + +cdef public api shared_ptr[CCudaIpcMemHandle] \ + pyarrow_unwrap_cudaipcmemhandle(object obj): + if pyarrow_is_cudaipcmemhandle(obj): + return (obj).handle + raise TypeError('expected IpcMemHandle instance, got %s' + % (type(obj).__name__)) diff --git 
a/venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5ccceb9858fc1c2d7e780d239f50a2c6a75eb44f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/_dataset_parquet_encryption.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/_flight.pyx b/venv/lib/python3.10/site-packages/pyarrow/_flight.pyx new file mode 100644 index 0000000000000000000000000000000000000000..8289215de2e29c6cd7e09affd7ec5d377ee0fa9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/_flight.pyx @@ -0,0 +1,3189 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# cython: language_level = 3 + +import collections +import enum +import re +import time +import warnings +import weakref + +from cython.operator cimport dereference as deref +from cython.operator cimport postincrement +from libcpp cimport bool as c_bool + +from pyarrow.lib cimport * +from pyarrow.lib import (ArrowCancelled, ArrowException, ArrowInvalid, + SignalStopHandler) +from pyarrow.lib import as_buffer, frombytes, tobytes +from pyarrow.includes.libarrow_flight cimport * +from pyarrow.ipc import _get_legacy_format_default, _ReadPandasMixin +import pyarrow.lib as lib + + +cdef CFlightCallOptions DEFAULT_CALL_OPTIONS + + +cdef int check_flight_status(const CStatus& status) except -1 nogil: + cdef shared_ptr[FlightStatusDetail] detail + + if status.ok(): + return 0 + + detail = FlightStatusDetail.UnwrapStatus(status) + if detail: + with gil: + message = frombytes(status.message(), safe=True) + detail_msg = detail.get().extra_info() + if detail.get().code() == CFlightStatusInternal: + raise FlightInternalError(message, detail_msg) + elif detail.get().code() == CFlightStatusFailed: + message = _munge_grpc_python_error(message) + raise FlightServerError(message, detail_msg) + elif detail.get().code() == CFlightStatusTimedOut: + raise FlightTimedOutError(message, detail_msg) + elif detail.get().code() == CFlightStatusCancelled: + raise FlightCancelledError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnauthenticated: + raise FlightUnauthenticatedError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnauthorized: + raise FlightUnauthorizedError(message, detail_msg) + elif detail.get().code() == CFlightStatusUnavailable: + raise FlightUnavailableError(message, detail_msg) + + size_detail = FlightWriteSizeStatusDetail.UnwrapStatus(status) + if size_detail: + with gil: + message = frombytes(status.message(), safe=True) + raise FlightWriteSizeExceededError( + message, + size_detail.get().limit(), 
size_detail.get().actual()) + + return check_status(status) + + +_FLIGHT_SERVER_ERROR_REGEX = re.compile( + r'Flight RPC failed with message: (.*). Detail: ' + r'Python exception: (.*)', + re.DOTALL +) + + +def _munge_grpc_python_error(message): + m = _FLIGHT_SERVER_ERROR_REGEX.match(message) + if m: + return ('Flight RPC failed with Python exception \"{}: {}\"' + .format(m.group(2), m.group(1))) + else: + return message + + +cdef IpcWriteOptions _get_options(options): + return _get_legacy_format_default( + use_legacy_format=None, options=options) + + +cdef class FlightCallOptions(_Weakrefable): + """RPC-layer options for a Flight call.""" + + cdef: + CFlightCallOptions options + + def __init__(self, timeout=None, write_options=None, headers=None, + IpcReadOptions read_options=None): + """Create call options. + + Parameters + ---------- + timeout : float, None + A timeout for the call, in seconds. None means that the + timeout defaults to an implementation-specific value. + write_options : pyarrow.ipc.IpcWriteOptions, optional + IPC write options. The default options can be controlled + by environment variables (see pyarrow.ipc). + headers : List[Tuple[str, str]], optional + A list of arbitrary headers as key, value tuples + read_options : pyarrow.ipc.IpcReadOptions, optional + Serialization options for reading IPC format. + """ + cdef IpcWriteOptions c_write_options + + if timeout is not None: + self.options.timeout = CTimeoutDuration(timeout) + if write_options is not None: + c_write_options = _get_options(write_options) + self.options.write_options = c_write_options.c_options + if read_options is not None: + if not isinstance(read_options, IpcReadOptions): + raise TypeError("expected IpcReadOptions, got {}" + .format(type(read_options))) + self.options.read_options = read_options.c_options + if headers is not None: + self.options.headers = headers + + @staticmethod + cdef CFlightCallOptions* unwrap(obj): + if not obj: + return &DEFAULT_CALL_OPTIONS + elif isinstance(obj, FlightCallOptions): + return &(( obj).options) + raise TypeError("Expected a FlightCallOptions object, not " + "'{}'".format(type(obj))) + + +_CertKeyPair = collections.namedtuple('_CertKeyPair', ['cert', 'key']) + + +class CertKeyPair(_CertKeyPair): + """A TLS certificate and key for use in Flight.""" + + +cdef class FlightError(Exception): + """ + The base class for Flight-specific errors. + + A server may raise this class or one of its subclasses to provide + a more detailed error to clients. + + Parameters + ---------- + message : str, optional + The error message. + extra_info : bytes, optional + Extra binary error details that were provided by the + server/will be sent to the client. + + Attributes + ---------- + extra_info : bytes + Extra binary error details that were provided by the + server/will be sent to the client. 
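# A sketch of how FlightCallOptions from above is passed into client calls, and
# how the per-status exceptions raised via check_flight_status surface to the
# caller. The endpoint URI, header values and ticket are illustrative.
import pyarrow.flight as flight

client = flight.connect("grpc://localhost:8815")
options = flight.FlightCallOptions(
    timeout=5.0,                           # seconds; None means implementation default
    headers=[(b"x-request-id", b"1234")],  # arbitrary key/value metadata
)
try:
    table = client.do_get(flight.Ticket(b"example-ticket"),
                          options=options).read_all()
except flight.FlightTimedOutError:
    ...  # the 5 second deadline elapsed
except flight.FlightUnavailableError:
    ...  # server not reachable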
+ """ + + cdef dict __dict__ + + def __init__(self, message='', extra_info=b''): + super().__init__(message) + self.extra_info = tobytes(extra_info) + + cdef CStatus to_status(self): + message = tobytes("Flight error: {}".format(str(self))) + return CStatus_UnknownError(message) + + +cdef class FlightInternalError(FlightError, ArrowException): + """An error internal to the Flight server occurred.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusInternal, + tobytes(str(self)), self.extra_info) + + +cdef class FlightTimedOutError(FlightError, ArrowException): + """The Flight RPC call timed out.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusTimedOut, + tobytes(str(self)), self.extra_info) + + +cdef class FlightCancelledError(FlightError, ArrowCancelled): + """The operation was cancelled.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusCancelled, tobytes(str(self)), + self.extra_info) + + +cdef class FlightServerError(FlightError, ArrowException): + """A server error occurred.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusFailed, tobytes(str(self)), + self.extra_info) + + +cdef class FlightUnauthenticatedError(FlightError, ArrowException): + """The client is not authenticated.""" + + cdef CStatus to_status(self): + return MakeFlightError( + CFlightStatusUnauthenticated, tobytes(str(self)), self.extra_info) + + +cdef class FlightUnauthorizedError(FlightError, ArrowException): + """The client is not authorized to perform the given operation.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusUnauthorized, tobytes(str(self)), + self.extra_info) + + +cdef class FlightUnavailableError(FlightError, ArrowException): + """The server is not reachable or available.""" + + cdef CStatus to_status(self): + return MakeFlightError(CFlightStatusUnavailable, tobytes(str(self)), + self.extra_info) + + +class FlightWriteSizeExceededError(ArrowInvalid): + """A write operation exceeded the client-configured limit.""" + + def __init__(self, message, limit, actual): + super().__init__(message) + self.limit = limit + self.actual = actual + + +cdef class Action(_Weakrefable): + """An action executable on a Flight service.""" + cdef: + CAction action + + def __init__(self, action_type, buf): + """Create an action from a type and a buffer. + + Parameters + ---------- + action_type : bytes or str + buf : Buffer or bytes-like object + """ + self.action.type = tobytes(action_type) + self.action.body = pyarrow_unwrap_buffer(as_buffer(buf)) + + @property + def type(self): + """The action type.""" + return frombytes(self.action.type) + + @property + def body(self): + """The action body (arguments for the action).""" + return pyarrow_wrap_buffer(self.action.body) + + @staticmethod + cdef CAction unwrap(action) except *: + if not isinstance(action, Action): + raise TypeError("Must provide Action, not '{}'".format( + type(action))) + return ( action).action + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.action.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
+ + """ + cdef Action action = Action.__new__(Action) + action.action = GetResultValue( + CAction.Deserialize(tobytes(serialized))) + return action + + def __eq__(self, Action other): + return self.action == other.action + + def __repr__(self): + return (f"") + + +_ActionType = collections.namedtuple('_ActionType', ['type', 'description']) + + +class ActionType(_ActionType): + """A type of action that is executable on a Flight service.""" + + def make_action(self, buf): + """Create an Action with this type. + + Parameters + ---------- + buf : obj + An Arrow buffer or Python bytes or bytes-like object. + """ + return Action(self.type, buf) + + +cdef class Result(_Weakrefable): + """A result from executing an Action.""" + cdef: + unique_ptr[CFlightResult] result + + def __init__(self, buf): + """Create a new result. + + Parameters + ---------- + buf : Buffer or bytes-like object + """ + self.result.reset(new CFlightResult()) + self.result.get().body = pyarrow_unwrap_buffer(as_buffer(buf)) + + @property + def body(self): + """Get the Buffer containing the result.""" + return pyarrow_wrap_buffer(self.result.get().body) + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.result.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef Result result = Result.__new__(Result) + result.result.reset(new CFlightResult(GetResultValue( + CFlightResult.Deserialize(tobytes(serialized))))) + return result + + def __eq__(self, Result other): + return deref(self.result.get()) == deref(other.result.get()) + + def __repr__(self): + return f"" + + +cdef class BasicAuth(_Weakrefable): + """A container for basic auth.""" + cdef: + unique_ptr[CBasicAuth] basic_auth + + def __init__(self, username=None, password=None): + """Create a new basic auth object. + + Parameters + ---------- + username : string + password : string + """ + self.basic_auth.reset(new CBasicAuth()) + if username: + self.basic_auth.get().username = tobytes(username) + if password: + self.basic_auth.get().password = tobytes(password) + + @property + def username(self): + """Get the username.""" + return self.basic_auth.get().username + + @property + def password(self): + """Get the password.""" + return self.basic_auth.get().password + + @staticmethod + def deserialize(serialized): + auth = BasicAuth() + auth.basic_auth.reset(new CBasicAuth(GetResultValue( + CBasicAuth.Deserialize(tobytes(serialized))))) + return auth + + def serialize(self): + return GetResultValue(self.basic_auth.get().SerializeToString()) + + def __eq__(self, BasicAuth other): + return deref(self.basic_auth.get()) == deref(other.basic_auth.get()) + + def __repr__(self): + return (f"") + + +class DescriptorType(enum.Enum): + """ + The type of a FlightDescriptor. + + Attributes + ---------- + + UNKNOWN + An unknown descriptor type. + + PATH + A Flight stream represented by a path. + + CMD + A Flight stream represented by an application-defined command. 
+ + """ + + UNKNOWN = 0 + PATH = 1 + CMD = 2 + + +class FlightMethod(enum.Enum): + """The implemented methods in Flight.""" + + INVALID = 0 + HANDSHAKE = 1 + LIST_FLIGHTS = 2 + GET_FLIGHT_INFO = 3 + GET_SCHEMA = 4 + DO_GET = 5 + DO_PUT = 6 + DO_ACTION = 7 + LIST_ACTIONS = 8 + DO_EXCHANGE = 9 + + +cdef wrap_flight_method(CFlightMethod method): + if method == CFlightMethodHandshake: + return FlightMethod.HANDSHAKE + elif method == CFlightMethodListFlights: + return FlightMethod.LIST_FLIGHTS + elif method == CFlightMethodGetFlightInfo: + return FlightMethod.GET_FLIGHT_INFO + elif method == CFlightMethodGetSchema: + return FlightMethod.GET_SCHEMA + elif method == CFlightMethodDoGet: + return FlightMethod.DO_GET + elif method == CFlightMethodDoPut: + return FlightMethod.DO_PUT + elif method == CFlightMethodDoAction: + return FlightMethod.DO_ACTION + elif method == CFlightMethodListActions: + return FlightMethod.LIST_ACTIONS + elif method == CFlightMethodDoExchange: + return FlightMethod.DO_EXCHANGE + return FlightMethod.INVALID + + +cdef class FlightDescriptor(_Weakrefable): + """A description of a data stream available from a Flight service.""" + cdef: + CFlightDescriptor descriptor + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "`pyarrow.flight.FlightDescriptor.for_{path,command}` " + "function instead." + .format(self.__class__.__name__)) + + @staticmethod + def for_path(*path): + """Create a FlightDescriptor for a resource path.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor.type = CDescriptorTypePath + result.descriptor.path = [tobytes(p) for p in path] + return result + + @staticmethod + def for_command(command): + """Create a FlightDescriptor for an opaque command.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor.type = CDescriptorTypeCmd + result.descriptor.cmd = tobytes(command) + return result + + @property + def descriptor_type(self): + """Get the type of this descriptor.""" + if self.descriptor.type == CDescriptorTypeUnknown: + return DescriptorType.UNKNOWN + elif self.descriptor.type == CDescriptorTypePath: + return DescriptorType.PATH + elif self.descriptor.type == CDescriptorTypeCmd: + return DescriptorType.CMD + raise RuntimeError("Invalid descriptor type!") + + @property + def command(self): + """Get the command for this descriptor.""" + if self.descriptor_type != DescriptorType.CMD: + return None + return self.descriptor.cmd + + @property + def path(self): + """Get the path for this descriptor.""" + if self.descriptor_type != DescriptorType.PATH: + return None + return self.descriptor.path + + def __repr__(self): + if self.descriptor_type == DescriptorType.PATH: + return f"" + elif self.descriptor_type == DescriptorType.CMD: + return f"" + else: + return "" + + @staticmethod + cdef CFlightDescriptor unwrap(descriptor) except *: + if not isinstance(descriptor, FlightDescriptor): + raise TypeError("Must provide a FlightDescriptor, not '{}'".format( + type(descriptor))) + return ( descriptor).descriptor + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.descriptor.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. 
REST + services) that may want to return Flight types. + + """ + cdef FlightDescriptor descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + descriptor.descriptor = GetResultValue( + CFlightDescriptor.Deserialize(tobytes(serialized))) + return descriptor + + def __eq__(self, FlightDescriptor other): + return self.descriptor == other.descriptor + + +cdef class Ticket(_Weakrefable): + """A ticket for requesting a Flight stream.""" + + cdef: + CTicket c_ticket + + def __init__(self, ticket): + self.c_ticket.ticket = tobytes(ticket) + + @property + def ticket(self): + return self.c_ticket.ticket + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.c_ticket.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef Ticket ticket = Ticket.__new__(Ticket) + ticket.c_ticket = GetResultValue( + CTicket.Deserialize(tobytes(serialized))) + return ticket + + def __eq__(self, Ticket other): + return self.c_ticket == other.c_ticket + + def __repr__(self): + return f"" + + +cdef class Location(_Weakrefable): + """The location of a Flight service.""" + cdef: + CLocation location + + def __init__(self, uri): + check_flight_status(CLocation.Parse(tobytes(uri)).Value(&self.location)) + + def __repr__(self): + return f'' + + @property + def uri(self): + return self.location.ToString() + + def equals(self, Location other): + return self == other + + def __eq__(self, other): + if not isinstance(other, Location): + return NotImplemented + return self.location.Equals(( other).location) + + @staticmethod + def for_grpc_tcp(host, port): + """Create a Location for a TCP-based gRPC service.""" + cdef: + c_string c_host = tobytes(host) + int c_port = port + Location result = Location.__new__(Location) + check_flight_status( + CLocation.ForGrpcTcp(c_host, c_port).Value(&result.location)) + return result + + @staticmethod + def for_grpc_tls(host, port): + """Create a Location for a TLS-based gRPC service.""" + cdef: + c_string c_host = tobytes(host) + int c_port = port + Location result = Location.__new__(Location) + check_flight_status( + CLocation.ForGrpcTls(c_host, c_port).Value(&result.location)) + return result + + @staticmethod + def for_grpc_unix(path): + """Create a Location for a domain socket-based gRPC service.""" + cdef: + c_string c_path = tobytes(path) + Location result = Location.__new__(Location) + check_flight_status(CLocation.ForGrpcUnix(c_path).Value(&result.location)) + return result + + @staticmethod + cdef Location wrap(CLocation location): + cdef Location result = Location.__new__(Location) + result.location = location + return result + + @staticmethod + cdef CLocation unwrap(object location) except *: + cdef CLocation c_location + if isinstance(location, str): + check_flight_status( + CLocation.Parse(tobytes(location)).Value(&c_location)) + return c_location + elif not isinstance(location, Location): + raise TypeError("Must provide a Location, not '{}'".format( + type(location))) + return ( location).location + + +cdef class FlightEndpoint(_Weakrefable): + """A Flight stream, along with the ticket and locations to access it.""" + cdef: + CFlightEndpoint endpoint + + def __init__(self, ticket, locations): + 
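# A sketch of the Ticket and Location helpers defined above; host, port and
# socket path values are illustrative.
import pyarrow.flight as flight

ticket = flight.Ticket(b"partition-0")
assert flight.Ticket.deserialize(ticket.serialize()) == ticket

loc_tcp = flight.Location.for_grpc_tcp("localhost", 8815)
loc_tls = flight.Location.for_grpc_tls("flight.example.com", 443)
loc_unix = flight.Location.for_grpc_unix("/tmp/flight.sock")
print(loc_tcp.uri)    # e.g. b'grpc+tcp://localhost:8815'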
"""Create a FlightEndpoint from a ticket and list of locations. + + Parameters + ---------- + ticket : Ticket or bytes + the ticket needed to access this flight + locations : list of string URIs + locations where this flight is available + + Raises + ------ + ArrowException + If one of the location URIs is not a valid URI. + """ + cdef: + CLocation c_location + + if isinstance(ticket, Ticket): + self.endpoint.ticket.ticket = tobytes(ticket.ticket) + else: + self.endpoint.ticket.ticket = tobytes(ticket) + + for location in locations: + if isinstance(location, Location): + c_location = ( location).location + else: + c_location = CLocation() + check_flight_status( + CLocation.Parse(tobytes(location)).Value(&c_location)) + self.endpoint.locations.push_back(c_location) + + @property + def ticket(self): + """Get the ticket in this endpoint.""" + return Ticket(self.endpoint.ticket.ticket) + + @property + def locations(self): + return [Location.wrap(location) + for location in self.endpoint.locations] + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.endpoint.SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + cdef FlightEndpoint endpoint = FlightEndpoint.__new__(FlightEndpoint) + endpoint.endpoint = GetResultValue( + CFlightEndpoint.Deserialize(tobytes(serialized))) + return endpoint + + def __repr__(self): + return (f"") + + def __eq__(self, FlightEndpoint other): + return self.endpoint == other.endpoint + + +cdef class SchemaResult(_Weakrefable): + """The serialized schema returned from a GetSchema request.""" + cdef: + unique_ptr[CSchemaResult] result + + def __init__(self, Schema schema): + """Create a SchemaResult from a schema. + + Parameters + ---------- + schema: Schema + the schema of the data in this flight. + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + check_flight_status(CreateSchemaResult(c_schema, &self.result)) + + @property + def schema(self): + """The schema of the data in this flight.""" + cdef: + shared_ptr[CSchema] schema + CDictionaryMemo dummy_memo + + check_flight_status(self.result.get().GetSchema(&dummy_memo).Value(&schema)) + return pyarrow_wrap_schema(schema) + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.result.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
+ + """ + cdef SchemaResult result = SchemaResult.__new__(SchemaResult) + result.result.reset(new CSchemaResult(GetResultValue( + CSchemaResult.Deserialize(tobytes(serialized))))) + return result + + def __eq__(self, SchemaResult other): + return deref(self.result.get()) == deref(other.result.get()) + + def __repr__(self): + return f"" + + +cdef class FlightInfo(_Weakrefable): + """A description of a Flight stream.""" + cdef: + unique_ptr[CFlightInfo] info + + @staticmethod + cdef wrap(CFlightInfo c_info): + cdef FlightInfo obj = FlightInfo.__new__(FlightInfo) + obj.info.reset(new CFlightInfo(move(c_info))) + return obj + + def __init__(self, Schema schema, FlightDescriptor descriptor, endpoints, + total_records, total_bytes): + """Create a FlightInfo object from a schema, descriptor, and endpoints. + + Parameters + ---------- + schema : Schema + the schema of the data in this flight. + descriptor : FlightDescriptor + the descriptor for this flight. + endpoints : list of FlightEndpoint + a list of endpoints where this flight is available. + total_records : int + the total records in this flight, or -1 if unknown + total_bytes : int + the total bytes in this flight, or -1 if unknown + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + vector[CFlightEndpoint] c_endpoints + + for endpoint in endpoints: + if isinstance(endpoint, FlightEndpoint): + c_endpoints.push_back(( endpoint).endpoint) + else: + raise TypeError('Endpoint {} is not instance of' + ' FlightEndpoint'.format(endpoint)) + + check_flight_status(CreateFlightInfo(c_schema, + descriptor.descriptor, + c_endpoints, + total_records, + total_bytes, &self.info)) + + @property + def total_records(self): + """The total record count of this flight, or -1 if unknown.""" + return self.info.get().total_records() + + @property + def total_bytes(self): + """The size in bytes of the data in this flight, or -1 if unknown.""" + return self.info.get().total_bytes() + + @property + def schema(self): + """The schema of the data in this flight.""" + cdef: + shared_ptr[CSchema] schema + CDictionaryMemo dummy_memo + + check_flight_status(self.info.get().GetSchema(&dummy_memo).Value(&schema)) + return pyarrow_wrap_schema(schema) + + @property + def descriptor(self): + """The descriptor of the data in this flight.""" + cdef FlightDescriptor result = \ + FlightDescriptor.__new__(FlightDescriptor) + result.descriptor = self.info.get().descriptor() + return result + + @property + def endpoints(self): + """The endpoints where this flight is available.""" + # TODO: get Cython to iterate over reference directly + cdef: + vector[CFlightEndpoint] endpoints = self.info.get().endpoints() + FlightEndpoint py_endpoint + + result = [] + for endpoint in endpoints: + py_endpoint = FlightEndpoint.__new__(FlightEndpoint) + py_endpoint.endpoint = endpoint + result.append(py_endpoint) + return result + + def serialize(self): + """Get the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. + + """ + return GetResultValue(self.info.get().SerializeToString()) + + @classmethod + def deserialize(cls, serialized): + """Parse the wire-format representation of this type. + + Useful when interoperating with non-Flight systems (e.g. REST + services) that may want to return Flight types. 
+ + """ + cdef FlightInfo info = FlightInfo.__new__(FlightInfo) + info.info = move(GetResultValue( + CFlightInfo.Deserialize(tobytes(serialized)))) + return info + + def __eq__(self, FlightInfo other): + return deref(self.info.get()) == deref(other.info.get()) + + def __repr__(self): + return (f"") + + +cdef class FlightStreamChunk(_Weakrefable): + """A RecordBatch with application metadata on the side.""" + cdef: + CFlightStreamChunk chunk + + @property + def data(self): + if self.chunk.data == NULL: + return None + return pyarrow_wrap_batch(self.chunk.data) + + @property + def app_metadata(self): + if self.chunk.app_metadata == NULL: + return None + return pyarrow_wrap_buffer(self.chunk.app_metadata) + + def __iter__(self): + return iter((self.data, self.app_metadata)) + + def __repr__(self): + return "".format( + self.chunk.data != NULL, self.chunk.app_metadata != NULL) + + +cdef class _MetadataRecordBatchReader(_Weakrefable, _ReadPandasMixin): + """A reader for Flight streams.""" + + # Needs to be separate class so the "real" class can subclass the + # pure-Python mixin class + + cdef dict __dict__ + cdef shared_ptr[CMetadataRecordBatchReader] reader + + def __iter__(self): + return self + + def __next__(self): + return self.read_chunk() + + @property + def schema(self): + """Get the schema for this reader.""" + cdef shared_ptr[CSchema] c_schema + with nogil: + check_flight_status(self.reader.get().GetSchema().Value(&c_schema)) + return pyarrow_wrap_schema(c_schema) + + def read_all(self): + """Read the entire contents of the stream as a Table.""" + cdef: + shared_ptr[CTable] c_table + with nogil: + check_flight_status(self.reader.get().ToTable().Value(&c_table)) + return pyarrow_wrap_table(c_table) + + def read_chunk(self): + """Read the next FlightStreamChunk along with any metadata. + + Returns + ------- + chunk : FlightStreamChunk + The next FlightStreamChunk in the stream. + + Raises + ------ + StopIteration + when the stream is finished + """ + cdef: + FlightStreamChunk chunk = FlightStreamChunk() + + with nogil: + check_flight_status(self.reader.get().Next().Value(&chunk.chunk)) + + if chunk.chunk.data == NULL and chunk.chunk.app_metadata == NULL: + raise StopIteration + + return chunk + + def to_reader(self): + """Convert this reader into a regular RecordBatchReader. + + This may fail if the schema cannot be read from the remote end. + + Returns + ------- + RecordBatchReader + """ + cdef RecordBatchReader reader + reader = RecordBatchReader.__new__(RecordBatchReader) + with nogil: + reader.reader = GetResultValue(MakeRecordBatchReader(self.reader)) + + return reader + + +cdef class MetadataRecordBatchReader(_MetadataRecordBatchReader): + """The base class for readers for Flight streams. + + See Also + -------- + FlightStreamReader + """ + + +cdef class FlightStreamReader(MetadataRecordBatchReader): + """A reader that can also be canceled.""" + + def cancel(self): + """Cancel the read operation.""" + with nogil: + ( self.reader.get()).Cancel() + + def read_all(self): + """Read the entire contents of the stream as a Table.""" + cdef: + shared_ptr[CTable] c_table + CStopToken stop_token + with SignalStopHandler() as stop_handler: + stop_token = ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + ( self.reader.get()) + .ToTableWithStopToken(stop_token).Value(&c_table)) + return pyarrow_wrap_table(c_table) + + +cdef class MetadataRecordBatchWriter(_CRecordBatchWriter): + """A RecordBatchWriter that also allows writing application metadata. 
+ + This class is a context manager; on exit, close() will be called. + """ + + cdef CMetadataRecordBatchWriter* _writer(self) nogil: + return self.writer.get() + + def begin(self, schema: Schema, options=None): + """Prepare to write data to this stream with the given schema.""" + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CIpcWriteOptions c_options = _get_options(options).c_options + with nogil: + check_flight_status(self._writer().Begin(c_schema, c_options)) + + def write_metadata(self, buf): + """Write Flight metadata by itself.""" + cdef shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(as_buffer(buf)) + with nogil: + check_flight_status( + self._writer().WriteMetadata(c_buf)) + + def write_batch(self, RecordBatch batch): + """ + Write RecordBatch to stream. + + Parameters + ---------- + batch : RecordBatch + """ + cdef: + shared_ptr[const CKeyValueMetadata] custom_metadata + + # Override superclass method to use check_flight_status so we + # can generate FlightWriteSizeExceededError. We don't do this + # for write_table as callers who intend to handle the error + # and retry with a smaller batch should be working with + # individual batches to have control. + + with nogil: + check_flight_status( + self._writer().WriteRecordBatch(deref(batch.batch), custom_metadata)) + + def write_table(self, Table table, max_chunksize=None, **kwargs): + """ + Write Table to stream in (contiguous) RecordBatch objects. + + Parameters + ---------- + table : Table + max_chunksize : int, default None + Maximum number of rows for RecordBatch chunks. Individual chunks may + be smaller depending on the chunk layout of individual columns. + """ + cdef: + # max_chunksize must be > 0 to have any impact + int64_t c_max_chunksize = -1 + + if 'chunksize' in kwargs: + max_chunksize = kwargs['chunksize'] + msg = ('The parameter chunksize is deprecated for the write_table ' + 'methods as of 0.15, please use parameter ' + 'max_chunksize instead') + warnings.warn(msg, FutureWarning) + + if max_chunksize is not None: + c_max_chunksize = max_chunksize + + with nogil: + check_flight_status( + self._writer().WriteTable(table.table[0], c_max_chunksize)) + + def close(self): + """ + Close stream and write end-of-stream 0 marker. + """ + with nogil: + check_flight_status(self._writer().Close()) + + def write_with_metadata(self, RecordBatch batch, buf): + """Write a RecordBatch along with Flight metadata. + + Parameters + ---------- + batch : RecordBatch + The next RecordBatch in the stream. + buf : Buffer + Application-specific metadata for the batch as defined by + Flight. 
+ """ + cdef shared_ptr[CBuffer] c_buf = pyarrow_unwrap_buffer(as_buffer(buf)) + with nogil: + check_flight_status( + self._writer().WriteWithMetadata(deref(batch.batch), c_buf)) + + +cdef class FlightStreamWriter(MetadataRecordBatchWriter): + """A writer that also allows closing the write side of a stream.""" + + def done_writing(self): + """Indicate that the client is done writing, but not done reading.""" + with nogil: + check_flight_status( + ( self.writer.get()).DoneWriting()) + + +cdef class FlightMetadataReader(_Weakrefable): + """A reader for Flight metadata messages sent during a DoPut.""" + + cdef: + unique_ptr[CFlightMetadataReader] reader + + def read(self): + """Read the next metadata message.""" + cdef shared_ptr[CBuffer] buf + with nogil: + check_flight_status(self.reader.get().ReadMetadata(&buf)) + if buf == NULL: + return None + return pyarrow_wrap_buffer(buf) + + +cdef class FlightMetadataWriter(_Weakrefable): + """A sender for Flight metadata messages during a DoPut.""" + + cdef: + unique_ptr[CFlightMetadataWriter] writer + + def write(self, message): + """Write the next metadata message. + + Parameters + ---------- + message : Buffer + """ + cdef shared_ptr[CBuffer] buf = \ + pyarrow_unwrap_buffer(as_buffer(message)) + with nogil: + check_flight_status(self.writer.get().WriteMetadata(deref(buf))) + + +class AsyncioCall: + """State for an async RPC using asyncio.""" + + def __init__(self) -> None: + import asyncio + self._future = asyncio.get_running_loop().create_future() + + def as_awaitable(self) -> object: + return self._future + + def wakeup(self, result_or_exception) -> None: + # Mark the Future done from within its loop (asyncio + # objects are generally not thread-safe) + loop = self._future.get_loop() + if isinstance(result_or_exception, BaseException): + loop.call_soon_threadsafe( + self._future.set_exception, result_or_exception) + else: + loop.call_soon_threadsafe( + self._future.set_result, result_or_exception) + + +cdef class AsyncioFlightClient: + """ + A FlightClient with an asyncio-based async interface. + + This interface is EXPERIMENTAL. + """ + + cdef: + FlightClient _client + + def __init__(self, FlightClient client) -> None: + self._client = client + + async def get_flight_info( + self, + descriptor: FlightDescriptor, + *, + options: FlightCallOptions = None, + ): + call = AsyncioCall() + self._get_flight_info(call, descriptor, options) + return await call.as_awaitable() + + cdef _get_flight_info(self, call, descriptor, options): + cdef: + CFlightCallOptions* c_options = \ + FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + CFuture[CFlightInfo] c_future + + with nogil: + c_future = self._client.client.get().GetFlightInfoAsync( + deref(c_options), c_descriptor) + + BindFuture(move(c_future), call.wakeup, FlightInfo.wrap) + + +cdef class FlightClient(_Weakrefable): + """A client to a Flight service. + + Connect to a Flight service on the given host and port. + + Parameters + ---------- + location : str, tuple or Location + Location to connect to. Either a gRPC URI like `grpc://localhost:port`, + a tuple of (host, port) pair, or a Location instance. + tls_root_certs : bytes or None + PEM-encoded + cert_chain: bytes or None + Client certificate if using mutual TLS + private_key: bytes or None + Client private key for cert_chain is using mutual TLS + override_hostname : str or None + Override the hostname checked by TLS. Insecure, use with caution. 
+ middleware : list optional, default None + A list of ClientMiddlewareFactory instances. + write_size_limit_bytes : int optional, default None + A soft limit on the size of a data payload sent to the + server. Enabled if positive. If enabled, writing a record + batch that (when serialized) exceeds this limit will raise an + exception; the client can retry the write with a smaller + batch. + disable_server_verification : boolean optional, default False + A flag that indicates that, if the client is connecting + with TLS, that it skips server verification. If this is + enabled, all other TLS settings are overridden. + generic_options : list optional, default None + A list of generic (string, int or string) option tuples passed + to the underlying transport. Effect is implementation + dependent. + """ + cdef: + unique_ptr[CFlightClient] client + + def __init__(self, location, *, tls_root_certs=None, cert_chain=None, + private_key=None, override_hostname=None, middleware=None, + write_size_limit_bytes=None, + disable_server_verification=None, generic_options=None): + if isinstance(location, (bytes, str)): + location = Location(location) + elif isinstance(location, tuple): + host, port = location + if tls_root_certs or disable_server_verification is not None: + location = Location.for_grpc_tls(host, port) + else: + location = Location.for_grpc_tcp(host, port) + elif not isinstance(location, Location): + raise TypeError('`location` argument must be a string, tuple or a ' + 'Location instance') + self.init(location, tls_root_certs, cert_chain, private_key, + override_hostname, middleware, write_size_limit_bytes, + disable_server_verification, generic_options) + + cdef init(self, Location location, tls_root_certs, cert_chain, + private_key, override_hostname, middleware, + write_size_limit_bytes, disable_server_verification, + generic_options): + cdef: + CLocation c_location = Location.unwrap(location) + CFlightClientOptions c_options = CFlightClientOptions.Defaults() + function[cb_client_middleware_start_call] start_call = \ + &_client_middleware_start_call + CIntStringVariant variant + + if tls_root_certs: + c_options.tls_root_certs = tobytes(tls_root_certs) + if cert_chain: + c_options.cert_chain = tobytes(cert_chain) + if private_key: + c_options.private_key = tobytes(private_key) + if override_hostname: + c_options.override_hostname = tobytes(override_hostname) + if disable_server_verification is not None: + c_options.disable_server_verification = disable_server_verification + if middleware: + for factory in middleware: + c_options.middleware.push_back( + + make_shared[CPyClientMiddlewareFactory]( + factory, start_call)) + if write_size_limit_bytes is not None: + c_options.write_size_limit_bytes = write_size_limit_bytes + else: + c_options.write_size_limit_bytes = 0 + if generic_options: + for key, value in generic_options: + if isinstance(value, (str, bytes)): + variant = CIntStringVariant( tobytes(value)) + else: + variant = CIntStringVariant( value) + c_options.generic_options.push_back( + pair[c_string, CIntStringVariant](tobytes(key), variant)) + + with nogil: + check_flight_status(CFlightClient.Connect(c_location, c_options + ).Value(&self.client)) + + @property + def supports_async(self): + return self.client.get().supports_async() + + def as_async(self) -> None: + check_status(self.client.get().CheckAsyncSupport()) + return AsyncioFlightClient(self) + + def wait_for_available(self, timeout=5): + """Block until the server can be contacted. 
+ + Parameters + ---------- + timeout : int, default 5 + The maximum seconds to wait. + """ + deadline = time.time() + timeout + while True: + try: + list(self.list_flights()) + except FlightUnavailableError: + if time.time() < deadline: + time.sleep(0.025) + continue + else: + raise + except NotImplementedError: + # allow if list_flights is not implemented, because + # the server can be contacted nonetheless + break + else: + break + + @classmethod + def connect(cls, location, tls_root_certs=None, cert_chain=None, + private_key=None, override_hostname=None, + disable_server_verification=None): + """Connect to a Flight server. + + .. deprecated:: 0.15.0 + Use the ``FlightClient`` constructor or ``pyarrow.flight.connect`` function instead. + """ + warnings.warn("The 'FlightClient.connect' method is deprecated, use " + "FlightClient constructor or pyarrow.flight.connect " + "function instead") + return FlightClient( + location, tls_root_certs=tls_root_certs, + cert_chain=cert_chain, private_key=private_key, + override_hostname=override_hostname, + disable_server_verification=disable_server_verification + ) + + def authenticate(self, auth_handler, options: FlightCallOptions = None): + """Authenticate to the server. + + Parameters + ---------- + auth_handler : ClientAuthHandler + The authentication mechanism to use. + options : FlightCallOptions + Options for this call. + """ + cdef: + unique_ptr[CClientAuthHandler] handler + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + if not isinstance(auth_handler, ClientAuthHandler): + raise TypeError( + "FlightClient.authenticate takes a ClientAuthHandler, " + "not '{}'".format(type(auth_handler))) + handler.reset(( auth_handler).to_handler()) + with nogil: + check_flight_status( + self.client.get().Authenticate(deref(c_options), + move(handler))) + + def authenticate_basic_token(self, username, password, + options: FlightCallOptions = None): + """Authenticate to the server with HTTP basic authentication. + + Parameters + ---------- + username : string + Username to authenticate with + password : string + Password to authenticate with + options : FlightCallOptions + Options for this call + + Returns + ------- + tuple : Tuple[str, str] + A tuple representing the FlightCallOptions authorization + header entry of a bearer token. + """ + cdef: + CResult[pair[c_string, c_string]] result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + c_string user = tobytes(username) + c_string pw = tobytes(password) + + with nogil: + result = self.client.get().AuthenticateBasicToken(deref(c_options), + user, pw) + check_flight_status(result.status()) + + return GetResultValue(result) + + def list_actions(self, options: FlightCallOptions = None): + """List the actions available on a service.""" + cdef: + vector[CActionType] results + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + with SignalStopHandler() as stop_handler: + c_options.stop_token = \ + ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + self.client.get().ListActions(deref(c_options)).Value(&results)) + + result = [] + for action_type in results: + py_action = ActionType(frombytes(action_type.type), + frombytes(action_type.description)) + result.append(py_action) + + return result + + def do_action(self, action, options: FlightCallOptions = None): + """ + Execute an action on a service. 
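+
+        A minimal sketch (illustrative; assumes ``client`` is a connected
+        FlightClient and the server exposes an action named ``"healthcheck"``
+        that ignores its body)::
+
+            for result in client.do_action(("healthcheck", b"")):
+                print(result.body.to_pybytes())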
+ + Parameters + ---------- + action : str, tuple, or Action + Can be action type name (no body), type and body, or any Action + object + options : FlightCallOptions + RPC options + + Returns + ------- + results : iterator of Result values + """ + cdef: + unique_ptr[CResultStream] results + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + if isinstance(action, (str, bytes)): + action = Action(action, b'') + elif isinstance(action, tuple): + action = Action(*action) + elif not isinstance(action, Action): + raise TypeError("Action must be Action instance, string, or tuple") + + cdef CAction c_action = Action.unwrap( action) + with nogil: + check_flight_status( + self.client.get().DoAction( + deref(c_options), c_action).Value(&results)) + + def _do_action_response(): + cdef: + Result result + while True: + result = Result.__new__(Result) + with nogil: + check_flight_status(results.get().Next().Value(&result.result)) + if result.result == NULL: + break + yield result + return _do_action_response() + + def list_flights(self, criteria: bytes = None, + options: FlightCallOptions = None): + """List the flights available on a service.""" + cdef: + unique_ptr[CFlightListing] listing + FlightInfo result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CCriteria c_criteria + + if criteria: + c_criteria.expression = tobytes(criteria) + + with SignalStopHandler() as stop_handler: + c_options.stop_token = \ + ( stop_handler.stop_token).stop_token + with nogil: + check_flight_status( + self.client.get().ListFlights(deref(c_options), + c_criteria).Value(&listing)) + + while True: + result = FlightInfo.__new__(FlightInfo) + with nogil: + check_flight_status(listing.get().Next().Value(&result.info)) + if result.info == NULL: + break + yield result + + def get_flight_info(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Request information about an available flight.""" + cdef: + FlightInfo result = FlightInfo.__new__(FlightInfo) + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().GetFlightInfo( + deref(c_options), c_descriptor).Value(&result.info)) + + return result + + def get_schema(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Request schema for an available flight.""" + cdef: + SchemaResult result = SchemaResult.__new__(SchemaResult) + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + with nogil: + check_status( + self.client.get() + .GetSchema(deref(c_options), c_descriptor).Value(&result.result) + ) + + return result + + def do_get(self, ticket: Ticket, options: FlightCallOptions = None): + """Request the data for a flight. + + Returns + ------- + reader : FlightStreamReader + """ + cdef: + unique_ptr[CFlightStreamReader] reader + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + + with nogil: + check_flight_status( + self.client.get().DoGet( + deref(c_options), ticket.c_ticket).Value(&reader)) + result = FlightStreamReader() + result.reader.reset(reader.release()) + return result + + def do_put(self, descriptor: FlightDescriptor, Schema schema not None, + options: FlightCallOptions = None): + """Upload data to a flight. 
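+
+        A minimal sketch (illustrative; assumes ``client`` is a connected
+        FlightClient, ``table`` is a pyarrow.Table, and the server accepts
+        the path descriptor used here)::
+
+            descriptor = FlightDescriptor.for_path("example.parquet")
+            writer, metadata_reader = client.do_put(descriptor, table.schema)
+            writer.write_table(table)
+            writer.close()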
+ + Returns + ------- + writer : FlightStreamWriter + reader : FlightMetadataReader + """ + cdef: + shared_ptr[CSchema] c_schema = pyarrow_unwrap_schema(schema) + CDoPutResult c_do_put_result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().DoPut( + deref(c_options), + c_descriptor, + c_schema).Value(&c_do_put_result)) + py_writer = FlightStreamWriter() + py_writer.writer.reset(c_do_put_result.writer.release()) + py_reader = FlightMetadataReader() + py_reader.reader.reset(c_do_put_result.reader.release()) + return py_writer, py_reader + + def do_exchange(self, descriptor: FlightDescriptor, + options: FlightCallOptions = None): + """Start a bidirectional data exchange with a server. + + Parameters + ---------- + descriptor : FlightDescriptor + A descriptor for the flight. + options : FlightCallOptions + RPC options. + + Returns + ------- + writer : FlightStreamWriter + reader : FlightStreamReader + """ + cdef: + CDoExchangeResult c_do_exchange_result + CFlightCallOptions* c_options = FlightCallOptions.unwrap(options) + CFlightDescriptor c_descriptor = \ + FlightDescriptor.unwrap(descriptor) + + with nogil: + check_flight_status(self.client.get().DoExchange( + deref(c_options), + c_descriptor).Value(&c_do_exchange_result)) + py_writer = FlightStreamWriter() + py_writer.writer.reset(c_do_exchange_result.writer.release()) + py_reader = FlightStreamReader() + py_reader.reader.reset(c_do_exchange_result.reader.release()) + return py_writer, py_reader + + def close(self): + """Close the client and disconnect.""" + client = self.client.get() + if client != NULL: + check_flight_status(client.Close()) + + def __del__(self): + # Not ideal, but close() wasn't originally present so + # applications may not be calling it + self.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + +cdef class FlightDataStream(_Weakrefable): + """ + Abstract base class for Flight data streams. + + See Also + -------- + RecordBatchStream + GeneratorStream + """ + + cdef CFlightDataStream* to_stream(self) except *: + """Create the C++ data stream for the backing Python object. + + We don't expose the C++ object to Python, so we can manage its + lifetime from the Cython/C++ side. + """ + raise NotImplementedError + + +cdef class RecordBatchStream(FlightDataStream): + """A Flight data stream backed by RecordBatches. + + The remainder of this DoGet request will be handled in C++, + without having to acquire the GIL. + + """ + cdef: + object data_source + CIpcWriteOptions write_options + + def __init__(self, data_source, options=None): + """Create a RecordBatchStream from a data source. + + Parameters + ---------- + data_source : RecordBatchReader or Table + The data to stream to the client. + options : pyarrow.ipc.IpcWriteOptions, optional + Optional IPC options to control how to write the data. 
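+
+        A minimal server-side sketch (illustrative; assumes ``self.table``
+        is a pyarrow.Table held by the service)::
+
+            def do_get(self, context, ticket):
+                return RecordBatchStream(self.table)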
+ """ + if (not isinstance(data_source, RecordBatchReader) and + not isinstance(data_source, lib.Table)): + raise TypeError("Expected RecordBatchReader or Table, " + "but got: {}".format(type(data_source))) + self.data_source = data_source + self.write_options = _get_options(options).c_options + + cdef CFlightDataStream* to_stream(self) except *: + cdef: + shared_ptr[CRecordBatchReader] reader + if isinstance(self.data_source, RecordBatchReader): + reader = ( self.data_source).reader + elif isinstance(self.data_source, lib.Table): + table = ( self.data_source).table + reader.reset(new TableBatchReader(deref(table))) + else: + raise RuntimeError("Can't construct RecordBatchStream " + "from type {}".format(type(self.data_source))) + return new CRecordBatchStream(reader, self.write_options) + + +cdef class GeneratorStream(FlightDataStream): + """A Flight data stream backed by a Python generator.""" + cdef: + shared_ptr[CSchema] schema + object generator + # A substream currently being consumed by the client, if + # present. Produced by the generator. + unique_ptr[CFlightDataStream] current_stream + CIpcWriteOptions c_options + + def __init__(self, schema, generator, options=None): + """Create a GeneratorStream from a Python generator. + + Parameters + ---------- + schema : Schema + The schema for the data to be returned. + + generator : iterator or iterable + The generator should yield other FlightDataStream objects, + Tables, RecordBatches, or RecordBatchReaders. + + options : pyarrow.ipc.IpcWriteOptions, optional + """ + self.schema = pyarrow_unwrap_schema(schema) + self.generator = iter(generator) + self.c_options = _get_options(options).c_options + + cdef CFlightDataStream* to_stream(self) except *: + cdef: + function[cb_data_stream_next] callback = &_data_stream_next + return new CPyGeneratorFlightDataStream(self, self.schema, callback, + self.c_options) + + +cdef class ServerCallContext(_Weakrefable): + """Per-call state/context.""" + cdef: + const CServerCallContext* context + + def peer_identity(self): + """Get the identity of the authenticated peer. + + May be the empty string. + """ + return tobytes(self.context.peer_identity()) + + def peer(self): + """Get the address of the peer.""" + # Set safe=True as gRPC on Windows sometimes gives garbage bytes + return frombytes(self.context.peer(), safe=True) + + def is_cancelled(self): + """Check if the current RPC call has been canceled by the client.""" + return self.context.is_cancelled() + + def add_header(self, key, value): + """Add a response header.""" + self.context.AddHeader(tobytes(key), tobytes(value)) + + def add_trailer(self, key, value): + """Add a response trailer.""" + self.context.AddTrailer(tobytes(key), tobytes(value)) + + def get_middleware(self, key): + """ + Get a middleware instance by key. + + Returns None if the middleware was not found. 
+ """ + cdef: + CServerMiddleware* c_middleware = \ + self.context.GetMiddleware(CPyServerMiddlewareName) + CPyServerMiddleware* middleware + vector[CTracingServerMiddlewareTraceKey] c_trace_context + if c_middleware == NULL: + c_middleware = self.context.GetMiddleware(tobytes(key)) + + if c_middleware == NULL: + return None + elif c_middleware.name() == CPyServerMiddlewareName: + middleware = c_middleware + py_middleware = <_ServerMiddlewareWrapper> middleware.py_object() + return py_middleware.middleware.get(key) + elif c_middleware.name() == CTracingServerMiddlewareName: + c_trace_context = ( c_middleware + ).GetTraceContext() + trace_context = {pair.key: pair.value for pair in c_trace_context} + return TracingServerMiddleware(trace_context) + return None + + @staticmethod + cdef ServerCallContext wrap(const CServerCallContext& context): + cdef ServerCallContext result = \ + ServerCallContext.__new__(ServerCallContext) + result.context = &context + return result + + +cdef class ServerAuthReader(_Weakrefable): + """A reader for messages from the client during an auth handshake.""" + cdef: + CServerAuthReader* reader + + def read(self): + cdef c_string token + if not self.reader: + raise ValueError("Cannot use ServerAuthReader outside " + "ServerAuthHandler.authenticate") + with nogil: + check_flight_status(self.reader.Read(&token)) + return token + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.reader = NULL + + @staticmethod + cdef ServerAuthReader wrap(CServerAuthReader* reader): + cdef ServerAuthReader result = \ + ServerAuthReader.__new__(ServerAuthReader) + result.reader = reader + return result + + +cdef class ServerAuthSender(_Weakrefable): + """A writer for messages to the client during an auth handshake.""" + cdef: + CServerAuthSender* sender + + def write(self, message): + cdef c_string c_message = tobytes(message) + if not self.sender: + raise ValueError("Cannot use ServerAuthSender outside " + "ServerAuthHandler.authenticate") + with nogil: + check_flight_status(self.sender.Write(c_message)) + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.sender = NULL + + @staticmethod + cdef ServerAuthSender wrap(CServerAuthSender* sender): + cdef ServerAuthSender result = \ + ServerAuthSender.__new__(ServerAuthSender) + result.sender = sender + return result + + +cdef class ClientAuthReader(_Weakrefable): + """A reader for messages from the server during an auth handshake.""" + cdef: + CClientAuthReader* reader + + def read(self): + cdef c_string token + if not self.reader: + raise ValueError("Cannot use ClientAuthReader outside " + "ClientAuthHandler.authenticate") + with nogil: + check_flight_status(self.reader.Read(&token)) + return token + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. 
+ """ + self.reader = NULL + + @staticmethod + cdef ClientAuthReader wrap(CClientAuthReader* reader): + cdef ClientAuthReader result = \ + ClientAuthReader.__new__(ClientAuthReader) + result.reader = reader + return result + + +cdef class ClientAuthSender(_Weakrefable): + """A writer for messages to the server during an auth handshake.""" + cdef: + CClientAuthSender* sender + + def write(self, message): + cdef c_string c_message = tobytes(message) + if not self.sender: + raise ValueError("Cannot use ClientAuthSender outside " + "ClientAuthHandler.authenticate") + with nogil: + check_flight_status(self.sender.Write(c_message)) + + cdef void poison(self): + """Prevent further usage of this object. + + This object is constructed by taking a pointer to a reference, + so we want to make sure Python users do not access this after + the reference goes away. + """ + self.sender = NULL + + @staticmethod + cdef ClientAuthSender wrap(CClientAuthSender* sender): + cdef ClientAuthSender result = \ + ClientAuthSender.__new__(ClientAuthSender) + result.sender = sender + return result + + +cdef CStatus _data_stream_next(void* self, CFlightPayload* payload) except *: + """Callback for implementing FlightDataStream in Python.""" + cdef: + unique_ptr[CFlightDataStream] data_stream + + py_stream = self + if not isinstance(py_stream, GeneratorStream): + raise RuntimeError("self object in callback is not GeneratorStream") + stream = py_stream + + # The generator is allowed to yield a reader or table which we + # yield from; if that sub-generator is empty, we need to reset and + # try again. However, limit the number of attempts so that we + # don't just spin forever. + max_attempts = 128 + for _ in range(max_attempts): + if stream.current_stream != nullptr: + with nogil: + check_flight_status( + stream.current_stream.get().Next().Value(payload)) + # If the stream ended, see if there's another stream from the + # generator + if payload.ipc_message.metadata != nullptr: + return CStatus_OK() + stream.current_stream.reset(nullptr) + + try: + result = next(stream.generator) + except StopIteration: + payload.ipc_message.metadata.reset( nullptr) + return CStatus_OK() + except FlightError as flight_error: + return ( flight_error).to_status() + + if isinstance(result, (list, tuple)): + result, metadata = result + else: + result, metadata = result, None + + if isinstance(result, (Table, RecordBatchReader)): + if metadata: + raise ValueError("Can only return metadata alongside a " + "RecordBatch.") + result = RecordBatchStream(result) + + stream_schema = pyarrow_wrap_schema(stream.schema) + if isinstance(result, FlightDataStream): + if metadata: + raise ValueError("Can only return metadata alongside a " + "RecordBatch.") + data_stream = unique_ptr[CFlightDataStream]( + ( result).to_stream()) + substream_schema = pyarrow_wrap_schema(data_stream.get().schema()) + if substream_schema != stream_schema: + raise ValueError("Got a FlightDataStream whose schema " + "does not match the declared schema of this " + "GeneratorStream. " + "Got: {}\nExpected: {}".format( + substream_schema, stream_schema)) + stream.current_stream.reset( + new CPyFlightDataStream(result, move(data_stream))) + # Loop around and try again + continue + elif isinstance(result, RecordBatch): + batch = result + if batch.schema != stream_schema: + raise ValueError("Got a RecordBatch whose schema does not " + "match the declared schema of this " + "GeneratorStream. 
" + "Got: {}\nExpected: {}".format(batch.schema, + stream_schema)) + check_flight_status(GetRecordBatchPayload( + deref(batch.batch), + stream.c_options, + &payload.ipc_message)) + if metadata: + payload.app_metadata = pyarrow_unwrap_buffer( + as_buffer(metadata)) + else: + raise TypeError("GeneratorStream must be initialized with " + "an iterator of FlightDataStream, Table, " + "RecordBatch, or RecordBatchStreamReader objects, " + "not {}.".format(type(result))) + # Don't loop around + return CStatus_OK() + # Ran out of attempts (the RPC handler kept yielding empty tables/readers) + raise RuntimeError("While getting next payload, ran out of attempts to " + "get something to send " + "(application server implementation error)") + + +cdef CStatus _list_flights(void* self, const CServerCallContext& context, + const CCriteria* c_criteria, + unique_ptr[CFlightListing]* listing) except *: + """Callback for implementing ListFlights in Python.""" + cdef: + vector[CFlightInfo] flights + + try: + result = ( self).list_flights(ServerCallContext.wrap(context), + c_criteria.expression) + for info in result: + if not isinstance(info, FlightInfo): + raise TypeError("FlightServerBase.list_flights must return " + "FlightInfo instances, but got {}".format( + type(info))) + flights.push_back(deref(( info).info.get())) + listing.reset(new CSimpleFlightListing(flights)) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _get_flight_info(void* self, const CServerCallContext& context, + CFlightDescriptor c_descriptor, + unique_ptr[CFlightInfo]* info) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + FlightDescriptor py_descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + py_descriptor.descriptor = c_descriptor + try: + result = ( self).get_flight_info( + ServerCallContext.wrap(context), + py_descriptor) + except FlightError as flight_error: + return ( flight_error).to_status() + if not isinstance(result, FlightInfo): + raise TypeError("FlightServerBase.get_flight_info must return " + "a FlightInfo instance, but got {}".format( + type(result))) + info.reset(new CFlightInfo(deref(( result).info.get()))) + return CStatus_OK() + +cdef CStatus _get_schema(void* self, const CServerCallContext& context, + CFlightDescriptor c_descriptor, + unique_ptr[CSchemaResult]* info) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + FlightDescriptor py_descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + py_descriptor.descriptor = c_descriptor + result = ( self).get_schema(ServerCallContext.wrap(context), + py_descriptor) + if not isinstance(result, SchemaResult): + raise TypeError("FlightServerBase.get_schema_info must return " + "a SchemaResult instance, but got {}".format( + type(result))) + info.reset(new CSchemaResult(deref(( result).result.get()))) + return CStatus_OK() + +cdef CStatus _do_put(void* self, const CServerCallContext& context, + unique_ptr[CFlightMessageReader] reader, + unique_ptr[CFlightMetadataWriter] writer) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + MetadataRecordBatchReader py_reader = MetadataRecordBatchReader() + FlightMetadataWriter py_writer = FlightMetadataWriter() + FlightDescriptor descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + + descriptor.descriptor = reader.get().descriptor() + py_reader.reader.reset(reader.release()) + py_writer.writer.reset(writer.release()) + try: + ( 
self).do_put(ServerCallContext.wrap(context), descriptor, + py_reader, py_writer) + return CStatus_OK() + except FlightError as flight_error: + return ( flight_error).to_status() + + +cdef CStatus _do_get(void* self, const CServerCallContext& context, + CTicket ticket, + unique_ptr[CFlightDataStream]* stream) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + unique_ptr[CFlightDataStream] data_stream + + py_ticket = Ticket(ticket.ticket) + try: + result = ( self).do_get(ServerCallContext.wrap(context), + py_ticket) + except FlightError as flight_error: + return ( flight_error).to_status() + if not isinstance(result, FlightDataStream): + raise TypeError("FlightServerBase.do_get must return " + "a FlightDataStream") + data_stream = unique_ptr[CFlightDataStream]( + ( result).to_stream()) + stream[0] = unique_ptr[CFlightDataStream]( + new CPyFlightDataStream(result, move(data_stream))) + return CStatus_OK() + + +cdef CStatus _do_exchange(void* self, const CServerCallContext& context, + unique_ptr[CFlightMessageReader] reader, + unique_ptr[CFlightMessageWriter] writer) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + MetadataRecordBatchReader py_reader = MetadataRecordBatchReader() + MetadataRecordBatchWriter py_writer = MetadataRecordBatchWriter() + FlightDescriptor descriptor = \ + FlightDescriptor.__new__(FlightDescriptor) + + descriptor.descriptor = reader.get().descriptor() + py_reader.reader.reset(reader.release()) + py_writer.writer.reset(writer.release()) + try: + ( self).do_exchange(ServerCallContext.wrap(context), + descriptor, py_reader, py_writer) + return CStatus_OK() + except FlightError as flight_error: + return ( flight_error).to_status() + + +cdef CStatus _do_action_result_next( + void* self, + unique_ptr[CFlightResult]* result +) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + CFlightResult* c_result + + try: + action_result = next( self) + if not isinstance(action_result, Result): + action_result = Result(action_result) + c_result = ( action_result).result.get() + result.reset(new CFlightResult(deref(c_result))) + except StopIteration: + result.reset(nullptr) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _do_action(void* self, const CServerCallContext& context, + const CAction& action, + unique_ptr[CResultStream]* result) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + function[cb_result_next] ptr = &_do_action_result_next + py_action = Action(action.type, pyarrow_wrap_buffer(action.body)) + try: + responses = ( self).do_action(ServerCallContext.wrap(context), + py_action) + except FlightError as flight_error: + return ( flight_error).to_status() + # Let the application return an iterator or anything convertible + # into one + if responses is None: + # Server didn't return anything + responses = [] + result.reset(new CPyFlightResultStream(iter(responses), ptr)) + return CStatus_OK() + + +cdef CStatus _list_actions(void* self, const CServerCallContext& context, + vector[CActionType]* actions) except *: + """Callback for implementing Flight servers in Python.""" + cdef: + CActionType action_type + # Method should return a list of ActionTypes or similar tuple + try: + result = ( self).list_actions(ServerCallContext.wrap(context)) + for action in result: + if not isinstance(action, tuple): + raise TypeError( + "Results of list_actions must be ActionType or tuple") + action_type.type = 
tobytes(action[0]) + action_type.description = tobytes(action[1]) + actions.push_back(action_type) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _server_authenticate(void* self, CServerAuthSender* outgoing, + CServerAuthReader* incoming) except *: + """Callback for implementing authentication in Python.""" + sender = ServerAuthSender.wrap(outgoing) + reader = ServerAuthReader.wrap(incoming) + try: + ( self).authenticate(sender, reader) + except FlightError as flight_error: + return ( flight_error).to_status() + finally: + sender.poison() + reader.poison() + return CStatus_OK() + +cdef CStatus _is_valid(void* self, const c_string& token, + c_string* peer_identity) except *: + """Callback for implementing authentication in Python.""" + cdef c_string c_result + try: + c_result = tobytes(( self).is_valid(token)) + peer_identity[0] = c_result + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _client_authenticate(void* self, CClientAuthSender* outgoing, + CClientAuthReader* incoming) except *: + """Callback for implementing authentication in Python.""" + sender = ClientAuthSender.wrap(outgoing) + reader = ClientAuthReader.wrap(incoming) + try: + ( self).authenticate(sender, reader) + except FlightError as flight_error: + return ( flight_error).to_status() + finally: + sender.poison() + reader.poison() + return CStatus_OK() + + +cdef CStatus _get_token(void* self, c_string* token) except *: + """Callback for implementing authentication in Python.""" + cdef c_string c_result + try: + c_result = tobytes(( self).get_token()) + token[0] = c_result + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _middleware_sending_headers( + void* self, CAddCallHeaders* add_headers) except *: + """Callback for implementing middleware.""" + try: + headers = ( self).sending_headers() + except FlightError as flight_error: + return ( flight_error).to_status() + + if headers: + for header, values in headers.items(): + if isinstance(values, (str, bytes)): + values = (values,) + # Headers in gRPC (and HTTP/1, HTTP/2) are required to be + # valid, lowercase ASCII. + header = header.lower() + if isinstance(header, str): + header = header.encode("ascii") + for value in values: + if isinstance(value, str): + value = value.encode("ascii") + # Allow bytes values to pass through. 
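+                # Binary values are only accepted by gRPC on header names
+                # ending in "-bin"; text header values must be valid ASCII.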
+ add_headers.AddHeader(header, value) + + return CStatus_OK() + + +cdef CStatus _middleware_call_completed( + void* self, + const CStatus& call_status) except *: + """Callback for implementing middleware.""" + try: + try: + check_flight_status(call_status) + except Exception as e: + ( self).call_completed(e) + else: + ( self).call_completed(None) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef CStatus _middleware_received_headers( + void* self, + const CCallHeaders& c_headers) except *: + """Callback for implementing middleware.""" + try: + headers = convert_headers(c_headers) + ( self).received_headers(headers) + except FlightError as flight_error: + return ( flight_error).to_status() + return CStatus_OK() + + +cdef dict convert_headers(const CCallHeaders& c_headers): + cdef: + CCallHeaders.const_iterator header_iter = c_headers.cbegin() + headers = {} + while header_iter != c_headers.cend(): + header = c_string(deref(header_iter).first).decode("ascii") + value = c_string(deref(header_iter).second) + if not header.endswith("-bin"): + # Text header values in gRPC (and HTTP/1, HTTP/2) are + # required to be valid ASCII. Binary header values are + # exposed as bytes. + value = value.decode("ascii") + headers.setdefault(header, []).append(value) + postincrement(header_iter) + return headers + + +cdef CStatus _server_middleware_start_call( + void* self, + const CCallInfo& c_info, + const CCallHeaders& c_headers, + shared_ptr[CServerMiddleware]* c_instance) except *: + """Callback for implementing server middleware.""" + instance = None + try: + call_info = wrap_call_info(c_info) + headers = convert_headers(c_headers) + instance = ( self).start_call(call_info, headers) + except FlightError as flight_error: + return ( flight_error).to_status() + + if instance: + ServerMiddleware.wrap(instance, c_instance) + + return CStatus_OK() + + +cdef CStatus _client_middleware_start_call( + void* self, + const CCallInfo& c_info, + unique_ptr[CClientMiddleware]* c_instance) except *: + """Callback for implementing client middleware.""" + instance = None + try: + call_info = wrap_call_info(c_info) + instance = ( self).start_call(call_info) + except FlightError as flight_error: + return ( flight_error).to_status() + + if instance: + ClientMiddleware.wrap(instance, c_instance) + + return CStatus_OK() + + +cdef class ServerAuthHandler(_Weakrefable): + """Authentication middleware for a server. + + To implement an authentication mechanism, subclass this class and + override its methods. + + """ + + def authenticate(self, outgoing, incoming): + """Conduct the handshake with the client. + + May raise an error if the client cannot authenticate. + + Parameters + ---------- + outgoing : ServerAuthSender + A channel to send messages to the client. + incoming : ServerAuthReader + A channel to read messages from the client. + """ + raise NotImplementedError + + def is_valid(self, token): + """Validate a client token, returning their identity. + + May return an empty string (if the auth mechanism does not + name the peer) or raise an exception (if the token is + invalid). + + Parameters + ---------- + token : bytes + The authentication token from the client. 
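+
+        A minimal sketch (illustrative; assumes the handler stores tokens it
+        issued during ``authenticate`` in a hypothetical ``self.tokens`` dict
+        mapping token to identity)::
+
+            def is_valid(self, token):
+                if token not in self.tokens:
+                    raise FlightUnauthenticatedError("invalid token")
+                return self.tokens[token]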
+ + """ + raise NotImplementedError + + cdef PyServerAuthHandler* to_handler(self): + cdef PyServerAuthHandlerVtable vtable + vtable.authenticate = _server_authenticate + vtable.is_valid = _is_valid + return new PyServerAuthHandler(self, vtable) + + +cdef class ClientAuthHandler(_Weakrefable): + """Authentication plugin for a client.""" + + def authenticate(self, outgoing, incoming): + """Conduct the handshake with the server. + + Parameters + ---------- + outgoing : ClientAuthSender + A channel to send messages to the server. + incoming : ClientAuthReader + A channel to read messages from the server. + """ + raise NotImplementedError + + def get_token(self): + """Get the auth token for a call.""" + raise NotImplementedError + + cdef PyClientAuthHandler* to_handler(self): + cdef PyClientAuthHandlerVtable vtable + vtable.authenticate = _client_authenticate + vtable.get_token = _get_token + return new PyClientAuthHandler(self, vtable) + + +_CallInfo = collections.namedtuple("_CallInfo", ["method"]) + + +class CallInfo(_CallInfo): + """Information about a particular RPC for Flight middleware.""" + + +cdef wrap_call_info(const CCallInfo& c_info): + method = wrap_flight_method(c_info.method) + return CallInfo(method=method) + + +cdef class ClientMiddlewareFactory(_Weakrefable): + """A factory for new middleware instances. + + All middleware methods will be called from the same thread as the + RPC method implementation. That is, thread-locals set in the + client are accessible from the middleware itself. + + """ + + def start_call(self, info): + """Called at the start of an RPC. + + This must be thread-safe and must not raise exceptions. + + Parameters + ---------- + info : CallInfo + Information about the call. + + Returns + ------- + instance : ClientMiddleware + An instance of ClientMiddleware (the instance to use for + the call), or None if this call is not intercepted. + + """ + + +cdef class ClientMiddleware(_Weakrefable): + """Client-side middleware for a call, instantiated per RPC. + + Methods here should be fast and must be infallible: they should + not raise exceptions or stall indefinitely. + + """ + + def sending_headers(self): + """A callback before headers are sent. + + Returns + ------- + headers : dict + A dictionary of header values to add to the request, or + None if no headers are to be added. The dictionary should + have string keys and string or list-of-string values. + + Bytes values are allowed, but the underlying transport may + not support them or may restrict them. For gRPC, binary + values are only allowed on headers ending in "-bin". + + Header names must be lowercase ASCII. + + """ + + def received_headers(self, headers): + """A callback when headers are received. + + The default implementation does nothing. + + Parameters + ---------- + headers : dict + A dictionary of headers from the server. Keys are strings + and values are lists of strings (for text headers) or + bytes (for binary headers). + + """ + + def call_completed(self, exception): + """A callback when the call finishes. + + The default implementation does nothing. + + Parameters + ---------- + exception : ArrowException + If the call errored, this is the equivalent + exception. Will be None if the call succeeded. 
+ + """ + + @staticmethod + cdef void wrap(object py_middleware, + unique_ptr[CClientMiddleware]* c_instance): + cdef PyClientMiddlewareVtable vtable + vtable.sending_headers = _middleware_sending_headers + vtable.received_headers = _middleware_received_headers + vtable.call_completed = _middleware_call_completed + c_instance[0].reset(new CPyClientMiddleware(py_middleware, vtable)) + + +cdef class ServerMiddlewareFactory(_Weakrefable): + """A factory for new middleware instances. + + All middleware methods will be called from the same thread as the + RPC method implementation. That is, thread-locals set in the + middleware are accessible from the method itself. + + """ + + def start_call(self, info, headers): + """Called at the start of an RPC. + + This must be thread-safe. + + Parameters + ---------- + info : CallInfo + Information about the call. + headers : dict + A dictionary of headers from the client. Keys are strings + and values are lists of strings (for text headers) or + bytes (for binary headers). + + Returns + ------- + instance : ServerMiddleware + An instance of ServerMiddleware (the instance to use for + the call), or None if this call is not intercepted. + + Raises + ------ + exception : pyarrow.ArrowException + If an exception is raised, the call will be rejected with + the given error. + + """ + + +cdef class TracingServerMiddlewareFactory(ServerMiddlewareFactory): + """A factory for tracing middleware instances. + + This enables OpenTelemetry support in Arrow (if Arrow was compiled + with OpenTelemetry support enabled). A new span will be started on + each RPC call. The TracingServerMiddleware instance can then be + retrieved within an RPC handler to get the propagated context, + which can be used to start a new span on the Python side. + + Because the Python/C++ OpenTelemetry libraries do not + interoperate, spans on the C++ side are not directly visible to + the Python side and vice versa. + + """ + + +cdef class ServerMiddleware(_Weakrefable): + """Server-side middleware for a call, instantiated per RPC. + + Methods here should be fast and must be infallible: they should + not raise exceptions or stall indefinitely. + + """ + + def sending_headers(self): + """A callback before headers are sent. + + Returns + ------- + headers : dict + A dictionary of header values to add to the response, or + None if no headers are to be added. The dictionary should + have string keys and string or list-of-string values. + + Bytes values are allowed, but the underlying transport may + not support them or may restrict them. For gRPC, binary + values are only allowed on headers ending in "-bin". + + Header names must be lowercase ASCII. + + """ + + def call_completed(self, exception): + """A callback when the call finishes. + + Parameters + ---------- + exception : pyarrow.ArrowException + If the call errored, this is the equivalent + exception. Will be None if the call succeeded. 
+ + """ + + @staticmethod + cdef void wrap(object py_middleware, + shared_ptr[CServerMiddleware]* c_instance): + cdef PyServerMiddlewareVtable vtable + vtable.sending_headers = _middleware_sending_headers + vtable.call_completed = _middleware_call_completed + c_instance[0].reset(new CPyServerMiddleware(py_middleware, vtable)) + + +class TracingServerMiddleware(ServerMiddleware): + __slots__ = ["trace_context"] + + def __init__(self, trace_context): + self.trace_context = trace_context + + +cdef class _ServerMiddlewareFactoryWrapper(ServerMiddlewareFactory): + """Wrapper to bundle server middleware into a single C++ one.""" + + cdef: + dict factories + + def __init__(self, dict factories): + self.factories = factories + + def start_call(self, info, headers): + instances = {} + for key, factory in self.factories.items(): + instance = factory.start_call(info, headers) + if instance: + # TODO: prevent duplicate keys + instances[key] = instance + if instances: + wrapper = _ServerMiddlewareWrapper(instances) + return wrapper + return None + + +cdef class _ServerMiddlewareWrapper(ServerMiddleware): + cdef: + dict middleware + + def __init__(self, dict middleware): + self.middleware = middleware + + def sending_headers(self): + headers = collections.defaultdict(list) + for instance in self.middleware.values(): + more_headers = instance.sending_headers() + if not more_headers: + continue + # Manually merge with existing headers (since headers are + # multi-valued) + for key, values in more_headers.items(): + # ARROW-16606 gRPC aborts given non-lowercase headers + key = key.lower() + if isinstance(values, (bytes, str)): + values = (values,) + headers[key].extend(values) + return headers + + def call_completed(self, exception): + for instance in self.middleware.values(): + instance.call_completed(exception) + + +cdef class _FlightServerFinalizer(_Weakrefable): + """ + A finalizer that shuts down the server on destruction. + + See ARROW-16597. If the server is still active at interpreter + exit, the process may segfault. + """ + + cdef: + shared_ptr[PyFlightServer] server + + def finalize(self): + cdef: + PyFlightServer* server = self.server.get() + CStatus status + if server == NULL: + return + try: + with nogil: + status = server.Shutdown() + if status.ok(): + status = server.Wait() + check_flight_status(status) + finally: + self.server.reset() + + +cdef class FlightServerBase(_Weakrefable): + """A Flight service definition. + + To start the server, create an instance of this class with an + appropriate location. The server will be running as soon as the + instance is created; it is not required to call :meth:`serve`. + + Override methods to define your Flight service. + + Parameters + ---------- + location : str, tuple or Location optional, default None + Location to serve on. Either a gRPC URI like `grpc://localhost:port`, + a tuple of (host, port) pair, or a Location instance. + If None is passed then the server will be started on localhost with a + system provided random port. + auth_handler : ServerAuthHandler optional, default None + An authentication mechanism to use. May be None. + tls_certificates : list optional, default None + A list of (certificate, key) pairs. + verify_client : boolean optional, default False + If True, then enable mutual TLS: require the client to present + a client certificate, and validate the certificate. 
+ root_certificates : bytes optional, default None + If enabling mutual TLS, this specifies the PEM-encoded root + certificate used to validate client certificates. + middleware : dict optional, default None + A dictionary of :class:`ServerMiddlewareFactory` instances. The + string keys can be used to retrieve the middleware instance within + RPC handlers (see :meth:`ServerCallContext.get_middleware`). + + """ + + cdef: + shared_ptr[PyFlightServer] server + object finalizer + + def __init__(self, location=None, auth_handler=None, + tls_certificates=None, verify_client=None, + root_certificates=None, middleware=None): + self.finalizer = None + if isinstance(location, (bytes, str)): + location = Location(location) + elif isinstance(location, (tuple, type(None))): + if location is None: + location = ('localhost', 0) + host, port = location + if tls_certificates: + location = Location.for_grpc_tls(host, port) + else: + location = Location.for_grpc_tcp(host, port) + elif not isinstance(location, Location): + raise TypeError('`location` argument must be a string, tuple or a ' + 'Location instance') + self.init(location, auth_handler, tls_certificates, verify_client, + tobytes(root_certificates or b""), middleware) + + cdef init(self, Location location, ServerAuthHandler auth_handler, + list tls_certificates, c_bool verify_client, + bytes root_certificates, dict middleware): + cdef: + PyFlightServerVtable vtable = PyFlightServerVtable() + PyFlightServer* c_server + unique_ptr[CFlightServerOptions] c_options + CCertKeyPair c_cert + function[cb_server_middleware_start_call] start_call = \ + &_server_middleware_start_call + pair[c_string, shared_ptr[CServerMiddlewareFactory]] c_middleware + + c_options.reset(new CFlightServerOptions(Location.unwrap(location))) + # mTLS configuration + c_options.get().verify_client = verify_client + c_options.get().root_certificates = root_certificates + + if auth_handler: + if not isinstance(auth_handler, ServerAuthHandler): + raise TypeError("auth_handler must be a ServerAuthHandler, " + "not a '{}'".format(type(auth_handler))) + c_options.get().auth_handler.reset( + ( auth_handler).to_handler()) + + if tls_certificates: + for cert, key in tls_certificates: + c_cert.pem_cert = tobytes(cert) + c_cert.pem_key = tobytes(key) + c_options.get().tls_certificates.push_back(c_cert) + + if middleware: + non_tracing_middleware = {} + enable_tracing = None + for key, factory in middleware.items(): + if isinstance(factory, TracingServerMiddlewareFactory): + if enable_tracing is not None: + raise ValueError( + "Can only provide " + "TracingServerMiddlewareFactory once") + if tobytes(key) == CPyServerMiddlewareName: + raise ValueError(f"Middleware key cannot be {key}") + enable_tracing = key + else: + non_tracing_middleware[key] = factory + + if enable_tracing: + c_middleware.first = tobytes(enable_tracing) + c_middleware.second = MakeTracingServerMiddlewareFactory() + c_options.get().middleware.push_back(c_middleware) + + py_middleware = _ServerMiddlewareFactoryWrapper( + non_tracing_middleware) + c_middleware.first = CPyServerMiddlewareName + c_middleware.second.reset(new CPyServerMiddlewareFactory( + py_middleware, + start_call)) + c_options.get().middleware.push_back(c_middleware) + + vtable.list_flights = &_list_flights + vtable.get_flight_info = &_get_flight_info + vtable.get_schema = &_get_schema + vtable.do_put = &_do_put + vtable.do_get = &_do_get + vtable.do_exchange = &_do_exchange + vtable.list_actions = &_list_actions + vtable.do_action = &_do_action + + c_server = 
new PyFlightServer(self, vtable) + self.server.reset(c_server) + with nogil: + check_flight_status(c_server.Init(deref(c_options))) + cdef _FlightServerFinalizer finalizer = _FlightServerFinalizer() + finalizer.server = self.server + self.finalizer = weakref.finalize(self, finalizer.finalize) + + @property + def port(self): + """ + Get the port that this server is listening on. + + Returns a non-positive value if the operation is invalid + (e.g. init() was not called or server is listening on a domain + socket). + """ + return self.server.get().port() + + def list_flights(self, context, criteria): + """List flights available on this service. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + criteria : bytes + Filter criteria provided by the client. + + Returns + ------- + iterator of FlightInfo + + """ + raise NotImplementedError + + def get_flight_info(self, context, descriptor): + """Get information about a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + + Returns + ------- + FlightInfo + + """ + raise NotImplementedError + + def get_schema(self, context, descriptor): + """Get the schema of a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + + Returns + ------- + Schema + + """ + raise NotImplementedError + + def do_put(self, context, descriptor, reader: MetadataRecordBatchReader, + writer: FlightMetadataWriter): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + reader : MetadataRecordBatchReader + A reader for data uploaded by the client. + writer : FlightMetadataWriter + A writer to send responses to the client. + + """ + raise NotImplementedError + + def do_get(self, context, ticket): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + ticket : Ticket + The ticket for the flight. + + Returns + ------- + FlightDataStream + A stream of data to send back to the client. + + """ + raise NotImplementedError + + def do_exchange(self, context, descriptor, reader, writer): + """Write data to a flight. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + descriptor : FlightDescriptor + The descriptor for the flight provided by the client. + reader : MetadataRecordBatchReader + A reader for data uploaded by the client. 
+ writer : MetadataRecordBatchWriter + A writer to send responses to the client. + + """ + raise NotImplementedError + + def list_actions(self, context): + """List custom actions available on this server. + + Applications should override this method to implement their + own behavior. The default method raises a NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + + Returns + ------- + iterator of ActionType or tuple + + """ + raise NotImplementedError + + def do_action(self, context, action): + """Execute a custom action. + + This method should return an iterator, or it should be a + generator. Applications should override this method to + implement their own behavior. The default method raises a + NotImplementedError. + + Parameters + ---------- + context : ServerCallContext + Common contextual information. + action : Action + The action to execute. + + Returns + ------- + iterator of bytes + + """ + raise NotImplementedError + + def serve(self): + """Block until the server shuts down. + + This method only returns if shutdown() is called or a signal is + received. + """ + if self.server.get() == nullptr: + raise ValueError("run() on uninitialized FlightServerBase") + with nogil: + check_flight_status(self.server.get().ServeWithSignals()) + + def run(self): + """Block until the server shuts down. + + .. deprecated:: 0.15.0 + Use the ``FlightServer.serve`` method instead + """ + warnings.warn("The 'FlightServer.run' method is deprecated, use " + "FlightServer.serve method instead") + self.serve() + + def shutdown(self): + """Shut down the server, blocking until current requests finish. + + Do not call this directly from the implementation of a Flight + method, as then the server will block forever waiting for that + request to finish. Instead, call this method from a background + thread. + + This method should only be called once. + """ + # Must not hold the GIL: shutdown waits for pending RPCs to + # complete. Holding the GIL means Python-implemented Flight + # methods will never get to run, so this will hang + # indefinitely. + if self.server.get() == nullptr: + raise ValueError("shutdown() on uninitialized FlightServerBase") + with nogil: + check_flight_status(self.server.get().Shutdown()) + + def wait(self): + """Block until server is terminated with shutdown.""" + with nogil: + self.server.get().Wait() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.finalizer: + self.finalizer() + + +def connect(location, **kwargs): + """ + Connect to a Flight server. + + Parameters + ---------- + location : str, tuple, or Location + Location to connect to. Either a URI like "grpc://localhost:port", + a tuple of (host, port), or a Location instance. + tls_root_certs : bytes or None + PEM-encoded. + cert_chain: str or None + If provided, enables TLS mutual authentication. + private_key: str or None + If provided, enables TLS mutual authentication. + override_hostname : str or None + Override the hostname checked by TLS. Insecure, use with caution. + middleware : list or None + A list of ClientMiddlewareFactory instances to apply. + write_size_limit_bytes : int or None + A soft limit on the size of a data payload sent to the + server. Enabled if positive. If enabled, writing a record + batch that (when serialized) exceeds this limit will raise an + exception; the client can retry the write with a smaller + batch. 
+ disable_server_verification : boolean or None + Disable verifying the server when using TLS. + Insecure, use with caution. + generic_options : list or None + A list of generic (string, int or string) options to pass to + the underlying transport. + + Returns + ------- + client : FlightClient + """ + return FlightClient(location, **kwargs) diff --git a/venv/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cb378b62338546077735cc6df7d47728f3d7b8f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/_fs.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..828b5f56d0435e17e4cf5afe3fc87431cf303f22 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/_pyarrow_cpp_tests.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7e87a5e96bc3091b28bff383d76b0f9dd41c1b24 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/_s3fs.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..34dd818f8ee72f1ffdfe2117de70a121b0b1f258 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/_substrait.cpython-310-x86_64-linux-gnu.so differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/gandiva.pyx b/venv/lib/python3.10/site-packages/pyarrow/gandiva.pyx new file mode 100644 index 0000000000000000000000000000000000000000..2202ec64f29628d76143759220eb61102d1bea97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/gandiva.pyx @@ -0,0 +1,760 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
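The Flight pieces documented in the _flight.pyx additions above (FlightServerBase with its do_get handler, the port property, and the connect() helper) are easiest to see in a small round trip. The sketch below is an illustration only and not part of the diffed sources: EchoServer, its stored table, and the ticket payload are invented names, and it assumes this build's pyarrow.flight module with RecordBatchStream used to send a table back from do_get.

import pyarrow as pa
import pyarrow.flight as flight


class EchoServer(flight.FlightServerBase):
    """Toy server that answers every do_get with the same table (illustrative)."""

    def __init__(self, location="grpc://0.0.0.0:0", **kwargs):
        super().__init__(location, **kwargs)
        self._table = pa.table({"n_legs": [2, 4, 100],
                                "animal": ["Flamingo", "Dog", "Centipede"]})

    def do_get(self, context, ticket):
        # Stream the stored table back to the client, whatever the ticket says.
        return flight.RecordBatchStream(self._table)


if __name__ == "__main__":
    # Binding to port 0 picks a free port; server.port reports the one chosen.
    with EchoServer() as server:
        client = flight.connect(f"grpc://localhost:{server.port}")
        table = client.do_get(flight.Ticket(b"anything")).read_all()
        print(table.num_rows)  # 3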
+ +# cython: profile=False +# distutils: language = c++ +# cython: language_level = 3 + +from libcpp.memory cimport shared_ptr +from libcpp.string cimport string as c_string +from libcpp.vector cimport vector as c_vector +from libcpp.unordered_set cimport unordered_set as c_unordered_set +from libc.stdint cimport int64_t, int32_t + +from pyarrow.includes.libarrow cimport * +from pyarrow.lib cimport (DataType, Field, MemoryPool, RecordBatch, + Schema, check_status, pyarrow_wrap_array, + pyarrow_wrap_data_type, ensure_type, _Weakrefable, + pyarrow_wrap_field) + +from pyarrow.includes.libgandiva cimport ( + CCondition, CGandivaExpression, + CNode, CProjector, CFilter, + CSelectionVector, + _ensure_selection_mode, + CConfiguration, + CConfigurationBuilder, + TreeExprBuilder_MakeExpression, + TreeExprBuilder_MakeFunction, + TreeExprBuilder_MakeBoolLiteral, + TreeExprBuilder_MakeUInt8Literal, + TreeExprBuilder_MakeUInt16Literal, + TreeExprBuilder_MakeUInt32Literal, + TreeExprBuilder_MakeUInt64Literal, + TreeExprBuilder_MakeInt8Literal, + TreeExprBuilder_MakeInt16Literal, + TreeExprBuilder_MakeInt32Literal, + TreeExprBuilder_MakeInt64Literal, + TreeExprBuilder_MakeFloatLiteral, + TreeExprBuilder_MakeDoubleLiteral, + TreeExprBuilder_MakeStringLiteral, + TreeExprBuilder_MakeBinaryLiteral, + TreeExprBuilder_MakeField, + TreeExprBuilder_MakeIf, + TreeExprBuilder_MakeAnd, + TreeExprBuilder_MakeOr, + TreeExprBuilder_MakeCondition, + TreeExprBuilder_MakeInExpressionInt32, + TreeExprBuilder_MakeInExpressionInt64, + TreeExprBuilder_MakeInExpressionTime32, + TreeExprBuilder_MakeInExpressionTime64, + TreeExprBuilder_MakeInExpressionDate32, + TreeExprBuilder_MakeInExpressionDate64, + TreeExprBuilder_MakeInExpressionTimeStamp, + TreeExprBuilder_MakeInExpressionString, + SelectionVector_MakeInt16, + SelectionVector_MakeInt32, + SelectionVector_MakeInt64, + Projector_Make, + Filter_Make, + CFunctionSignature, + GetRegisteredFunctionSignatures) + + +cdef class Node(_Weakrefable): + cdef: + shared_ptr[CNode] node + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use the " + "TreeExprBuilder API directly" + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CNode] node): + cdef Node self = Node.__new__(Node) + self.node = node + return self + + def __str__(self): + return self.node.get().ToString().decode() + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def return_type(self): + return pyarrow_wrap_data_type(self.node.get().return_type()) + + +cdef class Expression(_Weakrefable): + cdef: + shared_ptr[CGandivaExpression] expression + + cdef void init(self, shared_ptr[CGandivaExpression] expression): + self.expression = expression + + def __str__(self): + return self.expression.get().ToString().decode() + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def root(self): + return Node.create(self.expression.get().root()) + + def result(self): + return pyarrow_wrap_field(self.expression.get().result()) + + +cdef class Condition(_Weakrefable): + cdef: + shared_ptr[CCondition] condition + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use the " + "TreeExprBuilder API instead" + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CCondition] condition): + cdef Condition self = Condition.__new__(Condition) + self.condition = condition + return self + + def __str__(self): + 
return self.condition.get().ToString().decode() + + def __repr__(self): + type_format = object.__repr__(self) + return '{0}\n{1}'.format(type_format, str(self)) + + def root(self): + return Node.create(self.condition.get().root()) + + def result(self): + return pyarrow_wrap_field(self.condition.get().result()) + + +cdef class SelectionVector(_Weakrefable): + cdef: + shared_ptr[CSelectionVector] selection_vector + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly." + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CSelectionVector] selection_vector): + cdef SelectionVector self = SelectionVector.__new__(SelectionVector) + self.selection_vector = selection_vector + return self + + def to_array(self): + cdef shared_ptr[CArray] result = self.selection_vector.get().ToArray() + return pyarrow_wrap_array(result) + + +cdef class Projector(_Weakrefable): + cdef: + shared_ptr[CProjector] projector + MemoryPool pool + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "make_projector instead" + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CProjector] projector, MemoryPool pool): + cdef Projector self = Projector.__new__(Projector) + self.projector = projector + self.pool = pool + return self + + @property + def llvm_ir(self): + return self.projector.get().DumpIR().decode() + + def evaluate(self, RecordBatch batch, SelectionVector selection=None): + """ + Evaluate the specified record batch and return the arrays at the + filtered positions. + + Parameters + ---------- + batch : pyarrow.RecordBatch + selection : pyarrow.gandiva.SelectionVector + + Returns + ------- + list[pyarrow.Array] + """ + cdef vector[shared_ptr[CArray]] results + if selection is None: + check_status(self.projector.get().Evaluate( + batch.sp_batch.get()[0], self.pool.pool, &results)) + else: + check_status( + self.projector.get().Evaluate( + batch.sp_batch.get()[0], selection.selection_vector.get(), + self.pool.pool, &results)) + cdef shared_ptr[CArray] result + arrays = [] + for result in results: + arrays.append(pyarrow_wrap_array(result)) + return arrays + + +cdef class Filter(_Weakrefable): + cdef: + shared_ptr[CFilter] filter + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "make_filter instead" + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CFilter] filter): + cdef Filter self = Filter.__new__(Filter) + self.filter = filter + return self + + @property + def llvm_ir(self): + return self.filter.get().DumpIR().decode() + + def evaluate(self, RecordBatch batch, MemoryPool pool, dtype='int32'): + """ + Evaluate the specified record batch and return a selection vector. 
+ + Parameters + ---------- + batch : pyarrow.RecordBatch + pool : MemoryPool + dtype : DataType or str, default int32 + + Returns + ------- + pyarrow.gandiva.SelectionVector + """ + cdef: + DataType type = ensure_type(dtype) + shared_ptr[CSelectionVector] selection + + if type.id == _Type_INT16: + check_status(SelectionVector_MakeInt16( + batch.num_rows, pool.pool, &selection)) + elif type.id == _Type_INT32: + check_status(SelectionVector_MakeInt32( + batch.num_rows, pool.pool, &selection)) + elif type.id == _Type_INT64: + check_status(SelectionVector_MakeInt64( + batch.num_rows, pool.pool, &selection)) + else: + raise ValueError("'dtype' of the selection vector should be " + "one of 'int16', 'int32' and 'int64'.") + + check_status(self.filter.get().Evaluate( + batch.sp_batch.get()[0], selection)) + return SelectionVector.create(selection) + + +cdef class TreeExprBuilder(_Weakrefable): + + def make_literal(self, value, dtype): + """ + Create a node on a literal. + + Parameters + ---------- + value : a literal value + dtype : DataType + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef: + DataType type = ensure_type(dtype) + shared_ptr[CNode] r + + if type.id == _Type_BOOL: + r = TreeExprBuilder_MakeBoolLiteral(value) + elif type.id == _Type_UINT8: + r = TreeExprBuilder_MakeUInt8Literal(value) + elif type.id == _Type_UINT16: + r = TreeExprBuilder_MakeUInt16Literal(value) + elif type.id == _Type_UINT32: + r = TreeExprBuilder_MakeUInt32Literal(value) + elif type.id == _Type_UINT64: + r = TreeExprBuilder_MakeUInt64Literal(value) + elif type.id == _Type_INT8: + r = TreeExprBuilder_MakeInt8Literal(value) + elif type.id == _Type_INT16: + r = TreeExprBuilder_MakeInt16Literal(value) + elif type.id == _Type_INT32: + r = TreeExprBuilder_MakeInt32Literal(value) + elif type.id == _Type_INT64: + r = TreeExprBuilder_MakeInt64Literal(value) + elif type.id == _Type_FLOAT: + r = TreeExprBuilder_MakeFloatLiteral(value) + elif type.id == _Type_DOUBLE: + r = TreeExprBuilder_MakeDoubleLiteral(value) + elif type.id == _Type_STRING: + r = TreeExprBuilder_MakeStringLiteral(value.encode('UTF-8')) + elif type.id == _Type_BINARY: + r = TreeExprBuilder_MakeBinaryLiteral(value) + else: + raise TypeError("Didn't recognize dtype " + str(dtype)) + + return Node.create(r) + + def make_expression(self, Node root_node not None, + Field return_field not None): + """ + Create an expression with the specified root_node, + and the result written to result_field. + + Parameters + ---------- + root_node : pyarrow.gandiva.Node + return_field : pyarrow.Field + + Returns + ------- + pyarrow.gandiva.Expression + """ + cdef shared_ptr[CGandivaExpression] r = TreeExprBuilder_MakeExpression( + root_node.node, return_field.sp_field) + cdef Expression expression = Expression() + expression.init(r) + return expression + + def make_function(self, name, children, DataType return_type): + """ + Create a node with a function. + + Parameters + ---------- + name : str + children : pyarrow.gandiva.NodeVector + return_type : DataType + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef c_vector[shared_ptr[CNode]] c_children + cdef Node child + for child in children: + if child is None: + raise TypeError("Child nodes must not be None") + c_children.push_back(child.node) + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeFunction( + name.encode(), c_children, return_type.sp_type) + return Node.create(r) + + def make_field(self, Field field not None): + """ + Create a node with an Arrow field. 
+ + Parameters + ---------- + field : pyarrow.Field + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeField(field.sp_field) + return Node.create(r) + + def make_if(self, Node condition not None, Node this_node not None, + Node else_node not None, DataType return_type not None): + """ + Create a node with an if-else expression. + + Parameters + ---------- + condition : pyarrow.gandiva.Node + this_node : pyarrow.gandiva.Node + else_node : pyarrow.gandiva.Node + return_type : DataType + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeIf( + condition.node, this_node.node, else_node.node, + return_type.sp_type) + return Node.create(r) + + def make_and(self, children): + """ + Create a Node with a boolean AND expression. + + Parameters + ---------- + children : list[pyarrow.gandiva.Node] + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef c_vector[shared_ptr[CNode]] c_children + cdef Node child + for child in children: + if child is None: + raise TypeError("Child nodes must not be None") + c_children.push_back(child.node) + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeAnd(c_children) + return Node.create(r) + + def make_or(self, children): + """ + Create a Node with a boolean OR expression. + + Parameters + ---------- + children : list[pyarrow.gandiva.Node] + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef c_vector[shared_ptr[CNode]] c_children + cdef Node child + for child in children: + if child is None: + raise TypeError("Child nodes must not be None") + c_children.push_back(child.node) + cdef shared_ptr[CNode] r = TreeExprBuilder_MakeOr(c_children) + return Node.create(r) + + def _make_in_expression_int32(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int32_t] c_values + cdef int32_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionInt32(node.node, c_values) + return Node.create(r) + + def _make_in_expression_int64(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int64_t] c_values + cdef int64_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionInt64(node.node, c_values) + return Node.create(r) + + def _make_in_expression_time32(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int32_t] c_values + cdef int32_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionTime32(node.node, c_values) + return Node.create(r) + + def _make_in_expression_time64(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int64_t] c_values + cdef int64_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionTime64(node.node, c_values) + return Node.create(r) + + def _make_in_expression_date32(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int32_t] c_values + cdef int32_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionDate32(node.node, c_values) + return Node.create(r) + + def _make_in_expression_date64(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int64_t] c_values + cdef int64_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionDate64(node.node, c_values) + return Node.create(r) + + def _make_in_expression_timestamp(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[int64_t] 
c_values + cdef int64_t v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionTimeStamp(node.node, c_values) + return Node.create(r) + + def _make_in_expression_binary(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[c_string] c_values + cdef c_string v + for v in values: + c_values.insert(v) + r = TreeExprBuilder_MakeInExpressionString(node.node, c_values) + return Node.create(r) + + def _make_in_expression_string(self, Node node not None, values): + cdef shared_ptr[CNode] r + cdef c_unordered_set[c_string] c_values + cdef c_string _v + for v in values: + _v = v.encode('UTF-8') + c_values.insert(_v) + r = TreeExprBuilder_MakeInExpressionString(node.node, c_values) + return Node.create(r) + + def make_in_expression(self, Node node not None, values, dtype): + """ + Create a Node with an IN expression. + + Parameters + ---------- + node : pyarrow.gandiva.Node + values : iterable + dtype : DataType + + Returns + ------- + pyarrow.gandiva.Node + """ + cdef DataType type = ensure_type(dtype) + + if type.id == _Type_INT32: + return self._make_in_expression_int32(node, values) + elif type.id == _Type_INT64: + return self._make_in_expression_int64(node, values) + elif type.id == _Type_TIME32: + return self._make_in_expression_time32(node, values) + elif type.id == _Type_TIME64: + return self._make_in_expression_time64(node, values) + elif type.id == _Type_TIMESTAMP: + return self._make_in_expression_timestamp(node, values) + elif type.id == _Type_DATE32: + return self._make_in_expression_date32(node, values) + elif type.id == _Type_DATE64: + return self._make_in_expression_date64(node, values) + elif type.id == _Type_BINARY: + return self._make_in_expression_binary(node, values) + elif type.id == _Type_STRING: + return self._make_in_expression_string(node, values) + else: + raise TypeError("Data type " + str(dtype) + " not supported.") + + def make_condition(self, Node condition not None): + """ + Create a condition with the specified node. + + Parameters + ---------- + condition : pyarrow.gandiva.Node + + Returns + ------- + pyarrow.gandiva.Condition + """ + cdef shared_ptr[CCondition] r = TreeExprBuilder_MakeCondition( + condition.node) + return Condition.create(r) + +cdef class Configuration(_Weakrefable): + cdef: + shared_ptr[CConfiguration] configuration + + def __cinit__(self, bint optimize=True, bint dump_ir=False): + """ + Initialize the configuration with specified options. + + Parameters + ---------- + optimize : bool, default True + Whether to enable optimizations. + dump_ir : bool, default False + Whether to dump LLVM IR. + """ + self.configuration = CConfigurationBuilder().build() + self.configuration.get().set_optimize(optimize) + self.configuration.get().set_dump_ir(dump_ir) + + @staticmethod + cdef create(shared_ptr[CConfiguration] configuration): + """ + Create a Configuration instance from an existing CConfiguration pointer. + + Parameters + ---------- + configuration : shared_ptr[CConfiguration] + Existing CConfiguration pointer. + + Returns + ------- + Configuration instance + """ + cdef Configuration self = Configuration.__new__(Configuration) + self.configuration = configuration + return self + + +cpdef make_projector(Schema schema, children, MemoryPool pool, + str selection_mode="NONE", + Configuration configuration=None): + """ + Construct a projection using expressions. + + A projector is built for a specific schema and vector of expressions. 
+ Once the projector is built, it can be used to evaluate many row batches. + + Parameters + ---------- + schema : pyarrow.Schema + Schema for the record batches, and the expressions. + children : list[pyarrow.gandiva.Expression] + List of projectable expression objects. + pool : pyarrow.MemoryPool + Memory pool used to allocate output arrays. + selection_mode : str, default "NONE" + Possible values are NONE, UINT16, UINT32, UINT64. + configuration : pyarrow.gandiva.Configuration, default None + Configuration for the projector. + + Returns + ------- + Projector instance + """ + cdef: + Expression child + c_vector[shared_ptr[CGandivaExpression]] c_children + shared_ptr[CProjector] result + + if configuration is None: + configuration = Configuration() + + for child in children: + if child is None: + raise TypeError("Expressions must not be None") + c_children.push_back(child.expression) + + check_status( + Projector_Make(schema.sp_schema, c_children, + _ensure_selection_mode(selection_mode), + configuration.configuration, + &result)) + return Projector.create(result, pool) + + +cpdef make_filter(Schema schema, Condition condition, + Configuration configuration=None): + """ + Construct a filter based on a condition. + + A filter is built for a specific schema and condition. Once the filter is + built, it can be used to evaluate many row batches. + + Parameters + ---------- + schema : pyarrow.Schema + Schema for the record batches, and the condition. + condition : pyarrow.gandiva.Condition + Filter condition. + configuration : pyarrow.gandiva.Configuration, default None + Configuration for the filter. + + Returns + ------- + Filter instance + """ + cdef shared_ptr[CFilter] result + if condition is None: + raise TypeError("Condition must not be None") + + if configuration is None: + configuration = Configuration() + + check_status( + Filter_Make(schema.sp_schema, condition.condition, configuration.configuration, &result)) + return Filter.create(result) + + +cdef class FunctionSignature(_Weakrefable): + """ + Signature of a Gandiva function including name, parameter types + and return type. + """ + + cdef: + shared_ptr[CFunctionSignature] signature + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly." + .format(self.__class__.__name__)) + + @staticmethod + cdef create(shared_ptr[CFunctionSignature] signature): + cdef FunctionSignature self = FunctionSignature.__new__( + FunctionSignature) + self.signature = signature + return self + + def return_type(self): + return pyarrow_wrap_data_type(self.signature.get().ret_type()) + + def param_types(self): + result = [] + cdef vector[shared_ptr[CDataType]] types = \ + self.signature.get().param_types() + for t in types: + result.append(pyarrow_wrap_data_type(t)) + return result + + def name(self): + return self.signature.get().base_name().decode() + + def __repr__(self): + signature = self.signature.get().ToString().decode() + return "FunctionSignature(" + signature + ")" + + +def get_registered_function_signatures(): + """ + Return the function in Gandiva's ExpressionRegistry. 
+ + Returns + ------- + registry: a list of registered function signatures + """ + results = [] + + cdef vector[shared_ptr[CFunctionSignature]] signatures = \ + GetRegisteredFunctionSignatures() + + for signature in signatures: + results.append(FunctionSignature.create(signature)) + + return results diff --git a/venv/lib/python3.10/site-packages/pyarrow/parquet/__init__.py b/venv/lib/python3.10/site-packages/pyarrow/parquet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..134f3c097ef004f83fdc8e24e5cb45166c17577e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/parquet/__init__.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# flake8: noqa + +from .core import * diff --git a/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3b31e400ee13417895c321954f18fa1d30e9e1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf7f9d9bc758e474b6718355e5bc2d98a7f2ef0b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8214342d68997536b84bb74d00cd3669451c7264 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/parquet/core.py b/venv/lib/python3.10/site-packages/pyarrow/parquet/core.py new file mode 100644 index 0000000000000000000000000000000000000000..69a1c9d19aae249480f1ff2a929278691b732b26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/parquet/core.py @@ -0,0 +1,2341 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +from collections import defaultdict +from contextlib import nullcontext +from functools import reduce + +import inspect +import json +import os +import re +import operator +import warnings + +import pyarrow as pa + +try: + import pyarrow._parquet as _parquet +except ImportError as exc: + raise ImportError( + "The pyarrow installation is not built with support " + f"for the Parquet file format ({str(exc)})" + ) from None + +from pyarrow._parquet import (ParquetReader, Statistics, # noqa + FileMetaData, RowGroupMetaData, + ColumnChunkMetaData, + ParquetSchema, ColumnSchema, + ParquetLogicalType, + FileEncryptionProperties, + FileDecryptionProperties, + SortingColumn) +from pyarrow.fs import (LocalFileSystem, FileSystem, FileType, + _resolve_filesystem_and_path, _ensure_filesystem) +from pyarrow.util import guid, _is_path_like, _stringify_path, _deprecate_api + + +def _check_contains_null(val): + if isinstance(val, bytes): + for byte in val: + if isinstance(byte, bytes): + compare_to = chr(0) + else: + compare_to = 0 + if byte == compare_to: + return True + elif isinstance(val, str): + return '\x00' in val + return False + + +def _check_filters(filters, check_null_strings=True): + """ + Check if filters are well-formed. + """ + if filters is not None: + if len(filters) == 0 or any(len(f) == 0 for f in filters): + raise ValueError("Malformed filters") + if isinstance(filters[0][0], str): + # We have encountered the situation where we have one nesting level + # too few: + # We have [(,,), ..] instead of [[(,,), ..]] + filters = [filters] + if check_null_strings: + for conjunction in filters: + for col, op, val in conjunction: + if ( + isinstance(val, list) and + all(_check_contains_null(v) for v in val) or + _check_contains_null(val) + ): + raise NotImplementedError( + "Null-terminated binary strings are not supported " + "as filter values." + ) + return filters + + +_DNF_filter_doc = """Predicates are expressed using an ``Expression`` or using + the disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``. + DNF allows arbitrary boolean logical combinations of single column predicates. + The innermost tuples each describe a single column predicate. The list of inner + predicates is interpreted as a conjunction (AND), forming a more selective and + multiple column predicate. Finally, the most outer list combines these filters + as a disjunction (OR). + + Predicates may also be passed as List[Tuple]. This form is interpreted + as a single conjunction. To express OR in predicates, one must + use the (preferred) List[List[Tuple]] notation. + + Each tuple has format: (``key``, ``op``, ``value``) and compares the + ``key`` with the ``value``. + The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``, + ``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the + ``value`` must be a collection such as a ``list``, a ``set`` or a + ``tuple``. + + Examples: + + Using the ``Expression`` API: + + .. 
code-block:: python + + import pyarrow.compute as pc + pc.field('x') == 0 + pc.field('y').isin(['a', 'b', 'c']) + ~pc.field('y').isin({'a', 'b'}) + + Using the DNF format: + + .. code-block:: python + + ('x', '=', 0) + ('y', 'in', ['a', 'b', 'c']) + ('z', 'not in', {'a','b'}) + + """ + + +def filters_to_expression(filters): + """ + Check if filters are well-formed and convert to an ``Expression``. + + Parameters + ---------- + filters : List[Tuple] or List[List[Tuple]] + + Notes + ----- + See internal ``pyarrow._DNF_filter_doc`` attribute for more details. + + Examples + -------- + + >>> filters_to_expression([('foo', '==', 'bar')]) + <pyarrow.compute.Expression (foo == "bar")> + + Returns + ------- + pyarrow.compute.Expression + An Expression representing the filters + """ + import pyarrow.dataset as ds + + if isinstance(filters, ds.Expression): + return filters + + filters = _check_filters(filters, check_null_strings=False) + + def convert_single_predicate(col, op, val): + field = ds.field(col) + + if op == "=" or op == "==": + return field == val + elif op == "!=": + return field != val + elif op == '<': + return field < val + elif op == '>': + return field > val + elif op == '<=': + return field <= val + elif op == '>=': + return field >= val + elif op == 'in': + return field.isin(val) + elif op == 'not in': + return ~field.isin(val) + else: + raise ValueError( + '"{0}" is not a valid operator in predicates.'.format( + (col, op, val))) + + disjunction_members = [] + + for conjunction in filters: + conjunction_members = [ + convert_single_predicate(col, op, val) + for col, op, val in conjunction + ] + + disjunction_members.append(reduce(operator.and_, conjunction_members)) + + return reduce(operator.or_, disjunction_members) + + +_filters_to_expression = _deprecate_api( + "_filters_to_expression", "filters_to_expression", + filters_to_expression, "10.0.0", DeprecationWarning) + + +# ---------------------------------------------------------------------- +# Reading a single Parquet file + + +class ParquetFile: + """ + Reader interface for a single Parquet file. + + Parameters + ---------- + source : str, pathlib.Path, pyarrow.NativeFile, or file-like object + Readable source. For passing bytes or buffer-like file containing a + Parquet file, use pyarrow.BufferReader. + metadata : FileMetaData, default None + Use existing metadata object, rather than reading from file. + common_metadata : FileMetaData, default None + Will be used in reads for pandas schema metadata if not found in the + main file's metadata, no other uses at the moment. + read_dictionary : list + List of column names to read directly as DictionaryArray. + memory_map : bool, default False + If the source is a file path, use a memory map to read file, which can + improve performance in some environments. + buffer_size : int, default 0 + If positive, perform read buffering when deserializing individual + column chunks. Otherwise IO calls are unbuffered. + pre_buffer : bool, default False + Coalesce and issue file reads in parallel to improve performance on + high-latency filesystems (e.g. S3). If True, Arrow will use a + background I/O thread pool. + coerce_int96_timestamp_unit : str, default None + Cast timestamps that are stored in INT96 format to a particular + resolution (e.g. 'ms'). Setting to None is equivalent to 'ns' + and therefore INT96 timestamps will be inferred as timestamps + in nanoseconds. + decryption_properties : FileDecryptionProperties, default None + File decryption properties for Parquet Modular Encryption. 
+ thrift_string_size_limit : int, default None + If not None, override the maximum total string size allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. + thrift_container_size_limit : int, default None + If not None, override the maximum total size of containers allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. + filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. + page_checksum_verification : bool, default False + If True, verify the checksum for each page read from the file. + + Examples + -------- + + Generate an example PyArrow Table and write it to Parquet file: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + + Create a ``ParquetFile`` object from the Parquet file: + + >>> parquet_file = pq.ParquetFile('example.parquet') + + Read the data: + + >>> parquet_file.read() + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[2,2,4,4,5,100]] + animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]] + + Create a ParquetFile object with "animal" column as DictionaryArray: + + >>> parquet_file = pq.ParquetFile('example.parquet', + ... read_dictionary=["animal"]) + >>> parquet_file.read() + pyarrow.Table + n_legs: int64 + animal: dictionary + ---- + n_legs: [[2,2,4,4,5,100]] + animal: [ -- dictionary: + ["Flamingo","Parrot",...,"Brittle stars","Centipede"] -- indices: + [0,1,2,3,4,5]] + """ + + def __init__(self, source, *, metadata=None, common_metadata=None, + read_dictionary=None, memory_map=False, buffer_size=0, + pre_buffer=False, coerce_int96_timestamp_unit=None, + decryption_properties=None, thrift_string_size_limit=None, + thrift_container_size_limit=None, filesystem=None, + page_checksum_verification=False): + + self._close_source = getattr(source, 'closed', True) + + filesystem, source = _resolve_filesystem_and_path( + source, filesystem, memory_map=memory_map) + if filesystem is not None: + source = filesystem.open_input_file(source) + self._close_source = True # We opened it here, ensure we close it. + + self.reader = ParquetReader() + self.reader.open( + source, use_memory_map=memory_map, + buffer_size=buffer_size, pre_buffer=pre_buffer, + read_dictionary=read_dictionary, metadata=metadata, + coerce_int96_timestamp_unit=coerce_int96_timestamp_unit, + decryption_properties=decryption_properties, + thrift_string_size_limit=thrift_string_size_limit, + thrift_container_size_limit=thrift_container_size_limit, + page_checksum_verification=page_checksum_verification, + ) + self.common_metadata = common_metadata + self._nested_paths_by_prefix = self._build_nested_paths() + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + self.close() + + def _build_nested_paths(self): + paths = self.reader.column_paths + + result = defaultdict(list) + + for i, path in enumerate(paths): + key = path[0] + rest = path[1:] + while True: + result[key].append(i) + + if not rest: + break + + key = '.'.join((key, rest[0])) + rest = rest[1:] + + return result + + @property + def metadata(self): + """ + Return the Parquet metadata. 
+ """ + return self.reader.metadata + + @property + def schema(self): + """ + Return the Parquet schema, unconverted to Arrow types + """ + return self.metadata.schema + + @property + def schema_arrow(self): + """ + Return the inferred Arrow schema, converted from the whole Parquet + file's schema + + Examples + -------- + Generate an example Parquet file: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + Read the Arrow schema: + + >>> parquet_file.schema_arrow + n_legs: int64 + animal: string + """ + return self.reader.schema_arrow + + @property + def num_row_groups(self): + """ + Return the number of row groups of the Parquet file. + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + >>> parquet_file.num_row_groups + 1 + """ + return self.reader.num_row_groups + + def close(self, force: bool = False): + if self._close_source or force: + self.reader.close() + + @property + def closed(self) -> bool: + return self.reader.closed + + def read_row_group(self, i, columns=None, use_threads=True, + use_pandas_metadata=False): + """ + Read a single row group from a Parquet file. + + Parameters + ---------- + i : int + Index of the individual row group that we want to read. + columns : list + If not None, only these columns will be read from the row group. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + use_threads : bool, default True + Perform multi-threaded column reads. + use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Returns + ------- + pyarrow.table.Table + Content of the row group as a table (of columns) + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + >>> parquet_file.read_row_group(0) + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[2,2,4,4,5,100]] + animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]] + """ + column_indices = self._get_column_indices( + columns, use_pandas_metadata=use_pandas_metadata) + return self.reader.read_row_group(i, column_indices=column_indices, + use_threads=use_threads) + + def read_row_groups(self, row_groups, columns=None, use_threads=True, + use_pandas_metadata=False): + """ + Read a multiple row groups from a Parquet file. + + Parameters + ---------- + row_groups : list + Only these row groups will be read from the file. + columns : list + If not None, only these columns will be read from the row group. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + use_threads : bool, default True + Perform multi-threaded column reads. 
+ use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Returns + ------- + pyarrow.table.Table + Content of the row groups as a table (of columns). + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + >>> parquet_file.read_row_groups([0,0]) + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[2,2,4,4,5,...,2,4,4,5,100]] + animal: [["Flamingo","Parrot","Dog",...,"Brittle stars","Centipede"]] + """ + column_indices = self._get_column_indices( + columns, use_pandas_metadata=use_pandas_metadata) + return self.reader.read_row_groups(row_groups, + column_indices=column_indices, + use_threads=use_threads) + + def iter_batches(self, batch_size=65536, row_groups=None, columns=None, + use_threads=True, use_pandas_metadata=False): + """ + Read streaming batches from a Parquet file. + + Parameters + ---------- + batch_size : int, default 64K + Maximum number of records to yield per batch. Batches may be + smaller if there aren't enough rows in the file. + row_groups : list + Only these row groups will be read from the file. + columns : list + If not None, only these columns will be read from the file. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + use_threads : boolean, default True + Perform multi-threaded column reads. + use_pandas_metadata : boolean, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Yields + ------ + pyarrow.RecordBatch + Contents of each batch as a record batch + + Examples + -------- + Generate an example Parquet file: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + >>> for i in parquet_file.iter_batches(): + ... print("RecordBatch") + ... print(i.to_pandas()) + ... + RecordBatch + n_legs animal + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + """ + if row_groups is None: + row_groups = range(0, self.metadata.num_row_groups) + column_indices = self._get_column_indices( + columns, use_pandas_metadata=use_pandas_metadata) + + batches = self.reader.iter_batches(batch_size, + row_groups=row_groups, + column_indices=column_indices, + use_threads=use_threads) + return batches + + def read(self, columns=None, use_threads=True, use_pandas_metadata=False): + """ + Read a Table from Parquet format. + + Parameters + ---------- + columns : list + If not None, only these columns will be read from the file. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + use_threads : bool, default True + Perform multi-threaded column reads. + use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Returns + ------- + pyarrow.table.Table + Content of the file as a table (of columns). 
+ + Examples + -------- + Generate an example Parquet file: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + Read a Table: + + >>> parquet_file.read(columns=["animal"]) + pyarrow.Table + animal: string + ---- + animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]] + """ + column_indices = self._get_column_indices( + columns, use_pandas_metadata=use_pandas_metadata) + return self.reader.read_all(column_indices=column_indices, + use_threads=use_threads) + + def scan_contents(self, columns=None, batch_size=65536): + """ + Read contents of file for the given columns and batch size. + + Notes + ----- + This function's primary purpose is benchmarking. + The scan is executed on a single thread. + + Parameters + ---------- + columns : list of integers, default None + Select columns to read, if None scan all columns. + batch_size : int, default 64K + Number of rows to read at a time internally. + + Returns + ------- + num_rows : int + Number of rows in file + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + >>> parquet_file.scan_contents() + 6 + """ + column_indices = self._get_column_indices(columns) + return self.reader.scan_contents(column_indices, + batch_size=batch_size) + + def _get_column_indices(self, column_names, use_pandas_metadata=False): + if column_names is None: + return None + + indices = [] + + for name in column_names: + if name in self._nested_paths_by_prefix: + indices.extend(self._nested_paths_by_prefix[name]) + + if use_pandas_metadata: + file_keyvalues = self.metadata.metadata + common_keyvalues = (self.common_metadata.metadata + if self.common_metadata is not None + else None) + + if file_keyvalues and b'pandas' in file_keyvalues: + index_columns = _get_pandas_index_columns(file_keyvalues) + elif common_keyvalues and b'pandas' in common_keyvalues: + index_columns = _get_pandas_index_columns(common_keyvalues) + else: + index_columns = [] + + if indices is not None and index_columns: + indices += [self.reader.column_name_idx(descr) + for descr in index_columns + if not isinstance(descr, dict)] + + return indices + + +_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]') + + +def _sanitized_spark_field_name(name): + return _SPARK_DISALLOWED_CHARS.sub('_', name) + + +def _sanitize_schema(schema, flavor): + if 'spark' in flavor: + sanitized_fields = [] + + schema_changed = False + + for field in schema: + name = field.name + sanitized_name = _sanitized_spark_field_name(name) + + if sanitized_name != name: + schema_changed = True + sanitized_field = pa.field(sanitized_name, field.type, + field.nullable, field.metadata) + sanitized_fields.append(sanitized_field) + else: + sanitized_fields.append(field) + + new_schema = pa.schema(sanitized_fields, metadata=schema.metadata) + return new_schema, schema_changed + else: + return schema, False + + +def _sanitize_table(table, new_schema, flavor): + # TODO: This will not handle prohibited characters in nested field names + if 'spark' in flavor: + column_data = [table[i] for i in 
range(table.num_columns)] + return pa.Table.from_arrays(column_data, schema=new_schema) + else: + return table + + +_parquet_writer_arg_docs = """version : {"1.0", "2.4", "2.6"}, default "2.6" + Determine which Parquet logical types are available for use, whether the + reduced set from the Parquet 1.x.x format or the expanded logical types + added in later format versions. + Files written with version='2.4' or '2.6' may not be readable in all + Parquet implementations, so version='1.0' is likely the choice that + maximizes file compatibility. + UINT32 and some logical types are only available with version '2.4'. + Nanosecond timestamps are only available with version '2.6'. + Other features such as compression algorithms or the new serialized + data page format must be enabled separately (see 'compression' and + 'data_page_version'). +use_dictionary : bool or list, default True + Specify if we should use dictionary encoding in general or only for + some columns. + When encoding the column, if the dictionary size is too large, the + column will fallback to ``PLAIN`` encoding. Specially, ``BOOLEAN`` type + doesn't support dictionary encoding. +compression : str or dict, default 'snappy' + Specify the compression codec, either on a general basis or per-column. + Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}. +write_statistics : bool or list, default True + Specify if we should write statistics in general (default is True) or only + for some columns. +use_deprecated_int96_timestamps : bool, default None + Write timestamps to INT96 Parquet format. Defaults to False unless enabled + by flavor argument. This take priority over the coerce_timestamps option. +coerce_timestamps : str, default None + Cast timestamps to a particular resolution. If omitted, defaults are chosen + depending on `version`. By default, for ``version='1.0'`` (the default) + and ``version='2.4'``, nanoseconds are cast to microseconds ('us'), while + for other `version` values, they are written natively without loss + of resolution. Seconds are always cast to milliseconds ('ms') by default, + as Parquet does not have any temporal type with seconds resolution. + If the casting results in loss of data, it will raise an exception + unless ``allow_truncated_timestamps=True`` is given. + Valid values: {None, 'ms', 'us'} +allow_truncated_timestamps : bool, default False + Allow loss of data when coercing timestamps to a particular + resolution. E.g. if microsecond or nanosecond data is lost when coercing to + 'ms', do not raise an exception. Passing ``allow_truncated_timestamp=True`` + will NOT result in the truncation exception being ignored unless + ``coerce_timestamps`` is not None. +data_page_size : int, default None + Set a target threshold for the approximate encoded size of data + pages within a column chunk (in bytes). If None, use the default data page + size of 1MByte. +flavor : {'spark'}, default None + Sanitize schema or set other compatibility options to work with + various target systems. +filesystem : FileSystem, default None + If nothing passed, will be inferred from `where` if path-like, else + `where` is already a file-like object so no filesystem is needed. +compression_level : int or dict, default None + Specify the compression level for a codec, either on a general basis or + per-column. If None is passed, arrow selects the compression level for + the compression codec in use. 
The compression level has a different + meaning for each codec, so you have to read the documentation of the + codec you are using. + An exception is thrown if the compression codec does not allow specifying + a compression level. +use_byte_stream_split : bool or list, default False + Specify if the byte_stream_split encoding should be used in general or + only for some columns. If both dictionary and byte_stream_stream are + enabled, then dictionary is preferred. + The byte_stream_split encoding is valid only for floating-point data types + and should be combined with a compression codec. +column_encoding : string or dict, default None + Specify the encoding scheme on a per column basis. + Can only be used when ``use_dictionary`` is set to False, and + cannot be used in combination with ``use_byte_stream_split``. + Currently supported values: {'PLAIN', 'BYTE_STREAM_SPLIT', + 'DELTA_BINARY_PACKED', 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'}. + Certain encodings are only compatible with certain data types. + Please refer to the encodings section of `Reading and writing Parquet + files `_. +data_page_version : {"1.0", "2.0"}, default "1.0" + The serialized Parquet data page format version to write, defaults to + 1.0. This does not impact the file schema logical types and Arrow to + Parquet type casting behavior; for that use the "version" option. +use_compliant_nested_type : bool, default True + Whether to write compliant Parquet nested type (lists) as defined + `here `_, defaults to ``True``. + For ``use_compliant_nested_type=True``, this will write into a list + with 3-level structure where the middle level, named ``list``, + is a repeated group with a single field named ``element``:: + + group (LIST) { + repeated group list { + element; + } + } + + For ``use_compliant_nested_type=False``, this will also write into a list + with 3-level structure, where the name of the single field of the middle + level ``list`` is taken from the element name for nested columns in Arrow, + which defaults to ``item``:: + + group (LIST) { + repeated group list { + item; + } + } +encryption_properties : FileEncryptionProperties, default None + File encryption properties for Parquet Modular Encryption. + If None, no encryption will be done. + The encryption properties can be created using: + ``CryptoFactory.file_encryption_properties()``. +write_batch_size : int, default None + Number of values to write to a page at a time. If None, use the default of + 1024. ``write_batch_size`` is complementary to ``data_page_size``. If pages + are exceeding the ``data_page_size`` due to large column values, lowering + the batch size can help keep page sizes closer to the intended size. +dictionary_pagesize_limit : int, default None + Specify the dictionary page size limit per row group. If None, use the + default 1MB. +store_schema : bool, default True + By default, the Arrow schema is serialized and stored in the Parquet + file metadata (in the "ARROW:schema" key). When reading the file, + if this key is available, it will be used to more faithfully recreate + the original Arrow data. For example, for tz-aware timestamp columns + it will restore the timezone (Parquet only stores the UTC values without + timezone), or columns with duration type will be restored from the int64 + Parquet column. +write_page_index : bool, default False + Whether to write a page index in general for all columns. + Writing statistics to the page index disables the old method of writing + statistics to each data page header. 
The page index makes statistics-based + filtering more efficient than the page header, as it gathers all the + statistics for a Parquet file in a single place, avoiding scattered I/O. + Note that the page index is not yet used on the read size by PyArrow. +write_page_checksum : bool, default False + Whether to write page checksums in general for all columns. + Page checksums enable detection of data corruption, which might occur during + transmission or in the storage. +sorting_columns : Sequence of SortingColumn, default None + Specify the sort order of the data being written. The writer does not sort + the data nor does it verify that the data is sorted. The sort order is + written to the row group metadata, which can then be used by readers. +""" + +_parquet_writer_example_doc = """\ +Generate an example PyArrow Table and RecordBatch: + +>>> import pyarrow as pa +>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], +... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]}) +>>> batch = pa.record_batch([[2, 2, 4, 4, 5, 100], +... ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]], +... names=['n_legs', 'animal']) + +create a ParquetWriter object: + +>>> import pyarrow.parquet as pq +>>> writer = pq.ParquetWriter('example.parquet', table.schema) + +and write the Table into the Parquet file: + +>>> writer.write_table(table) +>>> writer.close() + +>>> pq.read_table('example.parquet').to_pandas() + n_legs animal +0 2 Flamingo +1 2 Parrot +2 4 Dog +3 4 Horse +4 5 Brittle stars +5 100 Centipede + +create a ParquetWriter object for the RecordBatch: + +>>> writer2 = pq.ParquetWriter('example2.parquet', batch.schema) + +and write the RecordBatch into the Parquet file: + +>>> writer2.write_batch(batch) +>>> writer2.close() + +>>> pq.read_table('example2.parquet').to_pandas() + n_legs animal +0 2 Flamingo +1 2 Parrot +2 4 Dog +3 4 Horse +4 5 Brittle stars +5 100 Centipede +""" + + +class ParquetWriter: + + __doc__ = """ +Class for incrementally building a Parquet file for Arrow tables. + +Parameters +---------- +where : path or file-like object +schema : pyarrow.Schema +{} +writer_engine_version : unused +**options : dict + If options contains a key `metadata_collector` then the + corresponding value is assumed to be a list (or any object with + `.append` method) that will be filled with the file metadata instance + of the written file. 
+ +Examples +-------- +{} +""".format(_parquet_writer_arg_docs, _parquet_writer_example_doc) + + def __init__(self, where, schema, filesystem=None, + flavor=None, + version='2.6', + use_dictionary=True, + compression='snappy', + write_statistics=True, + use_deprecated_int96_timestamps=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + writer_engine_version=None, + data_page_version='1.0', + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + store_schema=True, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None, + **options): + if use_deprecated_int96_timestamps is None: + # Use int96 timestamps for Spark + if flavor is not None and 'spark' in flavor: + use_deprecated_int96_timestamps = True + else: + use_deprecated_int96_timestamps = False + + self.flavor = flavor + if flavor is not None: + schema, self.schema_changed = _sanitize_schema(schema, flavor) + else: + self.schema_changed = False + + self.schema = schema + self.where = where + + # If we open a file using a filesystem, store file handle so we can be + # sure to close it when `self.close` is called. + self.file_handle = None + + filesystem, path = _resolve_filesystem_and_path(where, filesystem) + if filesystem is not None: + # ARROW-10480: do not auto-detect compression. While + # a filename like foo.parquet.gz is nonconforming, it + # shouldn't implicitly apply compression. + sink = self.file_handle = filesystem.open_output_stream( + path, compression=None) + else: + sink = where + self._metadata_collector = options.pop('metadata_collector', None) + engine_version = 'V2' + self.writer = _parquet.ParquetWriter( + sink, schema, + version=version, + compression=compression, + use_dictionary=use_dictionary, + write_statistics=write_statistics, + use_deprecated_int96_timestamps=use_deprecated_int96_timestamps, + compression_level=compression_level, + use_byte_stream_split=use_byte_stream_split, + column_encoding=column_encoding, + writer_engine_version=engine_version, + data_page_version=data_page_version, + use_compliant_nested_type=use_compliant_nested_type, + encryption_properties=encryption_properties, + write_batch_size=write_batch_size, + dictionary_pagesize_limit=dictionary_pagesize_limit, + store_schema=store_schema, + write_page_index=write_page_index, + write_page_checksum=write_page_checksum, + sorting_columns=sorting_columns, + **options) + self.is_open = True + + def __del__(self): + if getattr(self, 'is_open', False): + self.close() + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + self.close() + # return false since we want to propagate exceptions + return False + + def write(self, table_or_batch, row_group_size=None): + """ + Write RecordBatch or Table to the Parquet file. + + Parameters + ---------- + table_or_batch : {RecordBatch, Table} + row_group_size : int, default None + Maximum number of rows in each written row group. If None, + the row group size will be the minimum of the input + table or batch length and 1024 * 1024. + """ + if isinstance(table_or_batch, pa.RecordBatch): + self.write_batch(table_or_batch, row_group_size) + elif isinstance(table_or_batch, pa.Table): + self.write_table(table_or_batch, row_group_size) + else: + raise TypeError(type(table_or_batch)) + + def write_batch(self, batch, row_group_size=None): + """ + Write RecordBatch to the Parquet file. 
+ + Parameters + ---------- + batch : RecordBatch + row_group_size : int, default None + Maximum number of rows in written row group. If None, the + row group size will be the minimum of the RecordBatch + size and 1024 * 1024. If set larger than 64Mi then 64Mi + will be used instead. + """ + table = pa.Table.from_batches([batch], batch.schema) + self.write_table(table, row_group_size) + + def write_table(self, table, row_group_size=None): + """ + Write Table to the Parquet file. + + Parameters + ---------- + table : Table + row_group_size : int, default None + Maximum number of rows in each written row group. If None, + the row group size will be the minimum of the Table size + and 1024 * 1024. If set larger than 64Mi then 64Mi will + be used instead. + + """ + if self.schema_changed: + table = _sanitize_table(table, self.schema, self.flavor) + assert self.is_open + + if not table.schema.equals(self.schema, check_metadata=False): + msg = ('Table schema does not match schema used to create file: ' + '\ntable:\n{!s} vs. \nfile:\n{!s}' + .format(table.schema, self.schema)) + raise ValueError(msg) + + self.writer.write_table(table, row_group_size=row_group_size) + + def close(self): + """ + Close the connection to the Parquet file. + """ + if self.is_open: + self.writer.close() + self.is_open = False + if self._metadata_collector is not None: + self._metadata_collector.append(self.writer.metadata) + if self.file_handle is not None: + self.file_handle.close() + + +def _get_pandas_index_columns(keyvalues): + return (json.loads(keyvalues[b'pandas'].decode('utf8')) + ['index_columns']) + + +EXCLUDED_PARQUET_PATHS = {'_SUCCESS'} + + +_read_docstring_common = """\ +read_dictionary : list, default None + List of names or column paths (for nested types) to read directly + as DictionaryArray. Only supported for BYTE_ARRAY storage. To read + a flat column as dictionary-encoded pass the column name. For + nested types, you must pass the full column "path", which could be + something like level1.level2.list.item. Refer to the Parquet + file's schema to obtain the paths. +memory_map : bool, default False + If the source is a file path, use a memory map to read file, which can + improve performance in some environments. +buffer_size : int, default 0 + If positive, perform read buffering when deserializing individual + column chunks. Otherwise IO calls are unbuffered. +partitioning : pyarrow.dataset.Partitioning or str or list of str, \ +default "hive" + The partitioning scheme for a partitioned dataset. The default of "hive" + assumes directory names with key=value pairs like "/year=2009/month=11". + In addition, a scheme like "/2009/11" is also supported, in which case + you need to specify the field names or a full schema. See the + ``pyarrow.dataset.partitioning()`` function for more details.""" + + +_parquet_dataset_example = """\ +Generate an example PyArrow Table and write it to a partitioned dataset: + +>>> import pyarrow as pa +>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], +... 'n_legs': [2, 2, 4, 4, 5, 100], +... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]}) +>>> import pyarrow.parquet as pq +>>> pq.write_to_dataset(table, root_path='dataset_v2', +... 
partition_cols=['year']) + +create a ParquetDataset object from the dataset source: + +>>> dataset = pq.ParquetDataset('dataset_v2/') + +and read the data: + +>>> dataset.read().to_pandas() + n_legs animal year +0 5 Brittle stars 2019 +1 2 Flamingo 2020 +2 4 Dog 2021 +3 100 Centipede 2021 +4 2 Parrot 2022 +5 4 Horse 2022 + +create a ParquetDataset object with filter: + +>>> dataset = pq.ParquetDataset('dataset_v2/', +... filters=[('n_legs','=',4)]) +>>> dataset.read().to_pandas() + n_legs animal year +0 4 Dog 2021 +1 4 Horse 2022 +""" + + +class ParquetDataset: + __doc__ = """ +Encapsulates details of reading a complete Parquet dataset possibly +consisting of multiple files and partitions in subdirectories. + +Parameters +---------- +path_or_paths : str or List[str] + A directory name, single file name, or list of file names. +filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. +schema : pyarrow.parquet.Schema + Optionally provide the Schema for the Dataset, in which case it will + not be inferred from the source. +filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None + Rows which do not match the filter predicate will be removed from scanned + data. Partition keys embedded in a nested directory structure will be + exploited to avoid loading files at all if they contain no matching rows. + Within-file level filtering and different partitioning schemes are supported. + + {1} +{0} +ignore_prefixes : list, optional + Files matching any of these prefixes will be ignored by the + discovery process. + This is matched to the basename of a path. + By default this is ['.', '_']. + Note that discovery happens only if a directory is passed as source. +pre_buffer : bool, default True + Coalesce and issue file reads in parallel to improve performance on + high-latency filesystems (e.g. S3, GCS). If True, Arrow will use a + background I/O thread pool. If using a filesystem layer that itself + performs readahead (e.g. fsspec's S3FS), disable readahead for best + results. Set to False if you want to prioritize minimal memory usage + over maximum speed. +coerce_int96_timestamp_unit : str, default None + Cast timestamps that are stored in INT96 format to a particular resolution + (e.g. 'ms'). Setting to None is equivalent to 'ns' and therefore INT96 + timestamps will be inferred as timestamps in nanoseconds. +decryption_properties : FileDecryptionProperties or None + File-level decryption properties. + The decryption properties can be created using + ``CryptoFactory.file_decryption_properties()``. +thrift_string_size_limit : int, default None + If not None, override the maximum total string size allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. +thrift_container_size_limit : int, default None + If not None, override the maximum total size of containers allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. +page_checksum_verification : bool, default False + If True, verify the page checksum for each page read from the file. +use_legacy_dataset : bool, optional + Deprecated and has no effect from PyArrow version 15.0.0. 
+ +Examples +-------- +{2} +""".format(_read_docstring_common, _DNF_filter_doc, _parquet_dataset_example) + + def __init__(self, path_or_paths, filesystem=None, schema=None, *, filters=None, + read_dictionary=None, memory_map=False, buffer_size=None, + partitioning="hive", ignore_prefixes=None, pre_buffer=True, + coerce_int96_timestamp_unit=None, + decryption_properties=None, thrift_string_size_limit=None, + thrift_container_size_limit=None, + page_checksum_verification=False, + use_legacy_dataset=None): + + if use_legacy_dataset is not None: + warnings.warn( + "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 " + "and will be removed in a future version.", + FutureWarning, stacklevel=2) + + import pyarrow.dataset as ds + + # map format arguments + read_options = { + "pre_buffer": pre_buffer, + "coerce_int96_timestamp_unit": coerce_int96_timestamp_unit, + "thrift_string_size_limit": thrift_string_size_limit, + "thrift_container_size_limit": thrift_container_size_limit, + "page_checksum_verification": page_checksum_verification, + } + if buffer_size: + read_options.update(use_buffered_stream=True, + buffer_size=buffer_size) + if read_dictionary is not None: + read_options.update(dictionary_columns=read_dictionary) + + if decryption_properties is not None: + read_options.update(decryption_properties=decryption_properties) + + self._filter_expression = None + if filters is not None: + self._filter_expression = filters_to_expression(filters) + + # map old filesystems to new one + if filesystem is not None: + filesystem = _ensure_filesystem( + filesystem, use_mmap=memory_map) + elif filesystem is None and memory_map: + # if memory_map is specified, assume local file system (string + # path can in principle be URI for any filesystem) + filesystem = LocalFileSystem(use_mmap=memory_map) + + # This needs to be checked after _ensure_filesystem, because that + # handles the case of an fsspec LocalFileSystem + if ( + hasattr(path_or_paths, "__fspath__") and + filesystem is not None and + not isinstance(filesystem, LocalFileSystem) + ): + raise TypeError( + "Path-like objects with __fspath__ must only be used with " + f"local file systems, not {type(filesystem)}" + ) + + # check for single fragment dataset + single_file = None + self._base_dir = None + if not isinstance(path_or_paths, list): + if _is_path_like(path_or_paths): + path_or_paths = _stringify_path(path_or_paths) + if filesystem is None: + # path might be a URI describing the FileSystem as well + try: + filesystem, path_or_paths = FileSystem.from_uri( + path_or_paths) + except ValueError: + filesystem = LocalFileSystem(use_mmap=memory_map) + finfo = filesystem.get_file_info(path_or_paths) + if finfo.is_file: + single_file = path_or_paths + if finfo.type == FileType.Directory: + self._base_dir = path_or_paths + else: + single_file = path_or_paths + + parquet_format = ds.ParquetFileFormat(**read_options) + + if single_file is not None: + fragment = parquet_format.make_fragment(single_file, filesystem) + + self._dataset = ds.FileSystemDataset( + [fragment], schema=schema or fragment.physical_schema, + format=parquet_format, + filesystem=fragment.filesystem + ) + return + + # check partitioning to enable dictionary encoding + if partitioning == "hive": + partitioning = ds.HivePartitioning.discover( + infer_dictionary=True) + + self._dataset = ds.dataset(path_or_paths, filesystem=filesystem, + schema=schema, format=parquet_format, + partitioning=partitioning, + ignore_prefixes=ignore_prefixes) + + def equals(self, other): + if not 
isinstance(other, ParquetDataset): + raise TypeError('`other` must be an instance of ParquetDataset') + + return (self.schema == other.schema and + self._dataset.format == other._dataset.format and + self.filesystem == other.filesystem and + # self.fragments == other.fragments and + self.files == other.files) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + @property + def schema(self): + """ + Schema of the Dataset. + + Examples + -------- + Generate an example dataset: + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_v2_schema', + ... partition_cols=['year']) + >>> dataset = pq.ParquetDataset('dataset_v2_schema/') + + Read the schema: + + >>> dataset.schema + n_legs: int64 + animal: string + year: dictionary + """ + return self._dataset.schema + + def read(self, columns=None, use_threads=True, use_pandas_metadata=False): + """ + Read (multiple) Parquet files as a single pyarrow.Table. + + Parameters + ---------- + columns : List[str] + Names of columns to read from the dataset. The partition fields + are not automatically included. + use_threads : bool, default True + Perform multi-threaded column reads. + use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Returns + ------- + pyarrow.Table + Content of the file as a table (of columns). + + Examples + -------- + Generate an example dataset: + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_v2_read', + ... 
partition_cols=['year']) + >>> dataset = pq.ParquetDataset('dataset_v2_read/') + + Read the dataset: + + >>> dataset.read(columns=["n_legs"]) + pyarrow.Table + n_legs: int64 + ---- + n_legs: [[5],[2],[4,100],[2,4]] + """ + # if use_pandas_metadata, we need to include index columns in the + # column selection, to be able to restore those in the pandas DataFrame + metadata = self.schema.metadata or {} + + if use_pandas_metadata: + # if the dataset schema metadata itself doesn't have pandas + # then try to get this from common file (for backwards compat) + if b"pandas" not in metadata: + common_metadata = self._get_common_pandas_metadata() + if common_metadata: + metadata = common_metadata + + if columns is not None and use_pandas_metadata: + if metadata and b'pandas' in metadata: + # RangeIndex can be represented as dict instead of column name + index_columns = [ + col for col in _get_pandas_index_columns(metadata) + if not isinstance(col, dict) + ] + columns = ( + list(columns) + list(set(index_columns) - set(columns)) + ) + + table = self._dataset.to_table( + columns=columns, filter=self._filter_expression, + use_threads=use_threads + ) + + # if use_pandas_metadata, restore the pandas metadata (which gets + # lost if doing a specific `columns` selection in to_table) + if use_pandas_metadata: + if metadata and b"pandas" in metadata: + new_metadata = table.schema.metadata or {} + new_metadata.update({b"pandas": metadata[b"pandas"]}) + table = table.replace_schema_metadata(new_metadata) + + return table + + def _get_common_pandas_metadata(self): + + if not self._base_dir: + return None + + metadata = None + for name in ["_common_metadata", "_metadata"]: + metadata_path = os.path.join(str(self._base_dir), name) + finfo = self.filesystem.get_file_info(metadata_path) + if finfo.is_file: + pq_meta = read_metadata( + metadata_path, filesystem=self.filesystem) + metadata = pq_meta.metadata + if metadata and b'pandas' in metadata: + break + + return metadata + + def read_pandas(self, **kwargs): + """ + Read dataset including pandas metadata, if any. Other arguments passed + through to :func:`read`, see docstring for further details. + + Parameters + ---------- + **kwargs : optional + Additional options for :func:`read` + + Examples + -------- + Generate an example parquet file: + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'table_V2.parquet') + >>> dataset = pq.ParquetDataset('table_V2.parquet') + + Read the dataset with pandas metadata: + + >>> dataset.read_pandas(columns=["n_legs"]) + pyarrow.Table + n_legs: int64 + ---- + n_legs: [[2,2,4,4,5,100]] + + >>> dataset.read_pandas(columns=["n_legs"]).schema.pandas_metadata + {'index_columns': [{'kind': 'range', 'name': None, 'start': 0, ...} + """ + return self.read(use_pandas_metadata=True, **kwargs) + + @property + def fragments(self): + """ + A list of the Dataset source fragments or pieces with absolute + file paths. + + Examples + -------- + Generate an example dataset: + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... 
"Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_v2_fragments', + ... partition_cols=['year']) + >>> dataset = pq.ParquetDataset('dataset_v2_fragments/') + + List the fragments: + + >>> dataset.fragments + [>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_v2_files', + ... partition_cols=['year']) + >>> dataset = pq.ParquetDataset('dataset_v2_files/') + + List the files: + + >>> dataset.files + ['dataset_v2_files/year=2019/...-0.parquet', ... + """ + return self._dataset.files + + @property + def filesystem(self): + """ + The filesystem type of the Dataset source. + """ + return self._dataset.filesystem + + @property + def partitioning(self): + """ + The partitioning of the Dataset source, if discovered. + """ + return self._dataset.partitioning + + +_read_table_docstring = """ +{0} + +Parameters +---------- +source : str, pyarrow.NativeFile, or file-like object + If a string passed, can be a single file name or directory name. For + file-like objects, only read a single file. Use pyarrow.BufferReader to + read a file contained in a bytes or buffer-like object. +columns : list + If not None, only these columns will be read from the file. A column + name may be a prefix of a nested field, e.g. 'a' will select 'a.b', + 'a.c', and 'a.d.e'. If empty, no columns will be read. Note + that the table will still have the correct num_rows set despite having + no columns. +use_threads : bool, default True + Perform multi-threaded column reads. +schema : Schema, optional + Optionally provide the Schema for the parquet dataset, in which case it + will not be inferred from the source. +{1} +filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. +filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None + Rows which do not match the filter predicate will be removed from scanned + data. Partition keys embedded in a nested directory structure will be + exploited to avoid loading files at all if they contain no matching rows. + Within-file level filtering and different partitioning schemes are supported. + + {3} +use_legacy_dataset : bool, optional + Deprecated and has no effect from PyArrow version 15.0.0. +ignore_prefixes : list, optional + Files matching any of these prefixes will be ignored by the + discovery process. + This is matched to the basename of a path. + By default this is ['.', '_']. + Note that discovery happens only if a directory is passed as source. +pre_buffer : bool, default True + Coalesce and issue file reads in parallel to improve performance on + high-latency filesystems (e.g. S3). If True, Arrow will use a + background I/O thread pool. If using a filesystem layer that itself + performs readahead (e.g. fsspec's S3FS), disable readahead for best + results. +coerce_int96_timestamp_unit : str, default None + Cast timestamps that are stored in INT96 format to a particular + resolution (e.g. 'ms'). Setting to None is equivalent to 'ns' + and therefore INT96 timestamps will be inferred as timestamps + in nanoseconds. 
+decryption_properties : FileDecryptionProperties or None + File-level decryption properties. + The decryption properties can be created using + ``CryptoFactory.file_decryption_properties()``. +thrift_string_size_limit : int, default None + If not None, override the maximum total string size allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. +thrift_container_size_limit : int, default None + If not None, override the maximum total size of containers allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. +page_checksum_verification : bool, default False + If True, verify the checksum for each page read from the file. + +Returns +------- +{2} + +{4} +""" + +_read_table_example = """\ + +Examples +-------- + +Generate an example PyArrow Table and write it to a partitioned dataset: + +>>> import pyarrow as pa +>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], +... 'n_legs': [2, 2, 4, 4, 5, 100], +... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]}) +>>> import pyarrow.parquet as pq +>>> pq.write_to_dataset(table, root_path='dataset_name_2', +... partition_cols=['year']) + +Read the data: + +>>> pq.read_table('dataset_name_2').to_pandas() + n_legs animal year +0 5 Brittle stars 2019 +1 2 Flamingo 2020 +2 4 Dog 2021 +3 100 Centipede 2021 +4 2 Parrot 2022 +5 4 Horse 2022 + + +Read only a subset of columns: + +>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"]) +pyarrow.Table +n_legs: int64 +animal: string +---- +n_legs: [[5],[2],[4,100],[2,4]] +animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"],["Parrot","Horse"]] + +Read a subset of columns and read one column as DictionaryArray: + +>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"], +... read_dictionary=["animal"]) +pyarrow.Table +n_legs: int64 +animal: dictionary +---- +n_legs: [[5],[2],[4,100],[2,4]] +animal: [ -- dictionary: +["Brittle stars"] -- indices: +[0], -- dictionary: +["Flamingo"] -- indices: +[0], -- dictionary: +["Dog","Centipede"] -- indices: +[0,1], -- dictionary: +["Parrot","Horse"] -- indices: +[0,1]] + +Read the table with filter: + +>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"], +... 
filters=[('n_legs','<',4)]).to_pandas() + n_legs animal +0 2 Flamingo +1 2 Parrot + +Read data from a single Parquet file: + +>>> pq.write_table(table, 'example.parquet') +>>> pq.read_table('dataset_name_2').to_pandas() + n_legs animal year +0 5 Brittle stars 2019 +1 2 Flamingo 2020 +2 4 Dog 2021 +3 100 Centipede 2021 +4 2 Parrot 2022 +5 4 Horse 2022 +""" + + +def read_table(source, *, columns=None, use_threads=True, + schema=None, use_pandas_metadata=False, read_dictionary=None, + memory_map=False, buffer_size=0, partitioning="hive", + filesystem=None, filters=None, use_legacy_dataset=None, + ignore_prefixes=None, pre_buffer=True, + coerce_int96_timestamp_unit=None, + decryption_properties=None, thrift_string_size_limit=None, + thrift_container_size_limit=None, + page_checksum_verification=False): + + if use_legacy_dataset is not None: + warnings.warn( + "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 " + "and will be removed in a future version.", + FutureWarning, stacklevel=2) + + try: + dataset = ParquetDataset( + source, + schema=schema, + filesystem=filesystem, + partitioning=partitioning, + memory_map=memory_map, + read_dictionary=read_dictionary, + buffer_size=buffer_size, + filters=filters, + ignore_prefixes=ignore_prefixes, + pre_buffer=pre_buffer, + coerce_int96_timestamp_unit=coerce_int96_timestamp_unit, + thrift_string_size_limit=thrift_string_size_limit, + thrift_container_size_limit=thrift_container_size_limit, + page_checksum_verification=page_checksum_verification, + ) + except ImportError: + # fall back on ParquetFile for simple cases when pyarrow.dataset + # module is not available + if filters is not None: + raise ValueError( + "the 'filters' keyword is not supported when the " + "pyarrow.dataset module is not available" + ) + if partitioning != "hive": + raise ValueError( + "the 'partitioning' keyword is not supported when the " + "pyarrow.dataset module is not available" + ) + if schema is not None: + raise ValueError( + "the 'schema' argument is not supported when the " + "pyarrow.dataset module is not available" + ) + filesystem, path = _resolve_filesystem_and_path(source, filesystem) + if filesystem is not None: + source = filesystem.open_input_file(path) + # TODO test that source is not a directory or a list + dataset = ParquetFile( + source, read_dictionary=read_dictionary, + memory_map=memory_map, buffer_size=buffer_size, + pre_buffer=pre_buffer, + coerce_int96_timestamp_unit=coerce_int96_timestamp_unit, + decryption_properties=decryption_properties, + thrift_string_size_limit=thrift_string_size_limit, + thrift_container_size_limit=thrift_container_size_limit, + page_checksum_verification=page_checksum_verification, + ) + + return dataset.read(columns=columns, use_threads=use_threads, + use_pandas_metadata=use_pandas_metadata) + + +read_table.__doc__ = _read_table_docstring.format( + """Read a Table from Parquet format""", + "\n".join(("""use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded.""", _read_docstring_common)), + """pyarrow.Table + Content of the file as a table (of columns)""", + _DNF_filter_doc, _read_table_example) + + +def read_pandas(source, columns=None, **kwargs): + return read_table( + source, columns=columns, use_pandas_metadata=True, **kwargs + ) + + +read_pandas.__doc__ = _read_table_docstring.format( + 'Read a Table from Parquet format, also reading DataFrame\n' + 'index values if known in the file metadata', + 
"\n".join((_read_docstring_common, + """**kwargs + additional options for :func:`read_table`""")), + """pyarrow.Table + Content of the file as a Table of Columns, including DataFrame + indexes as columns""", + _DNF_filter_doc, "") + + +def write_table(table, where, row_group_size=None, version='2.6', + use_dictionary=True, compression='snappy', + write_statistics=True, + use_deprecated_int96_timestamps=None, + coerce_timestamps=None, + allow_truncated_timestamps=False, + data_page_size=None, flavor=None, + filesystem=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + data_page_version='1.0', + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + store_schema=True, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None, + **kwargs): + # Implementor's note: when adding keywords here / updating defaults, also + # update it in write_to_dataset and _dataset_parquet.pyx ParquetFileWriteOptions + row_group_size = kwargs.pop('chunk_size', row_group_size) + use_int96 = use_deprecated_int96_timestamps + try: + with ParquetWriter( + where, table.schema, + filesystem=filesystem, + version=version, + flavor=flavor, + use_dictionary=use_dictionary, + write_statistics=write_statistics, + coerce_timestamps=coerce_timestamps, + data_page_size=data_page_size, + allow_truncated_timestamps=allow_truncated_timestamps, + compression=compression, + use_deprecated_int96_timestamps=use_int96, + compression_level=compression_level, + use_byte_stream_split=use_byte_stream_split, + column_encoding=column_encoding, + data_page_version=data_page_version, + use_compliant_nested_type=use_compliant_nested_type, + encryption_properties=encryption_properties, + write_batch_size=write_batch_size, + dictionary_pagesize_limit=dictionary_pagesize_limit, + store_schema=store_schema, + write_page_index=write_page_index, + write_page_checksum=write_page_checksum, + sorting_columns=sorting_columns, + **kwargs) as writer: + writer.write_table(table, row_group_size=row_group_size) + except Exception: + if _is_path_like(where): + try: + os.remove(_stringify_path(where)) + except os.error: + pass + raise + + +_write_table_example = """\ +Generate an example PyArrow Table: + +>>> import pyarrow as pa +>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], +... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]}) + +and write the Table into Parquet file: + +>>> import pyarrow.parquet as pq +>>> pq.write_table(table, 'example.parquet') + +Defining row group size for the Parquet file: + +>>> pq.write_table(table, 'example.parquet', row_group_size=3) + +Defining row group compression (default is Snappy): + +>>> pq.write_table(table, 'example.parquet', compression='none') + +Defining row group compression and encoding per-column: + +>>> pq.write_table(table, 'example.parquet', +... compression={'n_legs': 'snappy', 'animal': 'gzip'}, +... use_dictionary=['n_legs', 'animal']) + +Defining column encoding per-column: + +>>> pq.write_table(table, 'example.parquet', +... column_encoding={'animal':'PLAIN'}, +... use_dictionary=False) +""" + +write_table.__doc__ = """ +Write a Table to Parquet format. + +Parameters +---------- +table : pyarrow.Table +where : string or pyarrow.NativeFile +row_group_size : int + Maximum number of rows in each written row group. If None, the + row group size will be the minimum of the Table size and + 1024 * 1024. 
+{} +**kwargs : optional + Additional options for ParquetWriter + +Examples +-------- +{} +""".format(_parquet_writer_arg_docs, _write_table_example) + + +def write_to_dataset(table, root_path, partition_cols=None, + filesystem=None, use_legacy_dataset=None, + schema=None, partitioning=None, + basename_template=None, use_threads=None, + file_visitor=None, existing_data_behavior=None, + **kwargs): + """Wrapper around dataset.write_dataset for writing a Table to + Parquet format by partitions. + For each combination of partition columns and values, + a subdirectories are created in the following + manner: + + root_dir/ + group1=value1 + group2=value1 + .parquet + group2=value2 + .parquet + group1=valueN + group2=value1 + .parquet + group2=valueN + .parquet + + Parameters + ---------- + table : pyarrow.Table + root_path : str, pathlib.Path + The root directory of the dataset. + partition_cols : list, + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. + use_legacy_dataset : bool, optional + Deprecated and has no effect from PyArrow version 15.0.0. + schema : Schema, optional + This Schema of the dataset. + partitioning : Partitioning or list[str], optional + The partitioning scheme specified with the + ``pyarrow.dataset.partitioning()`` function or a list of field names. + When providing a list of field names, you can use + ``partitioning_flavor`` to drive which partitioning type should be + used. + basename_template : str, optional + A template string used to generate basenames of written data files. + The token '{i}' will be replaced with an automatically incremented + integer. If not specified, it defaults to "guid-{i}.parquet". + use_threads : bool, default True + Write files in parallel. If enabled, then maximum parallelism will be + used determined by the number of available CPU cores. + file_visitor : function + If set, this function will be called with a WrittenFile instance + for each file created during the call. This object will have both + a path attribute and a metadata attribute. + + The path attribute will be a string containing the path to + the created file. + + The metadata attribute will be the parquet metadata of the file. + This metadata will have the file path attribute set and can be used + to build a _metadata file. The metadata attribute will be None if + the format is not parquet. + + Example visitor which simple collects the filenames created:: + + visited_paths = [] + + def file_visitor(written_file): + visited_paths.append(written_file.path) + + existing_data_behavior : 'overwrite_or_ignore' | 'error' | \ +'delete_matching' + Controls how the dataset will handle data that already exists in + the destination. The default behaviour is 'overwrite_or_ignore'. + + 'overwrite_or_ignore' will ignore any existing data and will + overwrite files with the same name as an output file. Other + existing files will be ignored. This behavior, in combination + with a unique basename_template for each write, will allow for + an append workflow. + + 'error' will raise an error if any data exists in the destination. + + 'delete_matching' is useful when you are writing a partitioned + dataset. The first time each partition directory is encountered + the entire directory will be deleted. 
This allows you to overwrite + old partitions completely. + **kwargs : dict, + Used as additional kwargs for :func:`pyarrow.dataset.write_dataset` + function for matching kwargs, and remainder to + :func:`pyarrow.dataset.ParquetFileFormat.make_write_options`. + See the docstring of :func:`write_table` and + :func:`pyarrow.dataset.write_dataset` for the available options. + Using `metadata_collector` in kwargs allows one to collect the + file metadata instances of dataset pieces. The file paths in the + ColumnChunkMetaData will be set relative to `root_path`. + + Examples + -------- + Generate an example PyArrow Table: + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + + and write it to a partitioned dataset: + + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_name_3', + ... partition_cols=['year']) + >>> pq.ParquetDataset('dataset_name_3').files + ['dataset_name_3/year=2019/...-0.parquet', ... + + Write a single Parquet file into the root folder: + + >>> pq.write_to_dataset(table, root_path='dataset_name_4') + >>> pq.ParquetDataset('dataset_name_4/').files + ['dataset_name_4/...-0.parquet'] + """ + if use_legacy_dataset is not None: + warnings.warn( + "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 " + "and will be removed in a future version.", + FutureWarning, stacklevel=2) + + metadata_collector = kwargs.pop('metadata_collector', None) + + # Check for conflicting keywords + msg_confl = ( + "The '{1}' argument is not supported. " + "Use only '{0}' instead." + ) + if partition_cols is not None and partitioning is not None: + raise ValueError(msg_confl.format("partitioning", + "partition_cols")) + + if metadata_collector is not None and file_visitor is not None: + raise ValueError(msg_confl.format("file_visitor", + "metadata_collector")) + + import pyarrow.dataset as ds + + # extract write_dataset specific options + # reset assumed to go to make_write_options + write_dataset_kwargs = dict() + for key in inspect.signature(ds.write_dataset).parameters: + if key in kwargs: + write_dataset_kwargs[key] = kwargs.pop(key) + write_dataset_kwargs['max_rows_per_group'] = kwargs.pop( + 'row_group_size', kwargs.pop("chunk_size", None) + ) + + if metadata_collector is not None: + def file_visitor(written_file): + metadata_collector.append(written_file.metadata) + + # map format arguments + parquet_format = ds.ParquetFileFormat() + write_options = parquet_format.make_write_options(**kwargs) + + # map old filesystems to new one + if filesystem is not None: + filesystem = _ensure_filesystem(filesystem) + + if partition_cols: + part_schema = table.select(partition_cols).schema + partitioning = ds.partitioning(part_schema, flavor="hive") + + if basename_template is None: + basename_template = guid() + '-{i}.parquet' + + if existing_data_behavior is None: + existing_data_behavior = 'overwrite_or_ignore' + + ds.write_dataset( + table, root_path, filesystem=filesystem, + format=parquet_format, file_options=write_options, schema=schema, + partitioning=partitioning, use_threads=use_threads, + file_visitor=file_visitor, + basename_template=basename_template, + existing_data_behavior=existing_data_behavior, + **write_dataset_kwargs) + return + + +def write_metadata(schema, where, metadata_collector=None, filesystem=None, + **kwargs): + """ + Write metadata-only Parquet file from 
schema. This can be used with + `write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar + files. + + Parameters + ---------- + schema : pyarrow.Schema + where : string or pyarrow.NativeFile + metadata_collector : list + where to collect metadata information. + filesystem : FileSystem, default None + If nothing passed, will be inferred from `where` if path-like, else + `where` is already a file-like object so no filesystem is needed. + **kwargs : dict, + Additional kwargs for ParquetWriter class. See docstring for + `ParquetWriter` for more information. + + Examples + -------- + Generate example data: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + + Write a dataset and collect metadata information. + + >>> metadata_collector = [] + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset( + ... table, 'dataset_metadata', + ... metadata_collector=metadata_collector) + + Write the `_common_metadata` parquet file without row groups statistics. + + >>> pq.write_metadata( + ... table.schema, 'dataset_metadata/_common_metadata') + + Write the `_metadata` parquet file with row groups statistics. + + >>> pq.write_metadata( + ... table.schema, 'dataset_metadata/_metadata', + ... metadata_collector=metadata_collector) + """ + filesystem, where = _resolve_filesystem_and_path(where, filesystem) + + if hasattr(where, "seek"): # file-like + cursor_position = where.tell() + + writer = ParquetWriter(where, schema, filesystem, **kwargs) + writer.close() + + if metadata_collector is not None: + # ParquetWriter doesn't expose the metadata until it's written. Write + # it and read it again. + metadata = read_metadata(where, filesystem=filesystem) + if hasattr(where, "seek"): + where.seek(cursor_position) # file-like, set cursor back. + + for m in metadata_collector: + metadata.append_row_groups(m) + if filesystem is not None: + with filesystem.open_output_stream(where) as f: + metadata.write_metadata_file(f) + else: + metadata.write_metadata_file(where) + + +def read_metadata(where, memory_map=False, decryption_properties=None, + filesystem=None): + """ + Read FileMetaData from footer of a single Parquet file. + + Parameters + ---------- + where : str (file path) or file-like object + memory_map : bool, default False + Create memory map when the source is a file path. + decryption_properties : FileDecryptionProperties, default None + Decryption properties for reading encrypted Parquet files. + filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. + + Returns + ------- + metadata : FileMetaData + The metadata of the Parquet file + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.parquet as pq + >>> table = pa.table({'n_legs': [4, 5, 100], + ... 'animal': ["Dog", "Brittle stars", "Centipede"]}) + >>> pq.write_table(table, 'example.parquet') + + >>> pq.read_metadata('example.parquet') + + created_by: parquet-cpp-arrow version ... + num_columns: 2 + num_rows: 3 + num_row_groups: 1 + format_version: 2.6 + serialized_size: ... 
+ """ + filesystem, where = _resolve_filesystem_and_path(where, filesystem) + file_ctx = nullcontext() + if filesystem is not None: + file_ctx = where = filesystem.open_input_file(where) + + with file_ctx: + file = ParquetFile(where, memory_map=memory_map, + decryption_properties=decryption_properties) + return file.metadata + + +def read_schema(where, memory_map=False, decryption_properties=None, + filesystem=None): + """ + Read effective Arrow schema from Parquet file metadata. + + Parameters + ---------- + where : str (file path) or file-like object + memory_map : bool, default False + Create memory map when the source is a file path. + decryption_properties : FileDecryptionProperties, default None + Decryption properties for reading encrypted Parquet files. + filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. + + Returns + ------- + schema : pyarrow.Schema + The schema of the Parquet file + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.parquet as pq + >>> table = pa.table({'n_legs': [4, 5, 100], + ... 'animal': ["Dog", "Brittle stars", "Centipede"]}) + >>> pq.write_table(table, 'example.parquet') + + >>> pq.read_schema('example.parquet') + n_legs: int64 + animal: string + """ + filesystem, where = _resolve_filesystem_and_path(where, filesystem) + file_ctx = nullcontext() + if filesystem is not None: + file_ctx = where = filesystem.open_input_file(where) + + with file_ctx: + file = ParquetFile( + where, memory_map=memory_map, + decryption_properties=decryption_properties) + return file.schema.to_arrow_schema() + + +__all__ = ( + "ColumnChunkMetaData", + "ColumnSchema", + "FileDecryptionProperties", + "FileEncryptionProperties", + "FileMetaData", + "ParquetDataset", + "ParquetFile", + "ParquetLogicalType", + "ParquetReader", + "ParquetSchema", + "ParquetWriter", + "RowGroupMetaData", + "SortingColumn", + "Statistics", + "read_metadata", + "read_pandas", + "read_schema", + "read_table", + "write_metadata", + "write_table", + "write_to_dataset", + "_filters_to_expression", + "filters_to_expression", +) diff --git a/venv/lib/python3.10/site-packages/pyarrow/parquet/encryption.py b/venv/lib/python3.10/site-packages/pyarrow/parquet/encryption.py new file mode 100644 index 0000000000000000000000000000000000000000..df6eed913fa52da8c8c9f497f12953b6178439b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/parquet/encryption.py @@ -0,0 +1,23 @@ +# pylint: disable=unused-wildcard-import, unused-import + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
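+# Illustrative sketch (not part of the upstream module): the classes
+# re-exported below are what the ``encryption_properties`` /
+# ``decryption_properties`` options described in pyarrow.parquet expect.
+# The KMS client factory, connection settings and key identifiers shown
+# here are assumptions for illustration only, not a definitive setup.
+#
+#   import pyarrow.parquet as pq
+#   import pyarrow.parquet.encryption as pe
+#
+#   kms_config = pe.KmsConnectionConfig()                # fill in real KMS details
+#   crypto_factory = pe.CryptoFactory(my_kms_client_factory)  # assumed factory callable
+#   encryption_config = pe.EncryptionConfiguration(
+#       footer_key="footer_key_id",                      # assumed key ids
+#       column_keys={"column_key_id": ["animal"]})
+#   file_encryption_properties = crypto_factory.file_encryption_properties(
+#       kms_config, encryption_config)
+#   pq.write_table(table, "example_encrypted.parquet",
+#                  encryption_properties=file_encryption_properties)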
+from pyarrow._parquet_encryption import (CryptoFactory, # noqa + EncryptionConfiguration, + DecryptionConfiguration, + KmsConnectionConfig, + KmsClient) diff --git a/venv/lib/python3.10/site-packages/pyarrow/scalar.pxi b/venv/lib/python3.10/site-packages/pyarrow/scalar.pxi new file mode 100644 index 0000000000000000000000000000000000000000..41bfde39adb6fb0d468fcc6d85fd427294bd5845 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/scalar.pxi @@ -0,0 +1,1220 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections +from cython cimport binding + + +cdef class Scalar(_Weakrefable): + """ + The base class for scalars. + """ + + def __init__(self): + raise TypeError("Do not call {}'s constructor directly, use " + "pa.scalar() instead.".format(self.__class__.__name__)) + + cdef void init(self, const shared_ptr[CScalar]& wrapped): + self.wrapped = wrapped + + @staticmethod + cdef wrap(const shared_ptr[CScalar]& wrapped): + cdef: + Scalar self + Type type_id = wrapped.get().type.get().id() + shared_ptr[CDataType] sp_data_type = wrapped.get().type + + if type_id == _Type_NA: + return _NULL + + if type_id not in _scalar_classes: + raise NotImplementedError( + "Wrapping scalar of type " + frombytes(sp_data_type.get().ToString())) + + typ = get_scalar_class_from_type(sp_data_type) + self = typ.__new__(typ) + self.init(wrapped) + + return self + + cdef inline shared_ptr[CScalar] unwrap(self) nogil: + return self.wrapped + + @property + def type(self): + """ + Data type of the Scalar object. + """ + return pyarrow_wrap_data_type(self.wrapped.get().type) + + @property + def is_valid(self): + """ + Holds a valid (non-null) value. + """ + return self.wrapped.get().is_valid + + def cast(self, object target_type=None, safe=None, options=None, memory_pool=None): + """ + Cast scalar value to another data type. + + See :func:`pyarrow.compute.cast` for usage. + + Parameters + ---------- + target_type : DataType, default None + Type to cast scalar to. + safe : boolean, default True + Whether to check for conversion errors such as overflow. + options : CastOptions, default None + Additional checks pass by CastOptions + memory_pool : MemoryPool, optional + memory pool to use for allocations during function execution. + + Returns + ------- + scalar : A Scalar of the given target data type. + """ + return _pc().cast(self, target_type, safe=safe, + options=options, memory_pool=memory_pool) + + def validate(self, *, full=False): + """ + Perform validation checks. An exception is raised if validation fails. + + By default only cheap validation checks are run. Pass `full=True` + for thorough validation checks (potentially O(n)). + + Parameters + ---------- + full : bool, default False + If True, run expensive checks, otherwise cheap checks only. 
+ + Raises + ------ + ArrowInvalid + """ + if full: + with nogil: + check_status(self.wrapped.get().ValidateFull()) + else: + with nogil: + check_status(self.wrapped.get().Validate()) + + def __repr__(self): + return ''.format( + self.__class__.__name__, self.as_py() + ) + + def __str__(self): + return str(self.as_py()) + + def equals(self, Scalar other not None): + """ + Parameters + ---------- + other : pyarrow.Scalar + + Returns + ------- + bool + """ + return self.wrapped.get().Equals(other.unwrap().get()[0]) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + def __hash__(self): + cdef CScalarHash hasher + return hasher(self.wrapped) + + def __reduce__(self): + return scalar, (self.as_py(), self.type) + + def as_py(self): + raise NotImplementedError() + + +_NULL = NA = None + + +cdef class NullScalar(Scalar): + """ + Concrete class for null scalars. + """ + + def __cinit__(self): + global NA + if NA is not None: + raise RuntimeError('Cannot create multiple NullScalar instances') + self.init(shared_ptr[CScalar](new CNullScalar())) + + def __init__(self): + pass + + def as_py(self): + """ + Return this value as a Python None. + """ + return None + + +_NULL = NA = NullScalar() + + +cdef class BooleanScalar(Scalar): + """ + Concrete class for boolean scalars. + """ + + def as_py(self): + """ + Return this value as a Python bool. + """ + cdef CBooleanScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt8Scalar(Scalar): + """ + Concrete class for uint8 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt8Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int8Scalar(Scalar): + """ + Concrete class for int8 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt8Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt16Scalar(Scalar): + """ + Concrete class for uint16 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt16Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int16Scalar(Scalar): + """ + Concrete class for int16 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt16Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt32Scalar(Scalar): + """ + Concrete class for uint32 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int32Scalar(Scalar): + """ + Concrete class for int32 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class UInt64Scalar(Scalar): + """ + Concrete class for uint64 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CUInt64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Int64Scalar(Scalar): + """ + Concrete class for int64 scalars. + """ + + def as_py(self): + """ + Return this value as a Python int. + """ + cdef CInt64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class HalfFloatScalar(Scalar): + """ + Concrete class for float scalars. 
+ """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CHalfFloatScalar* sp = self.wrapped.get() + return PyHalf_FromHalf(sp.value) if sp.is_valid else None + + +cdef class FloatScalar(Scalar): + """ + Concrete class for float scalars. + """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CFloatScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class DoubleScalar(Scalar): + """ + Concrete class for double scalars. + """ + + def as_py(self): + """ + Return this value as a Python float. + """ + cdef CDoubleScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + +cdef class Decimal128Scalar(Scalar): + """ + Concrete class for decimal128 scalars. + """ + + def as_py(self): + """ + Return this value as a Python Decimal. + """ + cdef: + CDecimal128Scalar* sp = self.wrapped.get() + CDecimal128Type* dtype = sp.type.get() + if sp.is_valid: + return _pydecimal.Decimal( + frombytes(sp.value.ToString(dtype.scale())) + ) + else: + return None + + +cdef class Decimal256Scalar(Scalar): + """ + Concrete class for decimal256 scalars. + """ + + def as_py(self): + """ + Return this value as a Python Decimal. + """ + cdef: + CDecimal256Scalar* sp = self.wrapped.get() + CDecimal256Type* dtype = sp.type.get() + if sp.is_valid: + return _pydecimal.Decimal( + frombytes(sp.value.ToString(dtype.scale())) + ) + else: + return None + + +cdef class Date32Scalar(Scalar): + """ + Concrete class for date32 scalars. + """ + + @property + def value(self): + cdef CDate32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.datetime instance. + """ + cdef CDate32Scalar* sp = self.wrapped.get() + + if sp.is_valid: + # shift to seconds since epoch + return ( + datetime.date(1970, 1, 1) + datetime.timedelta(days=sp.value) + ) + else: + return None + + +cdef class Date64Scalar(Scalar): + """ + Concrete class for date64 scalars. + """ + + @property + def value(self): + cdef CDate64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.datetime instance. + """ + cdef CDate64Scalar* sp = self.wrapped.get() + + if sp.is_valid: + return ( + datetime.date(1970, 1, 1) + + datetime.timedelta(days=sp.value / 86400000) + ) + else: + return None + + +def _datetime_from_int(int64_t value, TimeUnit unit, tzinfo=None): + if unit == TimeUnit_SECOND: + delta = datetime.timedelta(seconds=value) + elif unit == TimeUnit_MILLI: + delta = datetime.timedelta(milliseconds=value) + elif unit == TimeUnit_MICRO: + delta = datetime.timedelta(microseconds=value) + else: + # TimeUnit_NANO: prefer pandas timestamps if available + if _pandas_api.have_pandas: + return _pandas_api.pd.Timestamp(value, tz=tzinfo, unit='ns') + # otherwise safely truncate to microsecond resolution datetime + if value % 1000 != 0: + raise ValueError( + "Nanosecond resolution temporal type {} is not safely " + "convertible to microseconds to convert to datetime.datetime. 
" + "Install pandas to return as Timestamp with nanosecond " + "support or access the .value attribute.".format(value) + ) + delta = datetime.timedelta(microseconds=value // 1000) + + dt = datetime.datetime(1970, 1, 1) + delta + # adjust timezone if set to the datatype + if tzinfo is not None: + dt = dt.replace(tzinfo=datetime.timezone.utc).astimezone(tzinfo) + + return dt + + +cdef class Time32Scalar(Scalar): + """ + Concrete class for time32 scalars. + """ + + @property + def value(self): + cdef CTime32Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.timedelta instance. + """ + cdef: + CTime32Scalar* sp = self.wrapped.get() + CTime32Type* dtype = sp.type.get() + + if sp.is_valid: + return _datetime_from_int(sp.value, unit=dtype.unit()).time() + else: + return None + + +cdef class Time64Scalar(Scalar): + """ + Concrete class for time64 scalars. + """ + + @property + def value(self): + cdef CTime64Scalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python datetime.timedelta instance. + """ + cdef: + CTime64Scalar* sp = self.wrapped.get() + CTime64Type* dtype = sp.type.get() + + if sp.is_valid: + return _datetime_from_int(sp.value, unit=dtype.unit()).time() + else: + return None + + +cdef class TimestampScalar(Scalar): + """ + Concrete class for timestamp scalars. + """ + + @property + def value(self): + cdef CTimestampScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Pandas Timestamp instance (if units are + nanoseconds and pandas is available), otherwise as a Python + datetime.datetime instance. + """ + cdef: + CTimestampScalar* sp = self.wrapped.get() + CTimestampType* dtype = sp.type.get() + + if not sp.is_valid: + return None + + if not dtype.timezone().empty(): + tzinfo = string_to_tzinfo(frombytes(dtype.timezone())) + else: + tzinfo = None + + return _datetime_from_int(sp.value, unit=dtype.unit(), tzinfo=tzinfo) + + def __repr__(self): + """ + Return the representation of TimestampScalar using `strftime` to avoid + original repr datetime values being out of range. + """ + cdef: + CTimestampScalar* sp = self.wrapped.get() + CTimestampType* dtype = sp.type.get() + + if not dtype.timezone().empty(): + type_format = str(_pc().strftime(self, format="%Y-%m-%dT%H:%M:%S%z")) + else: + type_format = str(_pc().strftime(self)) + return ''.format( + self.__class__.__name__, type_format + ) + + +cdef class DurationScalar(Scalar): + """ + Concrete class for duration scalars. + """ + + @property + def value(self): + cdef CDurationScalar* sp = self.wrapped.get() + return sp.value if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Pandas Timedelta instance (if units are + nanoseconds and pandas is available), otherwise as a Python + datetime.timedelta instance. 
+ """ + cdef: + CDurationScalar* sp = self.wrapped.get() + CDurationType* dtype = sp.type.get() + TimeUnit unit = dtype.unit() + + if not sp.is_valid: + return None + + if unit == TimeUnit_SECOND: + return datetime.timedelta(seconds=sp.value) + elif unit == TimeUnit_MILLI: + return datetime.timedelta(milliseconds=sp.value) + elif unit == TimeUnit_MICRO: + return datetime.timedelta(microseconds=sp.value) + else: + # TimeUnit_NANO: prefer pandas timestamps if available + if _pandas_api.have_pandas: + return _pandas_api.pd.Timedelta(sp.value, unit='ns') + # otherwise safely truncate to microsecond resolution timedelta + if sp.value % 1000 != 0: + raise ValueError( + "Nanosecond duration {} is not safely convertible to " + "microseconds to convert to datetime.timedelta. Install " + "pandas to return as Timedelta with nanosecond support or " + "access the .value attribute.".format(sp.value) + ) + return datetime.timedelta(microseconds=sp.value // 1000) + + +cdef class MonthDayNanoIntervalScalar(Scalar): + """ + Concrete class for month, day, nanosecond interval scalars. + """ + + @property + def value(self): + """ + Same as self.as_py() + """ + return self.as_py() + + def as_py(self): + """ + Return this value as a pyarrow.MonthDayNano. + """ + cdef: + PyObject* val + CMonthDayNanoIntervalScalar* scalar + scalar = self.wrapped.get() + val = GetResultValue(MonthDayNanoIntervalScalarToPyObject( + deref(scalar))) + return PyObject_to_object(val) + + +cdef class BinaryScalar(Scalar): + """ + Concrete class for binary-like scalars. + """ + + def as_buffer(self): + """ + Return a view over this value as a Buffer object. + """ + cdef CBaseBinaryScalar* sp = self.wrapped.get() + return pyarrow_wrap_buffer(sp.value) if sp.is_valid else None + + def as_py(self): + """ + Return this value as a Python bytes. + """ + buffer = self.as_buffer() + return None if buffer is None else buffer.to_pybytes() + + +cdef class LargeBinaryScalar(BinaryScalar): + pass + + +cdef class FixedSizeBinaryScalar(BinaryScalar): + pass + + +cdef class StringScalar(BinaryScalar): + """ + Concrete class for string-like (utf8) scalars. + """ + + def as_py(self): + """ + Return this value as a Python string. + """ + buffer = self.as_buffer() + return None if buffer is None else str(buffer, 'utf8') + + +cdef class LargeStringScalar(StringScalar): + pass + + +cdef class BinaryViewScalar(BinaryScalar): + pass + + +cdef class StringViewScalar(StringScalar): + pass + + +cdef class ListScalar(Scalar): + """ + Concrete class for list-like scalars. + """ + + @property + def values(self): + cdef CBaseListScalar* sp = self.wrapped.get() + if sp.is_valid: + return pyarrow_wrap_array(sp.value) + else: + return None + + def __len__(self): + """ + Return the number of values. + """ + return len(self.values) + + def __getitem__(self, i): + """ + Return the value at the given index. + """ + return self.values[_normalize_index(i, len(self))] + + def __iter__(self): + """ + Iterate over this element's values. + """ + return iter(self.values) + + def as_py(self): + """ + Return this value as a Python list. + """ + arr = self.values + return None if arr is None else arr.to_pylist() + + +cdef class FixedSizeListScalar(ListScalar): + pass + + +cdef class LargeListScalar(ListScalar): + pass + + +cdef class ListViewScalar(ListScalar): + pass + + +cdef class LargeListViewScalar(ListScalar): + pass + + +cdef class StructScalar(Scalar, collections.abc.Mapping): + """ + Concrete class for struct scalars. 
+ """ + + def __len__(self): + cdef CStructScalar* sp = self.wrapped.get() + return sp.value.size() + + def __iter__(self): + cdef: + CStructScalar* sp = self.wrapped.get() + CStructType* dtype = sp.type.get() + vector[shared_ptr[CField]] fields = dtype.fields() + + for i in range(dtype.num_fields()): + yield frombytes(fields[i].get().name()) + + def items(self): + return ((key, self[i]) for i, key in enumerate(self)) + + def __contains__(self, key): + return key in list(self) + + def __getitem__(self, key): + """ + Return the child value for the given field. + + Parameters + ---------- + index : Union[int, str] + Index / position or name of the field. + + Returns + ------- + result : Scalar + """ + cdef: + CFieldRef ref + CStructScalar* sp = self.wrapped.get() + + if isinstance(key, (bytes, str)): + ref = CFieldRef( tobytes(key)) + elif isinstance(key, int): + ref = CFieldRef( key) + else: + raise TypeError('Expected integer or string index') + + try: + return Scalar.wrap(GetResultValue(sp.field(ref))) + except ArrowInvalid as exc: + if isinstance(key, int): + raise IndexError(key) from exc + else: + raise KeyError(key) from exc + + def as_py(self): + """ + Return this value as a Python dict. + """ + if self.is_valid: + try: + return {k: self[k].as_py() for k in self.keys()} + except KeyError: + raise ValueError( + "Converting to Python dictionary is not supported when " + "duplicate field names are present") + else: + return None + + def _as_py_tuple(self): + # a version that returns a tuple instead of dict to support repr/str + # with the presence of duplicate field names + if self.is_valid: + return [(key, self[i].as_py()) for i, key in enumerate(self)] + else: + return None + + def __repr__(self): + return ''.format( + self.__class__.__name__, self._as_py_tuple() + ) + + def __str__(self): + return str(self._as_py_tuple()) + + +cdef class MapScalar(ListScalar): + """ + Concrete class for map scalars. + """ + + def __getitem__(self, i): + """ + Return the value at the given index. + """ + arr = self.values + if arr is None: + raise IndexError(i) + dct = arr[_normalize_index(i, len(arr))] + return (dct[self.type.key_field.name], dct[self.type.item_field.name]) + + def __iter__(self): + """ + Iterate over this element's values. + """ + arr = self.values + if arr is None: + return + for k, v in zip(arr.field(self.type.key_field.name), arr.field(self.type.item_field.name)): + yield (k.as_py(), v.as_py()) + + def as_py(self): + """ + Return this value as a Python list. + """ + cdef CStructScalar* sp = self.wrapped.get() + return list(self) if sp.is_valid else None + + +cdef class DictionaryScalar(Scalar): + """ + Concrete class for dictionary-encoded scalars. 
+ """ + + @staticmethod + @binding(True) # Required for cython < 3 + def _reconstruct(type, is_valid, index, dictionary): + cdef: + CDictionaryScalarIndexAndDictionary value + shared_ptr[CDictionaryScalar] wrapped + DataType type_ + Scalar index_ + Array dictionary_ + + type_ = ensure_type(type, allow_none=False) + if not isinstance(type_, DictionaryType): + raise TypeError('Must pass a DictionaryType instance') + + if isinstance(index, Scalar): + if not index.type.equals(type.index_type): + raise TypeError("The Scalar value passed as index must have " + "identical type to the dictionary type's " + "index_type") + index_ = index + else: + index_ = scalar(index, type=type_.index_type) + + if isinstance(dictionary, Array): + if not dictionary.type.equals(type.value_type): + raise TypeError("The Array passed as dictionary must have " + "identical type to the dictionary type's " + "value_type") + dictionary_ = dictionary + else: + dictionary_ = array(dictionary, type=type_.value_type) + + value.index = pyarrow_unwrap_scalar(index_) + value.dictionary = pyarrow_unwrap_array(dictionary_) + + wrapped = make_shared[CDictionaryScalar]( + value, pyarrow_unwrap_data_type(type_), (is_valid) + ) + return Scalar.wrap( wrapped) + + def __reduce__(self): + return DictionaryScalar._reconstruct, ( + self.type, self.is_valid, self.index, self.dictionary + ) + + @property + def index(self): + """ + Return this value's underlying index as a scalar. + """ + cdef CDictionaryScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value.index) + + @property + def value(self): + """ + Return the encoded value as a scalar. + """ + cdef CDictionaryScalar* sp = self.wrapped.get() + return Scalar.wrap(GetResultValue(sp.GetEncodedValue())) + + @property + def dictionary(self): + cdef CDictionaryScalar* sp = self.wrapped.get() + return pyarrow_wrap_array(sp.value.dictionary) + + def as_py(self): + """ + Return this encoded value as a Python object. + """ + return self.value.as_py() if self.is_valid else None + + +cdef class RunEndEncodedScalar(Scalar): + """ + Concrete class for RunEndEncoded scalars. + """ + @property + def value(self): + """ + Return underlying value as a scalar. + """ + cdef CRunEndEncodedScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value) + + def as_py(self): + """ + Return underlying value as a Python object. + """ + return self.value.as_py() + + +cdef class UnionScalar(Scalar): + """ + Concrete class for Union scalars. + """ + + @property + def value(self): + """ + Return underlying value as a scalar. + """ + cdef CSparseUnionScalar* sp + cdef CDenseUnionScalar* dp + if self.type.id == _Type_SPARSE_UNION: + sp = self.wrapped.get() + return Scalar.wrap(sp.value[sp.child_id]) if sp.is_valid else None + else: + dp = self.wrapped.get() + return Scalar.wrap(dp.value) if dp.is_valid else None + + def as_py(self): + """ + Return underlying value as a Python object. + """ + value = self.value + return None if value is None else value.as_py() + + @property + def type_code(self): + """ + Return the union type code for this scalar. + """ + cdef CUnionScalar* sp = self.wrapped.get() + return sp.type_code + + +cdef class ExtensionScalar(Scalar): + """ + Concrete class for Extension scalars. + """ + + @property + def value(self): + """ + Return storage value as a scalar. + """ + cdef CExtensionScalar* sp = self.wrapped.get() + return Scalar.wrap(sp.value) if sp.is_valid else None + + def as_py(self): + """ + Return this scalar as a Python object. 
+ """ + return None if self.value is None else self.value.as_py() + + @staticmethod + def from_storage(BaseExtensionType typ, value): + """ + Construct ExtensionScalar from type and storage value. + + Parameters + ---------- + typ : DataType + The extension type for the result scalar. + value : object + The storage value for the result scalar. + + Returns + ------- + ext_scalar : ExtensionScalar + """ + cdef: + shared_ptr[CExtensionScalar] sp_scalar + shared_ptr[CScalar] sp_storage + CExtensionScalar* ext_scalar + + if value is None: + storage = None + elif isinstance(value, Scalar): + if value.type != typ.storage_type: + raise TypeError("Incompatible storage type {0} " + "for extension type {1}" + .format(value.type, typ)) + storage = value + else: + storage = scalar(value, typ.storage_type) + + cdef c_bool is_valid = storage is not None and storage.is_valid + if is_valid: + sp_storage = pyarrow_unwrap_scalar(storage) + else: + sp_storage = MakeNullScalar(( typ.storage_type).sp_type) + sp_scalar = make_shared[CExtensionScalar](sp_storage, typ.sp_type, + is_valid) + with nogil: + check_status(sp_scalar.get().Validate()) + return pyarrow_wrap_scalar( sp_scalar) + + +cdef class FixedShapeTensorScalar(ExtensionScalar): + """ + Concrete class for fixed shape tensor extension scalar. + """ + + def to_numpy(self): + """ + Convert fixed shape tensor scalar to a numpy.ndarray. + + The resulting ndarray's shape matches the permuted shape of the + fixed shape tensor scalar. + The conversion is zero-copy. + + Returns + ------- + numpy.ndarray + """ + return self.to_tensor().to_numpy() + + def to_tensor(self): + """ + Convert fixed shape tensor extension scalar to a pyarrow.Tensor, using shape + and strides derived from corresponding FixedShapeTensorType. + + The conversion is zero-copy. + + Returns + ------- + pyarrow.Tensor + Tensor represented stored in FixedShapeTensorScalar. 
+ """ + cdef: + CFixedShapeTensorType* c_type = static_pointer_cast[CFixedShapeTensorType, CDataType]( + self.wrapped.get().type).get() + shared_ptr[CExtensionScalar] scalar = static_pointer_cast[CExtensionScalar, CScalar](self.wrapped) + shared_ptr[CTensor] ctensor + + with nogil: + ctensor = GetResultValue(c_type.MakeTensor(scalar)) + return pyarrow_wrap_tensor(ctensor) + + +cdef dict _scalar_classes = { + _Type_BOOL: BooleanScalar, + _Type_UINT8: UInt8Scalar, + _Type_UINT16: UInt16Scalar, + _Type_UINT32: UInt32Scalar, + _Type_UINT64: UInt64Scalar, + _Type_INT8: Int8Scalar, + _Type_INT16: Int16Scalar, + _Type_INT32: Int32Scalar, + _Type_INT64: Int64Scalar, + _Type_HALF_FLOAT: HalfFloatScalar, + _Type_FLOAT: FloatScalar, + _Type_DOUBLE: DoubleScalar, + _Type_DECIMAL128: Decimal128Scalar, + _Type_DECIMAL256: Decimal256Scalar, + _Type_DATE32: Date32Scalar, + _Type_DATE64: Date64Scalar, + _Type_TIME32: Time32Scalar, + _Type_TIME64: Time64Scalar, + _Type_TIMESTAMP: TimestampScalar, + _Type_DURATION: DurationScalar, + _Type_BINARY: BinaryScalar, + _Type_LARGE_BINARY: LargeBinaryScalar, + _Type_FIXED_SIZE_BINARY: FixedSizeBinaryScalar, + _Type_BINARY_VIEW: BinaryViewScalar, + _Type_STRING: StringScalar, + _Type_LARGE_STRING: LargeStringScalar, + _Type_STRING_VIEW: StringViewScalar, + _Type_LIST: ListScalar, + _Type_LARGE_LIST: LargeListScalar, + _Type_FIXED_SIZE_LIST: FixedSizeListScalar, + _Type_LIST_VIEW: ListViewScalar, + _Type_LARGE_LIST_VIEW: LargeListViewScalar, + _Type_STRUCT: StructScalar, + _Type_MAP: MapScalar, + _Type_DICTIONARY: DictionaryScalar, + _Type_RUN_END_ENCODED: RunEndEncodedScalar, + _Type_SPARSE_UNION: UnionScalar, + _Type_DENSE_UNION: UnionScalar, + _Type_INTERVAL_MONTH_DAY_NANO: MonthDayNanoIntervalScalar, + _Type_EXTENSION: ExtensionScalar, +} + + +cdef object get_scalar_class_from_type( + const shared_ptr[CDataType]& sp_data_type): + cdef CDataType* data_type = sp_data_type.get() + if data_type == NULL: + raise ValueError('Scalar data type was NULL') + + if data_type.id() == _Type_EXTENSION: + py_ext_data_type = pyarrow_wrap_data_type(sp_data_type) + return py_ext_data_type.__arrow_ext_scalar_class__() + else: + return _scalar_classes[data_type.id()] + + +def scalar(value, type=None, *, from_pandas=None, MemoryPool memory_pool=None): + """ + Create a pyarrow.Scalar instance from a Python object. + + Parameters + ---------- + value : Any + Python object coercible to arrow's type system. + type : pyarrow.DataType + Explicit type to attempt to coerce to, otherwise will be inferred from + the value. + from_pandas : bool, default None + Use pandas's semantics for inferring nulls from values in + ndarray-like data. Defaults to False if not passed explicitly by user, + or True if a pandas object is passed in. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the currently-set default + memory pool. 
+ + Returns + ------- + scalar : pyarrow.Scalar + + Examples + -------- + >>> import pyarrow as pa + + >>> pa.scalar(42) + + + >>> pa.scalar("string") + + + >>> pa.scalar([1, 2]) + + + >>> pa.scalar([1, 2], type=pa.list_(pa.int16())) + + """ + cdef: + DataType ty + PyConversionOptions options + shared_ptr[CScalar] scalar + shared_ptr[CArray] array + shared_ptr[CChunkedArray] chunked + bint is_pandas_object = False + CMemoryPool* pool + + type = ensure_type(type, allow_none=True) + pool = maybe_unbox_memory_pool(memory_pool) + + if _is_array_like(value): + value = get_values(value, &is_pandas_object) + + options.size = 1 + + if type is not None: + ty = ensure_type(type) + options.type = ty.sp_type + + if from_pandas is None: + options.from_pandas = is_pandas_object + else: + options.from_pandas = from_pandas + + value = [value] + with nogil: + chunked = GetResultValue(ConvertPySequence(value, None, options, pool)) + + # get the first chunk + assert chunked.get().num_chunks() == 1 + array = chunked.get().chunk(0) + + # retrieve the scalar from the first position + scalar = GetResultValue(array.get().GetScalar(0)) + return Scalar.wrap(scalar) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/__init__.py b/venv/lib/python3.10/site-packages/pyarrow/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py b/venv/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py new file mode 100644 index 0000000000000000000000000000000000000000..1e769f49d942b169a9473ba777b29666355d7062 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/arrow_39313.py @@ -0,0 +1,47 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# This file is called from a test in test_pandas.py. 
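+#
+# What follows starts many threads that spin until ``wait`` is flipped and then
+# call ``_pandas_api.is_data_frame`` at (roughly) the same time, so the lazy
+# pandas shim is hit concurrently.  A minimal standalone sketch of the same
+# idea using only public APIs (the helper name ``check_concurrently`` is
+# illustrative and not part of this test):
+#
+#     from concurrent.futures import ThreadPoolExecutor
+#     import pandas as pd
+#     import pyarrow as pa
+#
+#     def check_concurrently(n=8):
+#         df = pd.DataFrame({"x": [1, 2, 3]})
+#         with ThreadPoolExecutor(n) as pool:
+#             tables = list(pool.map(lambda _: pa.Table.from_pandas(df), range(n)))
+#         assert all(t.num_rows == 3 for t in tables)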
+ +from threading import Thread + +import pandas as pd +from pyarrow.pandas_compat import _pandas_api + +if __name__ == "__main__": + wait = True + num_threads = 10 + df = pd.DataFrame() + results = [] + + def rc(): + while wait: + pass + results.append(_pandas_api.is_data_frame(df)) + + threads = [Thread(target=rc) for _ in range(num_threads)] + + for t in threads: + t.start() + + wait = False + + for t in threads: + t.join() + + assert len(results) == num_threads + assert all(results), "`is_data_frame` returned False when given a DataFrame" diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py b/venv/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py new file mode 100644 index 0000000000000000000000000000000000000000..c1bc3176d2822620f7121b404ee90a6c5d21f533 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/arrow_7980.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# This file is called from a test in test_schema.py. + +import pyarrow as pa + + +# the types where to_pandas_dtype returns a non-numpy dtype +cases = [ + (pa.timestamp('ns', tz='UTC'), "datetime64[ns, UTC]"), +] + + +for arrow_type, pandas_type in cases: + assert str(arrow_type.to_pandas_dtype()) == pandas_type diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx b/venv/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx new file mode 100644 index 0000000000000000000000000000000000000000..80b4c541b84b477b422faf7e86ddbd008195ece6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/bound_function_visit_strings.pyx @@ -0,0 +1,67 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
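+
+# The module below round-trips Python strings through a C++ callback that is
+# bound with BindFunction and invoked from VisitStrings.  A hedged usage
+# sketch, assuming the extension has been built as part of the test suite:
+#
+#     from pyarrow.tests.bound_function_visit_strings import _visit_strings
+#     seen = []
+#     _visit_strings(["a", "b"], seen.append)
+#     assert seen == ["a", "b"]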
+ +# distutils: language=c++ +# cython: language_level = 3 + +from pyarrow.lib cimport * +from pyarrow.lib import frombytes, tobytes + +# basic test to roundtrip through a BoundFunction + +ctypedef CStatus visit_string_cb(const c_string&) + +cdef extern from * namespace "arrow::py" nogil: + """ + #include + #include + #include + + #include "arrow/status.h" + + namespace arrow { + namespace py { + + Status VisitStrings(const std::vector& strs, + std::function cb) { + for (const std::string& str : strs) { + RETURN_NOT_OK(cb(str)); + } + return Status::OK(); + } + + } // namespace py + } // namespace arrow + """ + cdef CStatus CVisitStrings" arrow::py::VisitStrings"( + vector[c_string], function[visit_string_cb]) + + +cdef void _visit_strings_impl(py_cb, const c_string& s) except *: + py_cb(frombytes(s)) + + +def _visit_strings(strings, cb): + cdef: + function[visit_string_cb] c_cb + vector[c_string] c_strings + + c_cb = BindFunction[visit_string_cb](&_visit_strings_impl, cb) + for s in strings: + c_strings.push_back(tobytes(s)) + + check_status(CVisitStrings(c_strings, c_cb)) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/conftest.py b/venv/lib/python3.10/site-packages/pyarrow/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..57bc3c8fc66167725b6c3d48e7ecbbc7611910e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/conftest.py @@ -0,0 +1,312 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import functools +import os +import pathlib +import subprocess +import sys +import time +import urllib.request + +import pytest +import hypothesis as h +from ..conftest import groups, defaults + +from pyarrow import set_timezone_db_path +from pyarrow.util import find_free_port + + +# setup hypothesis profiles +h.settings.register_profile('ci', max_examples=1000) +h.settings.register_profile('dev', max_examples=50) +h.settings.register_profile('debug', max_examples=10, + verbosity=h.Verbosity.verbose) + +# load default hypothesis profile, either set HYPOTHESIS_PROFILE environment +# variable or pass --hypothesis-profile option to pytest, to see the generated +# examples try: +# pytest pyarrow -sv --enable-hypothesis --hypothesis-profile=debug +h.settings.load_profile(os.environ.get('HYPOTHESIS_PROFILE', 'dev')) + +# Set this at the beginning before the AWS SDK was loaded to avoid reading in +# user configuration values. 
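+# Pointing AWS_CONFIG_FILE at an empty file such as /dev/null keeps the SDK
+# (used by the S3 filesystem tests) from picking up credentials, regions or
+# endpoints from a developer's own AWS configuration.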
+os.environ['AWS_CONFIG_FILE'] = "/dev/null" + + +if sys.platform == 'win32': + tzdata_set_path = os.environ.get('PYARROW_TZDATA_PATH', None) + if tzdata_set_path: + set_timezone_db_path(tzdata_set_path) + + +def pytest_addoption(parser): + # Create options to selectively enable test groups + def bool_env(name, default=None): + value = os.environ.get(name.upper()) + if not value: # missing or empty + return default + value = value.lower() + if value in {'1', 'true', 'on', 'yes', 'y'}: + return True + elif value in {'0', 'false', 'off', 'no', 'n'}: + return False + else: + raise ValueError('{}={} is not parsable as boolean' + .format(name.upper(), value)) + + for group in groups: + default = bool_env('PYARROW_TEST_{}'.format(group), defaults[group]) + parser.addoption('--enable-{}'.format(group), + action='store_true', default=default, + help=('Enable the {} test group'.format(group))) + parser.addoption('--disable-{}'.format(group), + action='store_true', default=False, + help=('Disable the {} test group'.format(group))) + + +class PyArrowConfig: + def __init__(self): + self.is_enabled = {} + + def apply_mark(self, mark): + group = mark.name + if group in groups: + self.requires(group) + + def requires(self, group): + if not self.is_enabled[group]: + pytest.skip('{} NOT enabled'.format(group)) + + +def pytest_configure(config): + # Apply command-line options to initialize PyArrow-specific config object + config.pyarrow = PyArrowConfig() + + for mark in groups: + config.addinivalue_line( + "markers", mark, + ) + + enable_flag = '--enable-{}'.format(mark) + disable_flag = '--disable-{}'.format(mark) + + is_enabled = (config.getoption(enable_flag) and not + config.getoption(disable_flag)) + config.pyarrow.is_enabled[mark] = is_enabled + + +def pytest_runtest_setup(item): + # Apply test markers to skip tests selectively + for mark in item.iter_markers(): + item.config.pyarrow.apply_mark(mark) + + +@pytest.fixture +def tempdir(tmpdir): + # convert pytest's LocalPath to pathlib.Path + return pathlib.Path(tmpdir.strpath) + + +@pytest.fixture(scope='session') +def base_datadir(): + return pathlib.Path(__file__).parent / 'data' + + +@pytest.fixture(autouse=True) +def disable_aws_metadata(monkeypatch): + """Stop the AWS SDK from trying to contact the EC2 metadata server. + + Otherwise, this causes a 5 second delay in tests that exercise the + S3 filesystem. + """ + monkeypatch.setenv("AWS_EC2_METADATA_DISABLED", "true") + + +# TODO(kszucs): move the following fixtures to test_fs.py once the previous +# parquet dataset implementation and hdfs implementation are removed. + +@pytest.fixture(scope='session') +def hdfs_connection(): + host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default') + port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0)) + user = os.environ.get('ARROW_HDFS_TEST_USER', 'hdfs') + return host, port, user + + +@pytest.fixture(scope='session') +def s3_connection(): + host, port = 'localhost', find_free_port() + access_key, secret_key = 'arrow', 'apachearrow' + return host, port, access_key, secret_key + + +def retry(attempts=3, delay=1.0, max_delay=None, backoff=1): + """ + Retry decorator + + Parameters + ---------- + attempts : int, default 3 + The number of attempts. + delay : float, default 1 + Initial delay in seconds. + max_delay : float, optional + The max delay between attempts. + backoff : float, default 1 + The multiplier to delay after each attempt. 
+ """ + def decorate(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + remaining_attempts = attempts + curr_delay = delay + while remaining_attempts > 0: + try: + return func(*args, **kwargs) + except Exception as err: + remaining_attempts -= 1 + last_exception = err + curr_delay *= backoff + if max_delay: + curr_delay = min(curr_delay, max_delay) + time.sleep(curr_delay) + raise last_exception + return wrapper + return decorate + + +@pytest.fixture(scope='session') +def s3_server(s3_connection, tmpdir_factory): + @retry(attempts=5, delay=0.1, backoff=2) + def minio_server_health_check(address): + resp = urllib.request.urlopen(f"http://{address}/minio/health/cluster") + assert resp.getcode() == 200 + + tmpdir = tmpdir_factory.getbasetemp() + host, port, access_key, secret_key = s3_connection + + address = '{}:{}'.format(host, port) + env = os.environ.copy() + env.update({ + 'MINIO_ACCESS_KEY': access_key, + 'MINIO_SECRET_KEY': secret_key + }) + + args = ['minio', '--compat', 'server', '--quiet', '--address', + address, tmpdir] + proc = None + try: + proc = subprocess.Popen(args, env=env) + except OSError: + pytest.skip('`minio` command cannot be located') + else: + # Wait for the server to startup before yielding + minio_server_health_check(address) + + yield { + 'connection': s3_connection, + 'process': proc, + 'tempdir': tmpdir + } + finally: + if proc is not None: + proc.kill() + proc.wait() + + +@pytest.fixture(scope='session') +def gcs_server(): + port = find_free_port() + env = os.environ.copy() + args = [sys.executable, '-m', 'testbench', '--port', str(port)] + proc = None + try: + # check first if testbench module is available + import testbench # noqa:F401 + # start server + proc = subprocess.Popen(args, env=env) + # Make sure the server is alive. + if proc.poll() is not None: + pytest.skip(f"Command {args} did not start server successfully!") + except (ModuleNotFoundError, OSError) as e: + pytest.skip(f"Command {args} failed to execute: {e}") + else: + yield { + 'connection': ('localhost', port), + 'process': proc, + } + finally: + if proc is not None: + proc.kill() + proc.wait() + + +@pytest.fixture(scope='session') +def azure_server(tmpdir_factory): + port = find_free_port() + env = os.environ.copy() + tmpdir = tmpdir_factory.getbasetemp() + # We only need blob service emulator, not queue or table. + args = ['azurite-blob', "--location", tmpdir, "--blobPort", str(port)] + proc = None + try: + proc = subprocess.Popen(args, env=env) + # Make sure the server is alive. + if proc.poll() is not None: + pytest.skip(f"Command {args} did not start server successfully!") + except (ModuleNotFoundError, OSError) as e: + pytest.skip(f"Command {args} failed to execute: {e}") + else: + yield { + # Use the standard azurite account_name and account_key. 
+ # https://learn.microsoft.com/en-us/azure/storage/common/storage-use-emulator#authorize-with-shared-key-credentials + 'connection': ('127.0.0.1', port, 'devstoreaccount1', + 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2' + 'UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='), + 'process': proc, + 'tempdir': tmpdir, + } + finally: + if proc is not None: + proc.kill() + proc.wait() + + +@pytest.fixture( + params=[ + 'builtin_pickle', + 'cloudpickle' + ], + scope='session' +) +def pickle_module(request): + return request.getfixturevalue(request.param) + + +@pytest.fixture(scope='session') +def builtin_pickle(): + import pickle + return pickle + + +@pytest.fixture(scope='session') +def cloudpickle(): + cp = pytest.importorskip('cloudpickle') + if 'HIGHEST_PROTOCOL' not in cp.__dict__: + cp.HIGHEST_PROTOCOL = cp.DEFAULT_PROTOCOL + return cp diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py b/venv/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py new file mode 100644 index 0000000000000000000000000000000000000000..f44632d747fcb273f9d3b23284ba10adb0b05352 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/pandas_threaded_import.py @@ -0,0 +1,44 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# This file is called from a test in test_pandas.py. + +from concurrent.futures import ThreadPoolExecutor +import faulthandler +import sys + +import pyarrow as pa + +num_threads = 60 +timeout = 10 # seconds + + +def thread_func(i): + pa.array([i]).to_pandas() + + +def main(): + # In case of import deadlock, crash after a finite timeout + faulthandler.dump_traceback_later(timeout, exit=True) + with ThreadPoolExecutor(num_threads) as pool: + assert "pandas" not in sys.modules # pandas is imported lazily + list(pool.map(thread_func, range(num_threads))) + assert "pandas" in sys.modules + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx b/venv/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx new file mode 100644 index 0000000000000000000000000000000000000000..9ae59efb8b78cc1245421cfb4b2584c96e33fb1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/pyarrow_cython_example.pyx @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language=c++ +# cython: language_level = 3 + +from pyarrow.lib cimport * + + +def get_array_length(obj): + # An example function accessing both the pyarrow Cython API + # and the Arrow C++ API + cdef shared_ptr[CArray] arr = pyarrow_unwrap_array(obj) + if arr.get() == NULL: + raise TypeError("not an array") + return arr.get().length() + + +def make_null_array(length): + # An example function that returns a PyArrow object without PyArrow + # being imported explicitly at the Python level. + cdef shared_ptr[CArray] null_array + null_array.reset(new CNullArray(length)) + return pyarrow_wrap_array(null_array) + + +def cast_scalar(scalar, to_type): + cdef: + shared_ptr[CScalar] c_scalar + shared_ptr[CDataType] c_type + CCastOptions cast_options + CDatum c_datum + CResult[CDatum] c_cast_result + + c_scalar = pyarrow_unwrap_scalar(scalar) + if c_scalar.get() == NULL: + raise TypeError("not a scalar") + c_type = pyarrow_unwrap_data_type(to_type) + if c_type.get() == NULL: + raise TypeError("not a type") + + c_datum = CDatum(c_scalar) + cast_options = CCastOptions() + cast_options.to_type = c_type + c_cast_result = Cast(c_datum, cast_options) + c_datum = GetResultValue(c_cast_result) + return pyarrow_wrap_scalar(c_datum.scalar()) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py b/venv/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..d565d254143fb4a1ef54df11843dd225739e5abe --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# This file is called from a test in test_ipc.py. + +import sys + +import pyarrow as pa + +with open(sys.argv[1], 'rb') as f: + pa.ipc.open_file(f).read_all().to_pandas() diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/strategies.py b/venv/lib/python3.10/site-packages/pyarrow/tests/strategies.py new file mode 100644 index 0000000000000000000000000000000000000000..db0aa1397123df0d5b7e0a5031b3b8710227a55c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/strategies.py @@ -0,0 +1,457 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import datetime +import sys + +import pytest +import hypothesis as h +import hypothesis.strategies as st +import hypothesis.extra.numpy as npst +try: + import hypothesis.extra.pytz as tzst +except ImportError: + tzst = None +try: + import zoneinfo +except ImportError: + zoneinfo = None +if sys.platform == 'win32': + try: + import tzdata # noqa:F401 + except ImportError: + zoneinfo = None +import numpy as np + +import pyarrow as pa + + +# TODO(kszucs): alphanum_text, surrogate_text +custom_text = st.text( + alphabet=st.characters( + min_codepoint=0x41, + max_codepoint=0x7E + ) +) + +null_type = st.just(pa.null()) +bool_type = st.just(pa.bool_()) + +binary_type = st.just(pa.binary()) +string_type = st.just(pa.string()) +large_binary_type = st.just(pa.large_binary()) +large_string_type = st.just(pa.large_string()) +fixed_size_binary_type = st.builds( + pa.binary, + st.integers(min_value=0, max_value=16) +) +binary_like_types = st.one_of( + binary_type, + string_type, + large_binary_type, + large_string_type, + fixed_size_binary_type +) + +signed_integer_types = st.sampled_from([ + pa.int8(), + pa.int16(), + pa.int32(), + pa.int64() +]) +unsigned_integer_types = st.sampled_from([ + pa.uint8(), + pa.uint16(), + pa.uint32(), + pa.uint64() +]) +integer_types = st.one_of(signed_integer_types, unsigned_integer_types) + +floating_types = st.sampled_from([ + pa.float16(), + pa.float32(), + pa.float64() +]) +decimal128_type = st.builds( + pa.decimal128, + precision=st.integers(min_value=1, max_value=38), + scale=st.integers(min_value=1, max_value=38) +) +decimal256_type = st.builds( + pa.decimal256, + precision=st.integers(min_value=1, max_value=76), + scale=st.integers(min_value=1, max_value=76) +) +numeric_types = st.one_of(integer_types, floating_types, + decimal128_type, decimal256_type) + +date_types = st.sampled_from([ + pa.date32(), + pa.date64() +]) +time_types = st.sampled_from([ + pa.time32('s'), + pa.time32('ms'), + pa.time64('us'), + pa.time64('ns') +]) + +if tzst and zoneinfo: + timezones = st.one_of(st.none(), tzst.timezones(), st.timezones()) +elif tzst: + timezones = st.one_of(st.none(), tzst.timezones()) +elif zoneinfo: + timezones = st.one_of(st.none(), st.timezones()) +else: + timezones = st.none() +timestamp_types = st.builds( + pa.timestamp, + unit=st.sampled_from(['s', 'ms', 'us', 'ns']), + tz=timezones +) +duration_types = st.builds( + pa.duration, + st.sampled_from(['s', 'ms', 'us', 'ns']) +) +interval_types = st.just(pa.month_day_nano_interval()) +temporal_types = st.one_of( + date_types, + time_types, + timestamp_types, + duration_types, + interval_types +) + +primitive_types = st.one_of( + null_type, + bool_type, + numeric_types, + temporal_types, + binary_like_types +) + +metadata = st.dictionaries(st.text(), st.text()) + + +@st.composite +def fields(draw, type_strategy=primitive_types): + name 
= draw(custom_text) + typ = draw(type_strategy) + if pa.types.is_null(typ): + nullable = True + else: + nullable = draw(st.booleans()) + meta = draw(metadata) + return pa.field(name, type=typ, nullable=nullable, metadata=meta) + + +def list_types(item_strategy=primitive_types): + return ( + st.builds(pa.list_, item_strategy) | + st.builds(pa.large_list, item_strategy) | + st.builds( + pa.list_, + item_strategy, + st.integers(min_value=0, max_value=16) + ) | + st.builds(pa.list_view, item_strategy) | + st.builds(pa.large_list_view, item_strategy) + ) + + +@st.composite +def struct_types(draw, item_strategy=primitive_types): + fields_strategy = st.lists(fields(item_strategy)) + fields_rendered = draw(fields_strategy) + field_names = [field.name for field in fields_rendered] + # check that field names are unique, see ARROW-9997 + h.assume(len(set(field_names)) == len(field_names)) + return pa.struct(fields_rendered) + + +def dictionary_types(key_strategy=None, value_strategy=None): + if key_strategy is None: + key_strategy = signed_integer_types + if value_strategy is None: + value_strategy = st.one_of( + bool_type, + integer_types, + st.sampled_from([pa.float32(), pa.float64()]), + binary_type, + string_type, + fixed_size_binary_type, + ) + return st.builds(pa.dictionary, key_strategy, value_strategy) + + +@st.composite +def map_types(draw, key_strategy=primitive_types, + item_strategy=primitive_types): + key_type = draw(key_strategy) + h.assume(not pa.types.is_null(key_type)) + value_type = draw(item_strategy) + return pa.map_(key_type, value_type) + + +# union type +# extension type + + +def schemas(type_strategy=primitive_types, max_fields=None): + children = st.lists(fields(type_strategy), max_size=max_fields) + return st.builds(pa.schema, children) + + +all_types = st.deferred( + lambda: ( + primitive_types | + list_types() | + struct_types() | + dictionary_types() | + map_types() | + list_types(all_types) | + struct_types(all_types) + ) +) +all_fields = fields(all_types) +all_schemas = schemas(all_types) + + +_default_array_sizes = st.integers(min_value=0, max_value=20) + + +@st.composite +def _pylist(draw, value_type, size, nullable=True): + arr = draw(arrays(value_type, size=size, nullable=False)) + return arr.to_pylist() + + +@st.composite +def _pymap(draw, key_type, value_type, size, nullable=True): + length = draw(size) + keys = draw(_pylist(key_type, size=length, nullable=False)) + values = draw(_pylist(value_type, size=length, nullable=nullable)) + return list(zip(keys, values)) + + +@st.composite +def arrays(draw, type, size=None, nullable=True): + if isinstance(type, st.SearchStrategy): + ty = draw(type) + elif isinstance(type, pa.DataType): + ty = type + else: + raise TypeError('Type must be a pyarrow DataType') + + if isinstance(size, st.SearchStrategy): + size = draw(size) + elif size is None: + size = draw(_default_array_sizes) + elif not isinstance(size, int): + raise TypeError('Size must be an integer') + + if pa.types.is_null(ty): + h.assume(nullable) + value = st.none() + elif pa.types.is_boolean(ty): + value = st.booleans() + elif pa.types.is_integer(ty): + values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,))) + return pa.array(values, type=ty) + elif pa.types.is_floating(ty): + values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,))) + # Workaround ARROW-4952: no easy way to assert array equality + # in a NaN-tolerant way. 
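+        # Replacing any generated NaNs with a fixed sentinel (below) keeps the
+        # drawn float arrays comparable with plain equality later on.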
+ values[np.isnan(values)] = -42.0 + return pa.array(values, type=ty) + elif pa.types.is_decimal(ty): + # TODO(kszucs): properly limit the precision + # value = st.decimals(places=type.scale, allow_infinity=False) + h.reject() + elif pa.types.is_time(ty): + value = st.times() + elif pa.types.is_date(ty): + value = st.dates() + elif pa.types.is_timestamp(ty): + if zoneinfo is None: + pytest.skip('no module named zoneinfo (or tzdata on Windows)') + if ty.tz is None: + pytest.skip('requires timezone not None') + min_int64 = -(2**63) + max_int64 = 2**63 - 1 + min_datetime = datetime.datetime.fromtimestamp( + min_int64 // 10**9) + datetime.timedelta(hours=12) + max_datetime = datetime.datetime.fromtimestamp( + max_int64 // 10**9) - datetime.timedelta(hours=12) + try: + offset = ty.tz.split(":") + offset_hours = int(offset[0]) + offset_min = int(offset[1]) + tz = datetime.timedelta(hours=offset_hours, minutes=offset_min) + except ValueError: + tz = zoneinfo.ZoneInfo(ty.tz) + value = st.datetimes(timezones=st.just(tz), min_value=min_datetime, + max_value=max_datetime) + elif pa.types.is_duration(ty): + value = st.timedeltas() + elif pa.types.is_interval(ty): + value = st.timedeltas() + elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty): + value = st.binary() + elif pa.types.is_string(ty) or pa.types.is_large_string(ty): + value = st.text() + elif pa.types.is_fixed_size_binary(ty): + value = st.binary(min_size=ty.byte_width, max_size=ty.byte_width) + elif pa.types.is_list(ty): + value = _pylist(ty.value_type, size=size, nullable=nullable) + elif pa.types.is_large_list(ty): + value = _pylist(ty.value_type, size=size, nullable=nullable) + elif pa.types.is_fixed_size_list(ty): + value = _pylist(ty.value_type, size=ty.list_size, nullable=nullable) + elif pa.types.is_list_view(ty): + value = _pylist(ty.value_type, size=size, nullable=nullable) + elif pa.types.is_large_list_view(ty): + value = _pylist(ty.value_type, size=size, nullable=nullable) + elif pa.types.is_dictionary(ty): + values = _pylist(ty.value_type, size=size, nullable=nullable) + return pa.array(draw(values), type=ty) + elif pa.types.is_map(ty): + value = _pymap(ty.key_type, ty.item_type, size=_default_array_sizes, + nullable=nullable) + elif pa.types.is_struct(ty): + h.assume(len(ty) > 0) + fields, child_arrays = [], [] + for field in ty: + fields.append(field) + child_arrays.append(draw(arrays(field.type, size=size))) + return pa.StructArray.from_arrays(child_arrays, fields=fields) + else: + raise NotImplementedError(ty) + + if nullable: + value = st.one_of(st.none(), value) + values = st.lists(value, min_size=size, max_size=size) + + return pa.array(draw(values), type=ty) + + +@st.composite +def chunked_arrays(draw, type, min_chunks=0, max_chunks=None, chunk_size=None): + if isinstance(type, st.SearchStrategy): + type = draw(type) + + # TODO(kszucs): remove it, field metadata is not kept + h.assume(not pa.types.is_struct(type)) + + chunk = arrays(type, size=chunk_size) + chunks = st.lists(chunk, min_size=min_chunks, max_size=max_chunks) + + return pa.chunked_array(draw(chunks), type=type) + + +@st.composite +def record_batches(draw, type, rows=None, max_fields=None): + if isinstance(rows, st.SearchStrategy): + rows = draw(rows) + elif rows is None: + rows = draw(_default_array_sizes) + elif not isinstance(rows, int): + raise TypeError('Rows must be an integer') + + schema = draw(schemas(type, max_fields=max_fields)) + children = [draw(arrays(field.type, size=rows)) for field in schema] + # TODO(kszucs): the names and schema 
arguments are not consistent with + # Table.from_array's arguments + return pa.RecordBatch.from_arrays(children, schema=schema) + + +@st.composite +def tables(draw, type, rows=None, max_fields=None): + if isinstance(rows, st.SearchStrategy): + rows = draw(rows) + elif rows is None: + rows = draw(_default_array_sizes) + elif not isinstance(rows, int): + raise TypeError('Rows must be an integer') + + schema = draw(schemas(type, max_fields=max_fields)) + children = [draw(arrays(field.type, size=rows)) for field in schema] + return pa.Table.from_arrays(children, schema=schema) + + +all_arrays = arrays(all_types) +all_chunked_arrays = chunked_arrays(all_types) +all_record_batches = record_batches(all_types) +all_tables = tables(all_types) + + +# Define the same rules as above for pandas tests by excluding certain types +# from the generation because of known issues. + +pandas_compatible_primitive_types = st.one_of( + null_type, + bool_type, + integer_types, + st.sampled_from([pa.float32(), pa.float64()]), + decimal128_type, + date_types, + time_types, + # Need to exclude timestamp and duration types otherwise hypothesis + # discovers ARROW-10210 + # timestamp_types, + # duration_types + interval_types, + binary_type, + string_type, + large_binary_type, + large_string_type, +) + +# Need to exclude floating point types otherwise hypothesis discovers +# ARROW-10211 +pandas_compatible_dictionary_value_types = st.one_of( + bool_type, + integer_types, + binary_type, + string_type, + fixed_size_binary_type, +) + + +def pandas_compatible_list_types( + item_strategy=pandas_compatible_primitive_types +): + # Need to exclude fixed size list type otherwise hypothesis discovers + # ARROW-10194 + return ( + st.builds(pa.list_, item_strategy) | + st.builds(pa.large_list, item_strategy) + ) + + +pandas_compatible_types = st.deferred( + lambda: st.one_of( + pandas_compatible_primitive_types, + pandas_compatible_list_types(pandas_compatible_primitive_types), + struct_types(pandas_compatible_primitive_types), + dictionary_types( + value_strategy=pandas_compatible_dictionary_value_types + ), + pandas_compatible_list_types(pandas_compatible_types), + struct_types(pandas_compatible_types) + ) +) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_acero.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_acero.py new file mode 100644 index 0000000000000000000000000000000000000000..1bdfabd8f5832b3aa8c20fccbd86e69b0766ee11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_acero.py @@ -0,0 +1,413 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
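+
+# The tests below chain Acero Declarations into small plans and execute them
+# with ``to_table`` / ``to_reader``.  A minimal sketch of the pattern, using
+# only names exercised in this module:
+#
+#     import pyarrow as pa
+#     from pyarrow.acero import (Declaration, TableSourceNodeOptions,
+#                                FilterNodeOptions)
+#     from pyarrow.compute import field
+#
+#     table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]})
+#     plan = Declaration.from_sequence([
+#         Declaration("table_source", TableSourceNodeOptions(table)),
+#         Declaration("filter", FilterNodeOptions(field('a') > 1)),
+#     ])
+#     result = plan.to_table()   # keeps only the rows where a > 1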
+ +import pytest + +import pyarrow as pa +import pyarrow.compute as pc +from pyarrow.compute import field + +try: + from pyarrow.acero import ( + Declaration, + TableSourceNodeOptions, + FilterNodeOptions, + ProjectNodeOptions, + AggregateNodeOptions, + OrderByNodeOptions, + HashJoinNodeOptions, + AsofJoinNodeOptions, + ) +except ImportError: + pass + +try: + import pyarrow.dataset as ds + from pyarrow.acero import ScanNodeOptions +except ImportError: + ds = None + +pytestmark = pytest.mark.acero + + +@pytest.fixture +def table_source(): + table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]}) + table_opts = TableSourceNodeOptions(table) + table_source = Declaration("table_source", options=table_opts) + return table_source + + +def test_declaration(): + + table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]}) + table_opts = TableSourceNodeOptions(table) + filter_opts = FilterNodeOptions(field('a') > 1) + + # using sequence + decl = Declaration.from_sequence([ + Declaration("table_source", options=table_opts), + Declaration("filter", options=filter_opts) + ]) + result = decl.to_table() + assert result.equals(table.slice(1, 2)) + + # using explicit inputs + table_source = Declaration("table_source", options=table_opts) + filtered = Declaration("filter", options=filter_opts, inputs=[table_source]) + result = filtered.to_table() + assert result.equals(table.slice(1, 2)) + + +def test_declaration_repr(table_source): + + assert "TableSourceNode" in str(table_source) + assert "TableSourceNode" in repr(table_source) + + +def test_declaration_to_reader(table_source): + with table_source.to_reader() as reader: + assert reader.schema == pa.schema([("a", pa.int64()), ("b", pa.int64())]) + result = reader.read_all() + expected = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]}) + assert result.equals(expected) + + +def test_table_source(): + with pytest.raises(TypeError): + TableSourceNodeOptions(pa.record_batch([pa.array([1, 2, 3])], ["a"])) + + table_source = TableSourceNodeOptions(None) + decl = Declaration("table_source", table_source) + with pytest.raises( + ValueError, match="TableSourceNode requires table which is not null" + ): + _ = decl.to_table() + + +def test_filter(table_source): + # referencing unknown field + decl = Declaration.from_sequence([ + table_source, + Declaration("filter", options=FilterNodeOptions(field("c") > 1)) + ]) + with pytest.raises(ValueError, match=r"No match for FieldRef.Name\(c\)"): + _ = decl.to_table() + + # requires a pyarrow Expression + with pytest.raises(TypeError): + FilterNodeOptions(pa.array([True, False, True])) + with pytest.raises(TypeError): + FilterNodeOptions(None) + + +def test_project(table_source): + # default name from expression + decl = Declaration.from_sequence([ + table_source, + Declaration("project", ProjectNodeOptions([pc.multiply(field("a"), 2)])) + ]) + result = decl.to_table() + assert result.schema.names == ["multiply(a, 2)"] + assert result[0].to_pylist() == [2, 4, 6] + + # provide name + decl = Declaration.from_sequence([ + table_source, + Declaration("project", ProjectNodeOptions([pc.multiply(field("a"), 2)], ["a2"])) + ]) + result = decl.to_table() + assert result.schema.names == ["a2"] + assert result["a2"].to_pylist() == [2, 4, 6] + + # input validation + with pytest.raises(ValueError): + ProjectNodeOptions([pc.multiply(field("a"), 2)], ["a2", "b2"]) + + # no scalar expression + decl = Declaration.from_sequence([ + table_source, + Declaration("project", ProjectNodeOptions([pc.sum(field("a"))])) + ]) + with pytest.raises(ValueError, match="cannot 
Execute non-scalar expression"): + _ = decl.to_table() + + +def test_aggregate_scalar(table_source): + decl = Declaration.from_sequence([ + table_source, + Declaration("aggregate", AggregateNodeOptions([("a", "sum", None, "a_sum")])) + ]) + result = decl.to_table() + assert result.schema.names == ["a_sum"] + assert result["a_sum"].to_pylist() == [6] + + # with options class + table = pa.table({'a': [1, 2, None]}) + aggr_opts = AggregateNodeOptions( + [("a", "sum", pc.ScalarAggregateOptions(skip_nulls=False), "a_sum")] + ) + decl = Declaration.from_sequence([ + Declaration("table_source", TableSourceNodeOptions(table)), + Declaration("aggregate", aggr_opts), + ]) + result = decl.to_table() + assert result.schema.names == ["a_sum"] + assert result["a_sum"].to_pylist() == [None] + + # test various ways of specifying the target column + for target in ["a", field("a"), 0, field(0), ["a"], [field("a")], [0]]: + aggr_opts = AggregateNodeOptions([(target, "sum", None, "a_sum")]) + decl = Declaration.from_sequence( + [table_source, Declaration("aggregate", aggr_opts)] + ) + result = decl.to_table() + assert result.schema.names == ["a_sum"] + assert result["a_sum"].to_pylist() == [6] + + # proper error when specifying the wrong number of target columns + aggr_opts = AggregateNodeOptions([(["a", "b"], "sum", None, "a_sum")]) + decl = Declaration.from_sequence( + [table_source, Declaration("aggregate", aggr_opts)] + ) + with pytest.raises( + ValueError, match="Function 'sum' accepts 1 arguments but 2 passed" + ): + _ = decl.to_table() + + # proper error when using hash aggregation without keys + aggr_opts = AggregateNodeOptions([("a", "hash_sum", None, "a_sum")]) + decl = Declaration.from_sequence( + [table_source, Declaration("aggregate", aggr_opts)] + ) + with pytest.raises(ValueError, match="is a hash aggregate function"): + _ = decl.to_table() + + +def test_aggregate_hash(): + table = pa.table({'a': [1, 2, None], 'b': ["foo", "bar", "foo"]}) + table_opts = TableSourceNodeOptions(table) + table_source = Declaration("table_source", options=table_opts) + + # default options + aggr_opts = AggregateNodeOptions( + [("a", "hash_count", None, "count(a)")], keys=["b"]) + decl = Declaration.from_sequence([ + table_source, Declaration("aggregate", aggr_opts) + ]) + result = decl.to_table() + expected = pa.table({"b": ["foo", "bar"], "count(a)": [1, 1]}) + assert result.equals(expected) + + # specify function options + aggr_opts = AggregateNodeOptions( + [("a", "hash_count", pc.CountOptions("all"), "count(a)")], keys=["b"] + ) + decl = Declaration.from_sequence([ + table_source, Declaration("aggregate", aggr_opts) + ]) + result = decl.to_table() + expected_all = pa.table({"b": ["foo", "bar"], "count(a)": [2, 1]}) + assert result.equals(expected_all) + + # specify keys as field references + aggr_opts = AggregateNodeOptions( + [("a", "hash_count", None, "count(a)")], keys=[field("b")] + ) + decl = Declaration.from_sequence([ + table_source, Declaration("aggregate", aggr_opts) + ]) + result = decl.to_table() + assert result.equals(expected) + + # wrong type of (aggregation) function + # TODO test with kernel that matches number of arguments (arity) -> avoid segfault + aggr_opts = AggregateNodeOptions([("a", "sum", None, "a_sum")], keys=["b"]) + decl = Declaration.from_sequence([ + table_source, Declaration("aggregate", aggr_opts) + ]) + with pytest.raises(ValueError): + _ = decl.to_table() + + +def test_order_by(): + table = pa.table({'a': [1, 2, 3, 4], 'b': [1, 3, None, 2]}) + table_source = 
Declaration("table_source", TableSourceNodeOptions(table)) + + ord_opts = OrderByNodeOptions([("b", "ascending")]) + decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)]) + result = decl.to_table() + expected = pa.table({"a": [1, 4, 2, 3], "b": [1, 2, 3, None]}) + assert result.equals(expected) + + ord_opts = OrderByNodeOptions([(field("b"), "descending")]) + decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)]) + result = decl.to_table() + expected = pa.table({"a": [2, 4, 1, 3], "b": [3, 2, 1, None]}) + assert result.equals(expected) + + ord_opts = OrderByNodeOptions([(1, "descending")], null_placement="at_start") + decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)]) + result = decl.to_table() + expected = pa.table({"a": [3, 2, 4, 1], "b": [None, 3, 2, 1]}) + assert result.equals(expected) + + # empty ordering + ord_opts = OrderByNodeOptions([]) + decl = Declaration.from_sequence([table_source, Declaration("order_by", ord_opts)]) + with pytest.raises( + ValueError, match="`ordering` must be an explicit non-empty ordering" + ): + _ = decl.to_table() + + with pytest.raises(ValueError, match="\"decreasing\" is not a valid sort order"): + _ = OrderByNodeOptions([("b", "decreasing")]) + + with pytest.raises(ValueError, match="\"start\" is not a valid null placement"): + _ = OrderByNodeOptions([("b", "ascending")], null_placement="start") + + +def test_hash_join(): + left = pa.table({'key': [1, 2, 3], 'a': [4, 5, 6]}) + left_source = Declaration("table_source", options=TableSourceNodeOptions(left)) + right = pa.table({'key': [2, 3, 4], 'b': [4, 5, 6]}) + right_source = Declaration("table_source", options=TableSourceNodeOptions(right)) + + # inner join + join_opts = HashJoinNodeOptions("inner", left_keys="key", right_keys="key") + joined = Declaration( + "hashjoin", options=join_opts, inputs=[left_source, right_source]) + result = joined.to_table() + expected = pa.table( + [[2, 3], [5, 6], [2, 3], [4, 5]], + names=["key", "a", "key", "b"]) + assert result.equals(expected) + + for keys in [field("key"), ["key"], [field("key")]]: + join_opts = HashJoinNodeOptions("inner", left_keys=keys, right_keys=keys) + joined = Declaration( + "hashjoin", options=join_opts, inputs=[left_source, right_source]) + result = joined.to_table() + assert result.equals(expected) + + # left join + join_opts = HashJoinNodeOptions( + "left outer", left_keys="key", right_keys="key") + joined = Declaration( + "hashjoin", options=join_opts, inputs=[left_source, right_source]) + result = joined.to_table() + expected = pa.table( + [[1, 2, 3], [4, 5, 6], [None, 2, 3], [None, 4, 5]], + names=["key", "a", "key", "b"] + ) + assert result.sort_by("a").equals(expected) + + # suffixes + join_opts = HashJoinNodeOptions( + "left outer", left_keys="key", right_keys="key", + output_suffix_for_left="_left", output_suffix_for_right="_right") + joined = Declaration( + "hashjoin", options=join_opts, inputs=[left_source, right_source]) + result = joined.to_table() + expected = pa.table( + [[1, 2, 3], [4, 5, 6], [None, 2, 3], [None, 4, 5]], + names=["key_left", "a", "key_right", "b"] + ) + assert result.sort_by("a").equals(expected) + + # manually specifying output columns + join_opts = HashJoinNodeOptions( + "left outer", left_keys="key", right_keys="key", + left_output=["key", "a"], right_output=[field("b")]) + joined = Declaration( + "hashjoin", options=join_opts, inputs=[left_source, right_source]) + result = joined.to_table() + expected = pa.table( 
+ [[1, 2, 3], [4, 5, 6], [None, 4, 5]], + names=["key", "a", "b"] + ) + assert result.sort_by("a").equals(expected) + + +def test_asof_join(): + left = pa.table({'key': [1, 2, 3], 'ts': [1, 1, 1], 'a': [4, 5, 6]}) + left_source = Declaration("table_source", options=TableSourceNodeOptions(left)) + right = pa.table({'key': [2, 3, 4], 'ts': [2, 5, 2], 'b': [4, 5, 6]}) + right_source = Declaration("table_source", options=TableSourceNodeOptions(right)) + + # asof join + join_opts = AsofJoinNodeOptions( + left_on="ts", left_by=["key"], + right_on="ts", right_by=["key"], + tolerance=1, + ) + joined = Declaration( + "asofjoin", options=join_opts, inputs=[left_source, right_source] + ) + result = joined.to_table() + expected = pa.table( + [[1, 2, 3], [1, 1, 1], [4, 5, 6], [None, 4, None]], + names=["key", "ts", "a", "b"]) + assert result == expected + + for by in [field("key"), ["key"], [field("key")]]: + for on in [field("ts"), "ts"]: + join_opts = AsofJoinNodeOptions( + left_on=on, left_by=by, + right_on=on, right_by=by, + tolerance=1, + ) + joined = Declaration( + "asofjoin", options=join_opts, inputs=[left_source, right_source]) + result = joined.to_table() + assert result == expected + + +@pytest.mark.dataset +def test_scan(tempdir): + table = pa.table({'a': [1, 2, 3], 'b': [4, 5, 6]}) + ds.write_dataset(table, tempdir / "dataset", format="parquet") + dataset = ds.dataset(tempdir / "dataset", format="parquet") + decl = Declaration("scan", ScanNodeOptions(dataset)) + result = decl.to_table() + assert result.schema.names == [ + "a", "b", "__fragment_index", "__batch_index", + "__last_in_fragment", "__filename" + ] + assert result.select(["a", "b"]).equals(table) + + # using a filter only does pushdown (depending on file format), not actual filter + + scan_opts = ScanNodeOptions(dataset, filter=field('a') > 1) + decl = Declaration("scan", scan_opts) + # fragment not filtered based on min/max statistics + assert decl.to_table().num_rows == 3 + + scan_opts = ScanNodeOptions(dataset, filter=field('a') > 4) + decl = Declaration("scan", scan_opts) + # full fragment filtered based on min/max statistics + assert decl.to_table().num_rows == 0 + + # projection scan option + + scan_opts = ScanNodeOptions(dataset, columns={"a2": pc.multiply(field("a"), 2)}) + decl = Declaration("scan", scan_opts) + result = decl.to_table() + # "a" is included in the result (needed later on for the actual projection) + assert result["a"].to_pylist() == [1, 2, 3] + # "b" is still included, but without data as it will be removed by the projection + assert pc.all(result["b"].is_null()).as_py() diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py new file mode 100644 index 0000000000000000000000000000000000000000..cd381cf427dc35c701fc6743d9523d12ef066a82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_adhoc_memory_leak.py @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +import numpy as np +import pyarrow as pa + +import pyarrow.tests.util as test_util + +try: + import pandas as pd +except ImportError: + pass + + +@pytest.mark.memory_leak +@pytest.mark.pandas +def test_deserialize_pandas_arrow_7956(): + df = pd.DataFrame({'a': np.arange(10000), + 'b': [test_util.rands(5) for _ in range(10000)]}) + + def action(): + df_bytes = pa.ipc.serialize_pandas(df).to_pybytes() + buf = pa.py_buffer(df_bytes) + pa.ipc.deserialize_pandas(buf) + + # Abort at 128MB threshold + test_util.memory_leak_check(action, threshold=1 << 27, iterations=100) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_array.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_array.py new file mode 100644 index 0000000000000000000000000000000000000000..156d58326b961718d7ca6ed85345eac8c2b5dfa8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_array.py @@ -0,0 +1,3881 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from collections.abc import Iterable +import datetime +import decimal +import hypothesis as h +import hypothesis.strategies as st +import itertools +import pytest +import struct +import subprocess +import sys +import weakref + +import numpy as np + +import pyarrow as pa +import pyarrow.tests.strategies as past +from pyarrow.vendored.version import Version + + +def test_total_bytes_allocated(): + code = """if 1: + import pyarrow as pa + + assert pa.total_allocated_bytes() == 0 + """ + res = subprocess.run([sys.executable, "-c", code], + universal_newlines=True, stderr=subprocess.PIPE) + if res.returncode != 0: + print(res.stderr, file=sys.stderr) + res.check_returncode() # fail + assert len(res.stderr.splitlines()) == 0 + + +def test_weakref(): + arr = pa.array([1, 2, 3]) + wr = weakref.ref(arr) + assert wr() is not None + del arr + assert wr() is None + + +def test_getitem_NULL(): + arr = pa.array([1, None, 2]) + assert arr[1].as_py() is None + assert arr[1].is_valid is False + assert isinstance(arr[1], pa.Int64Scalar) + + +def test_constructor_raises(): + # This could happen by wrong capitalization. 
+ # ARROW-2638: prevent calling extension class constructors directly + with pytest.raises(TypeError): + pa.Array([1, 2]) + + +def test_list_format(): + arr = pa.array([[1], None, [2, 3, None]]) + result = arr.to_string() + expected = """\ +[ + [ + 1 + ], + null, + [ + 2, + 3, + null + ] +]""" + assert result == expected + + +def test_string_format(): + arr = pa.array(['', None, 'foo']) + result = arr.to_string() + expected = """\ +[ + "", + null, + "foo" +]""" + assert result == expected + + +def test_long_array_format(): + arr = pa.array(range(100)) + result = arr.to_string(window=2) + expected = """\ +[ + 0, + 1, + ... + 98, + 99 +]""" + assert result == expected + + +def test_indented_string_format(): + arr = pa.array(['', None, 'foo']) + result = arr.to_string(indent=1) + expected = '[\n "",\n null,\n "foo"\n]' + + assert result == expected + + +def test_top_level_indented_string_format(): + arr = pa.array(['', None, 'foo']) + result = arr.to_string(top_level_indent=1) + expected = ' [\n "",\n null,\n "foo"\n ]' + + assert result == expected + + +def test_binary_format(): + arr = pa.array([b'\x00', b'', None, b'\x01foo', b'\x80\xff']) + result = arr.to_string() + expected = """\ +[ + 00, + , + null, + 01666F6F, + 80FF +]""" + assert result == expected + + +def test_binary_total_values_length(): + arr = pa.array([b'0000', None, b'11111', b'222222', b'3333333'], + type='binary') + large_arr = pa.array([b'0000', None, b'11111', b'222222', b'3333333'], + type='large_binary') + + assert arr.total_values_length == 22 + assert arr.slice(1, 3).total_values_length == 11 + assert large_arr.total_values_length == 22 + assert large_arr.slice(1, 3).total_values_length == 11 + + +def test_to_numpy_zero_copy(): + arr = pa.array(range(10)) + + np_arr = arr.to_numpy() + + # check for zero copy (both arrays using same memory) + arrow_buf = arr.buffers()[1] + assert arrow_buf.address == np_arr.ctypes.data + + arr = None + import gc + gc.collect() + + # Ensure base is still valid + assert np_arr.base is not None + expected = np.arange(10) + np.testing.assert_array_equal(np_arr, expected) + + +def test_chunked_array_to_numpy_zero_copy(): + elements = [[2, 2, 4], [4, 5, 100]] + + chunked_arr = pa.chunked_array(elements) + + msg = "zero_copy_only must be False for pyarrow.ChunkedArray.to_numpy" + + with pytest.raises(ValueError, match=msg): + chunked_arr.to_numpy(zero_copy_only=True) + + np_arr = chunked_arr.to_numpy() + expected = [2, 2, 4, 4, 5, 100] + np.testing.assert_array_equal(np_arr, expected) + + +def test_to_numpy_unsupported_types(): + # ARROW-2871: Some primitive types are not yet supported in to_numpy + bool_arr = pa.array([True, False, True]) + + with pytest.raises(ValueError): + bool_arr.to_numpy() + + result = bool_arr.to_numpy(zero_copy_only=False) + expected = np.array([True, False, True]) + np.testing.assert_array_equal(result, expected) + + null_arr = pa.array([None, None, None]) + + with pytest.raises(ValueError): + null_arr.to_numpy() + + result = null_arr.to_numpy(zero_copy_only=False) + expected = np.array([None, None, None], dtype=object) + np.testing.assert_array_equal(result, expected) + + arr = pa.array([1, 2, None]) + + with pytest.raises(ValueError, match="with 1 nulls"): + arr.to_numpy() + + +def test_to_numpy_writable(): + arr = pa.array(range(10)) + np_arr = arr.to_numpy() + + # by default not writable for zero-copy conversion + with pytest.raises(ValueError): + np_arr[0] = 10 + + np_arr2 = arr.to_numpy(zero_copy_only=False, writable=True) + np_arr2[0] = 10 + assert 
arr[0].as_py() == 0 + + # when asking for writable, cannot do zero-copy + with pytest.raises(ValueError): + arr.to_numpy(zero_copy_only=True, writable=True) + + +@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns']) +@pytest.mark.parametrize('tz', [None, "UTC"]) +def test_to_numpy_datetime64(unit, tz): + arr = pa.array([1, 2, 3], pa.timestamp(unit, tz=tz)) + expected = np.array([1, 2, 3], dtype="datetime64[{}]".format(unit)) + np_arr = arr.to_numpy() + np.testing.assert_array_equal(np_arr, expected) + + +@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns']) +def test_to_numpy_timedelta64(unit): + arr = pa.array([1, 2, 3], pa.duration(unit)) + expected = np.array([1, 2, 3], dtype="timedelta64[{}]".format(unit)) + np_arr = arr.to_numpy() + np.testing.assert_array_equal(np_arr, expected) + + +def test_to_numpy_dictionary(): + # ARROW-7591 + arr = pa.array(["a", "b", "a"]).dictionary_encode() + expected = np.array(["a", "b", "a"], dtype=object) + np_arr = arr.to_numpy(zero_copy_only=False) + np.testing.assert_array_equal(np_arr, expected) + + +@pytest.mark.pandas +def test_to_pandas_zero_copy(): + import gc + + arr = pa.array(range(10)) + + for i in range(10): + series = arr.to_pandas() + assert sys.getrefcount(series) == 2 + series = None # noqa + + assert sys.getrefcount(arr) == 2 + + for i in range(10): + arr = pa.array(range(10)) + series = arr.to_pandas() + arr = None + gc.collect() + + # Ensure base is still valid + + # Because of py.test's assert inspection magic, if you put getrefcount + # on the line being examined, it will be 1 higher than you expect + base_refcount = sys.getrefcount(series.values.base) + assert base_refcount == 2 + series.sum() + + +@pytest.mark.nopandas +@pytest.mark.pandas +def test_asarray(): + # ensure this is tested both when pandas is present or not (ARROW-6564) + + arr = pa.array(range(4)) + + # The iterator interface gives back an array of Int64Value's + np_arr = np.asarray([_ for _ in arr]) + assert np_arr.tolist() == [0, 1, 2, 3] + assert np_arr.dtype == np.dtype('O') + assert isinstance(np_arr[0], pa.lib.Int64Value) + + # Calling with the arrow array gives back an array with 'int64' dtype + np_arr = np.asarray(arr) + assert np_arr.tolist() == [0, 1, 2, 3] + assert np_arr.dtype == np.dtype('int64') + + # An optional type can be specified when calling np.asarray + np_arr = np.asarray(arr, dtype='str') + assert np_arr.tolist() == ['0', '1', '2', '3'] + + # If PyArrow array has null values, numpy type will be changed as needed + # to support nulls. + arr = pa.array([0, 1, 2, None]) + assert arr.type == pa.int64() + np_arr = np.asarray(arr) + elements = np_arr.tolist() + assert elements[:3] == [0., 1., 2.] 
+ assert np.isnan(elements[3]) + assert np_arr.dtype == np.dtype('float64') + + # DictionaryType data will be converted to dense numpy array + arr = pa.DictionaryArray.from_arrays( + pa.array([0, 1, 2, 0, 1]), pa.array(['a', 'b', 'c'])) + np_arr = np.asarray(arr) + assert np_arr.dtype == np.dtype('object') + assert np_arr.tolist() == ['a', 'b', 'c', 'a', 'b'] + + +@pytest.mark.parametrize('ty', [ + None, + pa.null(), + pa.int8(), + pa.string() +]) +def test_nulls(ty): + arr = pa.nulls(3, type=ty) + expected = pa.array([None, None, None], type=ty) + + assert len(arr) == 3 + assert arr.equals(expected) + + if ty is None: + assert arr.type == pa.null() + else: + assert arr.type == ty + + +def test_array_from_scalar(): + pytz = pytest.importorskip("pytz") + + today = datetime.date.today() + now = datetime.datetime.now() + now_utc = now.replace(tzinfo=pytz.utc) + now_with_tz = now_utc.astimezone(pytz.timezone('US/Eastern')) + oneday = datetime.timedelta(days=1) + + cases = [ + (None, 1, pa.array([None])), + (None, 10, pa.nulls(10)), + (-1, 3, pa.array([-1, -1, -1], type=pa.int64())), + (2.71, 2, pa.array([2.71, 2.71], type=pa.float64())), + ("string", 4, pa.array(["string"] * 4)), + ( + pa.scalar(8, type=pa.uint8()), + 17, + pa.array([8] * 17, type=pa.uint8()) + ), + (pa.scalar(None), 3, pa.array([None, None, None])), + (pa.scalar(True), 11, pa.array([True] * 11)), + (today, 2, pa.array([today] * 2)), + (now, 10, pa.array([now] * 10)), + ( + now_with_tz, + 2, + pa.array( + [now_utc] * 2, + type=pa.timestamp('us', tz=pytz.timezone('US/Eastern')) + ) + ), + (now.time(), 9, pa.array([now.time()] * 9)), + (oneday, 4, pa.array([oneday] * 4)), + (False, 9, pa.array([False] * 9)), + ([1, 2], 2, pa.array([[1, 2], [1, 2]])), + ( + pa.scalar([-1, 3], type=pa.large_list(pa.int8())), + 5, + pa.array([[-1, 3]] * 5, type=pa.large_list(pa.int8())) + ), + ({'a': 1, 'b': 2}, 3, pa.array([{'a': 1, 'b': 2}] * 3)) + ] + + for value, size, expected in cases: + arr = pa.repeat(value, size) + assert len(arr) == size + assert arr.type.equals(expected.type) + assert arr.equals(expected) + if expected.type == pa.null(): + assert arr.null_count == size + else: + assert arr.null_count == 0 + + +def test_array_from_dictionary_scalar(): + dictionary = ['foo', 'bar', 'baz'] + arr = pa.DictionaryArray.from_arrays([2, 1, 2, 0], dictionary=dictionary) + + result = pa.repeat(arr[0], 5) + expected = pa.DictionaryArray.from_arrays([2] * 5, dictionary=dictionary) + assert result.equals(expected) + + result = pa.repeat(arr[3], 5) + expected = pa.DictionaryArray.from_arrays([0] * 5, dictionary=dictionary) + assert result.equals(expected) + + +def test_array_getitem(): + arr = pa.array(range(10, 15)) + lst = arr.to_pylist() + + for idx in range(-len(arr), len(arr)): + assert arr[idx].as_py() == lst[idx] + for idx in range(-2 * len(arr), -len(arr)): + with pytest.raises(IndexError): + arr[idx] + for idx in range(len(arr), 2 * len(arr)): + with pytest.raises(IndexError): + arr[idx] + + # check that numpy scalars are supported + for idx in range(-len(arr), len(arr)): + assert arr[np.int32(idx)].as_py() == lst[idx] + + +def test_array_slice(): + arr = pa.array(range(10)) + + sliced = arr.slice(2) + expected = pa.array(range(2, 10)) + assert sliced.equals(expected) + + sliced2 = arr.slice(2, 4) + expected2 = pa.array(range(2, 6)) + assert sliced2.equals(expected2) + + # 0 offset + assert arr.slice(0).equals(arr) + + # Slice past end of array + assert len(arr.slice(len(arr))) == 0 + assert len(arr.slice(len(arr) + 2)) == 0 + assert 
len(arr.slice(len(arr) + 2, 100)) == 0 + + with pytest.raises(IndexError): + arr.slice(-1) + + with pytest.raises(ValueError): + arr.slice(2, -1) + + # Test slice notation + assert arr[2:].equals(arr.slice(2)) + assert arr[2:5].equals(arr.slice(2, 3)) + assert arr[-5:].equals(arr.slice(len(arr) - 5)) + + n = len(arr) + for start in range(-n * 2, n * 2): + for stop in range(-n * 2, n * 2): + res = arr[start:stop] + res.validate() + expected = arr.to_pylist()[start:stop] + assert res.to_pylist() == expected + assert res.to_numpy().tolist() == expected + + +def test_array_slice_negative_step(): + # ARROW-2714 + np_arr = np.arange(20) + arr = pa.array(np_arr) + chunked_arr = pa.chunked_array([arr]) + + cases = [ + slice(None, None, -1), + slice(None, 6, -2), + slice(10, 6, -2), + slice(8, None, -2), + slice(2, 10, -2), + slice(10, 2, -2), + slice(None, None, 2), + slice(0, 10, 2), + slice(15, -25, -1), # GH-38768 + slice(-22, -22, -1), # GH-40642 + ] + + for case in cases: + result = arr[case] + expected = pa.array(np_arr[case]) + assert result.equals(expected) + + result = pa.record_batch([arr], names=['f0'])[case] + expected = pa.record_batch([expected], names=['f0']) + assert result.equals(expected) + + result = chunked_arr[case] + expected = pa.chunked_array([np_arr[case]]) + assert result.equals(expected) + + +def test_array_diff(): + # ARROW-6252 + arr1 = pa.array(['foo'], type=pa.utf8()) + arr2 = pa.array(['foo', 'bar', None], type=pa.utf8()) + arr3 = pa.array([1, 2, 3]) + arr4 = pa.array([[], [1], None], type=pa.list_(pa.int64())) + + assert arr1.diff(arr1) == '' + assert arr1.diff(arr2) == ''' +@@ -1, +1 @@ ++"bar" ++null +''' + assert arr1.diff(arr3).strip() == '# Array types differed: string vs int64' + assert arr1.diff(arr3).strip() == '# Array types differed: string vs int64' + assert arr1.diff(arr4).strip() == ('# Array types differed: string vs ' + 'list') + + +def test_array_iter(): + arr = pa.array(range(10)) + + for i, j in zip(range(10), arr): + assert i == j.as_py() + + assert isinstance(arr, Iterable) + + +def test_struct_array_slice(): + # ARROW-2311: slicing nested arrays needs special care + ty = pa.struct([pa.field('a', pa.int8()), + pa.field('b', pa.float32())]) + arr = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) + assert arr[1:].to_pylist() == [{'a': 3, 'b': 4.5}, + {'a': 5, 'b': 6.5}] + + +def test_array_factory_invalid_type(): + + class MyObject: + pass + + arr = np.array([MyObject()]) + with pytest.raises(ValueError): + pa.array(arr) + + +def test_array_ref_to_ndarray_base(): + arr = np.array([1, 2, 3]) + + refcount = sys.getrefcount(arr) + arr2 = pa.array(arr) # noqa + assert sys.getrefcount(arr) == (refcount + 1) + + +def test_array_eq(): + # ARROW-2150 / ARROW-9445: we define the __eq__ behavior to be + # data equality (not element-wise equality) + arr1 = pa.array([1, 2, 3], type=pa.int32()) + arr2 = pa.array([1, 2, 3], type=pa.int32()) + arr3 = pa.array([1, 2, 3], type=pa.int64()) + + assert (arr1 == arr2) is True + assert (arr1 != arr2) is False + assert (arr1 == arr3) is False + assert (arr1 != arr3) is True + + assert (arr1 == 1) is False + assert (arr1 == None) is False # noqa: E711 + + +def test_array_from_buffers(): + values_buf = pa.py_buffer(np.int16([4, 5, 6, 7])) + nulls_buf = pa.py_buffer(np.uint8([0b00001101])) + arr = pa.Array.from_buffers(pa.int16(), 4, [nulls_buf, values_buf]) + assert arr.type == pa.int16() + assert arr.to_pylist() == [4, None, 6, 7] + + arr = pa.Array.from_buffers(pa.int16(), 4, [None, values_buf]) + assert arr.type == 
pa.int16() + assert arr.to_pylist() == [4, 5, 6, 7] + + arr = pa.Array.from_buffers(pa.int16(), 3, [nulls_buf, values_buf], + offset=1) + assert arr.type == pa.int16() + assert arr.to_pylist() == [None, 6, 7] + + with pytest.raises(TypeError): + pa.Array.from_buffers(pa.int16(), 3, ['', ''], offset=1) + + +def test_string_binary_from_buffers(): + array = pa.array(["a", None, "b", "c"]) + + buffers = array.buffers() + copied = pa.StringArray.from_buffers( + len(array), buffers[1], buffers[2], buffers[0], array.null_count, + array.offset) + assert copied.to_pylist() == ["a", None, "b", "c"] + + binary_copy = pa.Array.from_buffers(pa.binary(), len(array), + array.buffers(), array.null_count, + array.offset) + assert binary_copy.to_pylist() == [b"a", None, b"b", b"c"] + + copied = pa.StringArray.from_buffers( + len(array), buffers[1], buffers[2], buffers[0]) + assert copied.to_pylist() == ["a", None, "b", "c"] + + sliced = array[1:] + buffers = sliced.buffers() + copied = pa.StringArray.from_buffers( + len(sliced), buffers[1], buffers[2], buffers[0], -1, sliced.offset) + assert copied.to_pylist() == [None, "b", "c"] + assert copied.null_count == 1 + + # Slice but exclude all null entries so that we don't need to pass + # the null bitmap. + sliced = array[2:] + buffers = sliced.buffers() + copied = pa.StringArray.from_buffers( + len(sliced), buffers[1], buffers[2], None, -1, sliced.offset) + assert copied.to_pylist() == ["b", "c"] + assert copied.null_count == 0 + + +@pytest.mark.parametrize('list_type_factory', [ + pa.list_, pa.large_list, pa.list_view, pa.large_list_view]) +def test_list_from_buffers(list_type_factory): + ty = list_type_factory(pa.int16()) + array = pa.array([[0, 1, 2], None, [], [3, 4, 5]], type=ty) + assert array.type == ty + + buffers = array.buffers() + + with pytest.raises(ValueError): + # No children + pa.Array.from_buffers(ty, 4, buffers[:ty.num_buffers]) + + child = pa.Array.from_buffers(pa.int16(), 6, buffers[ty.num_buffers:]) + copied = pa.Array.from_buffers(ty, 4, buffers[:ty.num_buffers], children=[child]) + assert copied.equals(array) + + with pytest.raises(ValueError): + # too many children + pa.Array.from_buffers(ty, 4, buffers[:ty.num_buffers], + children=[child, child]) + + +def test_struct_from_buffers(): + ty = pa.struct([pa.field('a', pa.int16()), pa.field('b', pa.utf8())]) + array = pa.array([{'a': 0, 'b': 'foo'}, None, {'a': 5, 'b': ''}], + type=ty) + buffers = array.buffers() + + with pytest.raises(ValueError): + # No children + pa.Array.from_buffers(ty, 3, [None, buffers[1]]) + + children = [pa.Array.from_buffers(pa.int16(), 3, buffers[1:3]), + pa.Array.from_buffers(pa.utf8(), 3, buffers[3:])] + copied = pa.Array.from_buffers(ty, 3, buffers[:1], children=children) + assert copied.equals(array) + + with pytest.raises(ValueError): + # not enough many children + pa.Array.from_buffers(ty, 3, [buffers[0]], + children=children[:1]) + + +def test_struct_from_arrays(): + a = pa.array([4, 5, 6], type=pa.int64()) + b = pa.array(["bar", None, ""]) + c = pa.array([[1, 2], None, [3, None]]) + expected_list = [ + {'a': 4, 'b': 'bar', 'c': [1, 2]}, + {'a': 5, 'b': None, 'c': None}, + {'a': 6, 'b': '', 'c': [3, None]}, + ] + + # From field names + arr = pa.StructArray.from_arrays([a, b, c], ["a", "b", "c"]) + assert arr.type == pa.struct( + [("a", a.type), ("b", b.type), ("c", c.type)]) + assert arr.to_pylist() == expected_list + + with pytest.raises(ValueError): + pa.StructArray.from_arrays([a, b, c], ["a", "b"]) + + arr = pa.StructArray.from_arrays([], []) + assert 
arr.type == pa.struct([]) + assert arr.to_pylist() == [] + + # From fields + fa = pa.field("a", a.type, nullable=False) + fb = pa.field("b", b.type) + fc = pa.field("c", c.type) + arr = pa.StructArray.from_arrays([a, b, c], fields=[fa, fb, fc]) + assert arr.type == pa.struct([fa, fb, fc]) + assert not arr.type[0].nullable + assert arr.to_pylist() == expected_list + + with pytest.raises(ValueError): + pa.StructArray.from_arrays([a, b, c], fields=[fa, fb]) + + arr = pa.StructArray.from_arrays([], fields=[]) + assert arr.type == pa.struct([]) + assert arr.to_pylist() == [] + + # Inconsistent fields + fa2 = pa.field("a", pa.int32()) + with pytest.raises(ValueError, match="int64 vs int32"): + pa.StructArray.from_arrays([a, b, c], fields=[fa2, fb, fc]) + + arrays = [a, b, c] + fields = [fa, fb, fc] + # With mask + mask = pa.array([True, False, False]) + arr = pa.StructArray.from_arrays(arrays, fields=fields, mask=mask) + assert arr.to_pylist() == [None] + expected_list[1:] + + arr = pa.StructArray.from_arrays(arrays, names=['a', 'b', 'c'], mask=mask) + assert arr.to_pylist() == [None] + expected_list[1:] + + # Bad masks + with pytest.raises(TypeError, match='Mask must be'): + pa.StructArray.from_arrays(arrays, fields, mask=[True, False, False]) + + with pytest.raises(ValueError, match='not contain nulls'): + pa.StructArray.from_arrays( + arrays, fields, mask=pa.array([True, False, None])) + + with pytest.raises(TypeError, match='Mask must be'): + pa.StructArray.from_arrays( + arrays, fields, mask=pa.chunked_array([mask])) + + # Non-empty array with no fields https://github.com/apache/arrow/issues/15109 + arr = pa.StructArray.from_arrays([], [], mask=mask) + assert arr.is_null() == mask + assert arr.to_pylist() == [None, {}, {}] + + +def test_struct_array_from_chunked(): + # ARROW-11780 + # Check that we don't segfault when trying to build + # a StructArray from a chunked array. 
+ chunked_arr = pa.chunked_array([[1, 2, 3], [4, 5, 6]]) + + with pytest.raises(TypeError, match="Expected Array"): + pa.StructArray.from_arrays([chunked_arr], ["foo"]) + + +@pytest.mark.parametrize("offset", (0, 1)) +def test_dictionary_from_buffers(offset): + a = pa.array(["one", "two", "three", "two", "one"]).dictionary_encode() + b = pa.DictionaryArray.from_buffers(a.type, len(a)-offset, + a.indices.buffers(), a.dictionary, + offset=offset) + assert a[offset:] == b + + +def test_dictionary_from_numpy(): + indices = np.repeat([0, 1, 2], 2) + dictionary = np.array(['foo', 'bar', 'baz'], dtype=object) + mask = np.array([False, False, True, False, False, False]) + + d1 = pa.DictionaryArray.from_arrays(indices, dictionary) + d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask) + + assert d1.indices.to_pylist() == indices.tolist() + assert d1.indices.to_pylist() == indices.tolist() + assert d1.dictionary.to_pylist() == dictionary.tolist() + assert d2.dictionary.to_pylist() == dictionary.tolist() + + for i in range(len(indices)): + assert d1[i].as_py() == dictionary[indices[i]] + + if mask[i]: + assert d2[i].as_py() is None + else: + assert d2[i].as_py() == dictionary[indices[i]] + + +def test_dictionary_to_numpy(): + expected = pa.array( + ["foo", "bar", None, "foo"] + ).to_numpy(zero_copy_only=False) + a = pa.DictionaryArray.from_arrays( + pa.array([0, 1, None, 0]), + pa.array(['foo', 'bar']) + ) + np.testing.assert_array_equal(a.to_numpy(zero_copy_only=False), + expected) + + with pytest.raises(pa.ArrowInvalid): + # If this would be changed to no longer raise in the future, + # ensure to test the actual result because, currently, to_numpy takes + # for granted that when zero_copy_only=True there will be no nulls + # (it's the decoding of the DictionaryArray that handles the nulls and + # this is only activated with zero_copy_only=False) + a.to_numpy(zero_copy_only=True) + + anonulls = pa.DictionaryArray.from_arrays( + pa.array([0, 1, 1, 0]), + pa.array(['foo', 'bar']) + ) + expected = pa.array( + ["foo", "bar", "bar", "foo"] + ).to_numpy(zero_copy_only=False) + np.testing.assert_array_equal(anonulls.to_numpy(zero_copy_only=False), + expected) + + with pytest.raises(pa.ArrowInvalid): + anonulls.to_numpy(zero_copy_only=True) + + afloat = pa.DictionaryArray.from_arrays( + pa.array([0, 1, 1, 0]), + pa.array([13.7, 11.0]) + ) + expected = pa.array([13.7, 11.0, 11.0, 13.7]).to_numpy() + np.testing.assert_array_equal(afloat.to_numpy(zero_copy_only=True), + expected) + np.testing.assert_array_equal(afloat.to_numpy(zero_copy_only=False), + expected) + + afloat2 = pa.DictionaryArray.from_arrays( + pa.array([0, 1, None, 0]), + pa.array([13.7, 11.0]) + ) + expected = pa.array( + [13.7, 11.0, None, 13.7] + ).to_numpy(zero_copy_only=False) + np.testing.assert_allclose( + afloat2.to_numpy(zero_copy_only=False), + expected, + equal_nan=True + ) + + # Testing for integers can reveal problems related to dealing + # with None values, as a numpy array of int dtype + # can't contain NaN nor None. 
+ aints = pa.DictionaryArray.from_arrays( + pa.array([0, 1, None, 0]), + pa.array([7, 11]) + ) + expected = pa.array([7, 11, None, 7]).to_numpy(zero_copy_only=False) + np.testing.assert_allclose( + aints.to_numpy(zero_copy_only=False), + expected, + equal_nan=True + ) + + +def test_dictionary_from_boxed_arrays(): + indices = np.repeat([0, 1, 2], 2) + dictionary = np.array(['foo', 'bar', 'baz'], dtype=object) + + iarr = pa.array(indices) + darr = pa.array(dictionary) + + d1 = pa.DictionaryArray.from_arrays(iarr, darr) + + assert d1.indices.to_pylist() == indices.tolist() + assert d1.dictionary.to_pylist() == dictionary.tolist() + + for i in range(len(indices)): + assert d1[i].as_py() == dictionary[indices[i]] + + +def test_dictionary_from_arrays_boundscheck(): + indices1 = pa.array([0, 1, 2, 0, 1, 2]) + indices2 = pa.array([0, -1, 2]) + indices3 = pa.array([0, 1, 2, 3]) + + dictionary = pa.array(['foo', 'bar', 'baz']) + + # Works fine + pa.DictionaryArray.from_arrays(indices1, dictionary) + + with pytest.raises(pa.ArrowException): + pa.DictionaryArray.from_arrays(indices2, dictionary) + + with pytest.raises(pa.ArrowException): + pa.DictionaryArray.from_arrays(indices3, dictionary) + + # If we are confident that the indices are "safe" we can pass safe=False to + # disable the boundschecking + pa.DictionaryArray.from_arrays(indices2, dictionary, safe=False) + + +def test_dictionary_indices(): + # https://issues.apache.org/jira/browse/ARROW-6882 + indices = pa.array([0, 1, 2, 0, 1, 2]) + dictionary = pa.array(['foo', 'bar', 'baz']) + arr = pa.DictionaryArray.from_arrays(indices, dictionary) + arr.indices.validate(full=True) + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory'), + [(pa.ListArray, pa.list_), + (pa.LargeListArray, pa.large_list)]) +def test_list_from_arrays(list_array_type, list_type_factory): + offsets_arr = np.array([0, 2, 5, 8], dtype='i4') + offsets = pa.array(offsets_arr, type='int32') + pyvalues = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] + values = pa.array(pyvalues, type='binary') + + result = list_array_type.from_arrays(offsets, values) + expected = pa.array([pyvalues[:2], pyvalues[2:5], pyvalues[5:8]], + type=list_type_factory(pa.binary())) + + assert result.equals(expected) + + # With specified type + typ = list_type_factory(pa.field("name", pa.binary())) + result = list_array_type.from_arrays(offsets, values, typ) + assert result.type == typ + assert result.type.value_field.name == "name" + + # With nulls + offsets = [0, None, 2, 6] + values = [b'a', b'b', b'c', b'd', b'e', b'f'] + + result = list_array_type.from_arrays(offsets, values) + expected = pa.array([values[:2], None, values[2:]], + type=list_type_factory(pa.binary())) + + assert result.equals(expected) + + # Another edge case + offsets2 = [0, 2, None, 6] + result = list_array_type.from_arrays(offsets2, values) + expected = pa.array([values[:2], values[2:], None], + type=list_type_factory(pa.binary())) + assert result.equals(expected) + + # raise on invalid array + offsets = [1, 3, 10] + values = np.arange(5) + with pytest.raises(ValueError): + list_array_type.from_arrays(offsets, values) + + # Non-monotonic offsets + offsets = [0, 3, 2, 6] + values = list(range(6)) + result = list_array_type.from_arrays(offsets, values) + with pytest.raises(ValueError): + result.validate(full=True) + + # mismatching type + typ = list_type_factory(pa.binary()) + with pytest.raises(TypeError): + list_array_type.from_arrays(offsets, values, type=typ) + + +@pytest.mark.parametrize(('list_array_type', 
'list_type_factory'), ( + (pa.ListArray, pa.list_), + (pa.LargeListArray, pa.large_list) +)) +@pytest.mark.parametrize("arr", ( + [None, [0]], + [None, [0, None], [0]], + [[0], [1]], +)) +def test_list_array_types_from_arrays( + list_array_type, list_type_factory, arr +): + arr = pa.array(arr, list_type_factory(pa.int8())) + reconstructed_arr = list_array_type.from_arrays( + arr.offsets, arr.values, mask=arr.is_null()) + assert arr == reconstructed_arr + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory'), ( + (pa.ListArray, pa.list_), + (pa.LargeListArray, pa.large_list) +)) +def test_list_array_types_from_arrays_fail(list_array_type, list_type_factory): + # Fail when manual offsets include nulls and mask passed + # ListArray.offsets doesn't report nulls. + + # This test case arr.offsets == [0, 1, 1, 3, 4] + arr = pa.array([[0], None, [0, None], [0]], list_type_factory(pa.int8())) + offsets = pa.array([0, None, 1, 3, 4]) + + # Using array's offset has no nulls; gives empty lists on top level + reconstructed_arr = list_array_type.from_arrays(arr.offsets, arr.values) + assert reconstructed_arr.to_pylist() == [[0], [], [0, None], [0]] + + # Manually specifying offsets (with nulls) is same as mask at top level + reconstructed_arr = list_array_type.from_arrays(offsets, arr.values) + assert arr == reconstructed_arr + reconstructed_arr = list_array_type.from_arrays(arr.offsets, + arr.values, + mask=arr.is_null()) + assert arr == reconstructed_arr + + # But using both is ambiguous, in this case `offsets` has nulls + with pytest.raises(ValueError, match="Ambiguous to specify both "): + list_array_type.from_arrays(offsets, arr.values, mask=arr.is_null()) + + # Not supported to reconstruct from a slice. + arr_slice = arr[1:] + msg = "Null bitmap with offsets slice not supported." 
+ with pytest.raises(NotImplementedError, match=msg): + list_array_type.from_arrays( + arr_slice.offsets, arr_slice.values, mask=arr_slice.is_null()) + + +def test_map_labelled(): + # ARROW-13735 + t = pa.map_(pa.field("name", "string", nullable=False), "int64") + arr = pa.array([[('a', 1), ('b', 2)], [('c', 3)]], type=t) + assert arr.type.key_field == pa.field("name", pa.utf8(), nullable=False) + assert arr.type.item_field == pa.field("value", pa.int64()) + assert len(arr) == 2 + + +def test_map_from_dict(): + # ARROW-17832 + tup_arr = pa.array([[('a', 1), ('b', 2)], [('c', 3)]], + pa.map_(pa.string(), pa.int64())) + dict_arr = pa.array([{'a': 1, 'b': 2}, {'c': 3}], + pa.map_(pa.string(), pa.int64())) + + assert tup_arr.equals(dict_arr) + + +def test_map_from_arrays(): + offsets_arr = np.array([0, 2, 5, 8], dtype='i4') + offsets = pa.array(offsets_arr, type='int32') + pykeys = [b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h'] + pyitems = list(range(len(pykeys))) + pypairs = list(zip(pykeys, pyitems)) + pyentries = [pypairs[:2], pypairs[2:5], pypairs[5:8]] + keys = pa.array(pykeys, type='binary') + items = pa.array(pyitems, type='i4') + + result = pa.MapArray.from_arrays(offsets, keys, items) + expected = pa.array(pyentries, type=pa.map_(pa.binary(), pa.int32())) + + assert result.equals(expected) + + # With nulls + offsets = [0, None, 2, 6] + pykeys = [b'a', b'b', b'c', b'd', b'e', b'f'] + pyitems = [1, 2, 3, None, 4, 5] + pypairs = list(zip(pykeys, pyitems)) + pyentries = [pypairs[:2], None, pypairs[2:]] + keys = pa.array(pykeys, type='binary') + items = pa.array(pyitems, type='i4') + + result = pa.MapArray.from_arrays(offsets, keys, items) + expected = pa.array(pyentries, type=pa.map_(pa.binary(), pa.int32())) + + assert result.equals(expected) + + # pass in the type explicitly + result = pa.MapArray.from_arrays(offsets, keys, items, pa.map_( + keys.type, + items.type + )) + assert result.equals(expected) + + # pass in invalid types + with pytest.raises(pa.ArrowTypeError, match='Expected map type, got string'): + pa.MapArray.from_arrays(offsets, keys, items, pa.string()) + + with pytest.raises(pa.ArrowTypeError, match='Mismatching map items type'): + pa.MapArray.from_arrays(offsets, keys, items, pa.map_( + keys.type, + # Larger than the original i4 + pa.int64() + )) + + # check invalid usage + offsets = [0, 1, 3, 5] + keys = np.arange(5) + items = np.arange(5) + _ = pa.MapArray.from_arrays(offsets, keys, items) + + # raise on invalid offsets + with pytest.raises(ValueError): + pa.MapArray.from_arrays(offsets + [6], keys, items) + + # raise on length of keys != items + with pytest.raises(ValueError): + pa.MapArray.from_arrays(offsets, keys, np.concatenate([items, items])) + + # raise on keys with null + keys_with_null = list(keys)[:-1] + [None] + assert len(keys_with_null) == len(items) + with pytest.raises(ValueError): + pa.MapArray.from_arrays(offsets, keys_with_null, items) + + +def test_fixed_size_list_from_arrays(): + values = pa.array(range(12), pa.int64()) + result = pa.FixedSizeListArray.from_arrays(values, 4) + assert result.to_pylist() == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + assert result.type.equals(pa.list_(pa.int64(), 4)) + + typ = pa.list_(pa.field("name", pa.int64()), 4) + result = pa.FixedSizeListArray.from_arrays(values, type=typ) + assert result.to_pylist() == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + assert result.type.equals(typ) + assert result.type.value_field.name == "name" + + result = pa.FixedSizeListArray.from_arrays(values, + type=typ, + 
mask=pa.array([False, True, False])) + assert result.to_pylist() == [[0, 1, 2, 3], None, [8, 9, 10, 11]] + + result = pa.FixedSizeListArray.from_arrays(values, + list_size=4, + mask=pa.array([False, True, False])) + assert result.to_pylist() == [[0, 1, 2, 3], None, [8, 9, 10, 11]] + + # raise on invalid values / list_size + with pytest.raises(ValueError): + pa.FixedSizeListArray.from_arrays(values, -4) + + with pytest.raises(ValueError): + # array with list size 0 cannot be constructed with from_arrays + pa.FixedSizeListArray.from_arrays(pa.array([], pa.int64()), 0) + + with pytest.raises(ValueError): + # length of values not multiple of 5 + pa.FixedSizeListArray.from_arrays(values, 5) + + typ = pa.list_(pa.int64(), 5) + with pytest.raises(ValueError): + pa.FixedSizeListArray.from_arrays(values, type=typ) + + # raise on mismatching values type + typ = pa.list_(pa.float64(), 4) + with pytest.raises(TypeError): + pa.FixedSizeListArray.from_arrays(values, type=typ) + + # raise on specifying none or both of list_size / type + with pytest.raises(ValueError): + pa.FixedSizeListArray.from_arrays(values) + + typ = pa.list_(pa.int64(), 4) + with pytest.raises(ValueError): + pa.FixedSizeListArray.from_arrays(values, list_size=4, type=typ) + + +def test_variable_list_from_arrays(): + values = pa.array([1, 2, 3, 4], pa.int64()) + offsets = pa.array([0, 2, 4]) + result = pa.ListArray.from_arrays(offsets, values) + assert result.to_pylist() == [[1, 2], [3, 4]] + assert result.type.equals(pa.list_(pa.int64())) + + offsets = pa.array([0, None, 2, 4]) + result = pa.ListArray.from_arrays(offsets, values) + assert result.to_pylist() == [[1, 2], None, [3, 4]] + + # raise if offset out of bounds + with pytest.raises(ValueError): + pa.ListArray.from_arrays(pa.array([-1, 2, 4]), values) + + with pytest.raises(ValueError): + pa.ListArray.from_arrays(pa.array([0, 2, 5]), values) + + +def test_union_from_dense(): + binary = pa.array([b'a', b'b', b'c', b'd'], type='binary') + int64 = pa.array([1, 2, 3], type='int64') + types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') + logical_types = pa.array([11, 13, 11, 11, 13, 13, 11], type='int8') + value_offsets = pa.array([0, 0, 1, 2, 1, 2, 3], type='int32') + py_value = [b'a', 1, b'b', b'c', 2, 3, b'd'] + + def check_result(result, expected_field_names, expected_type_codes, + expected_type_code_values): + result.validate(full=True) + actual_field_names = [result.type[i].name + for i in range(result.type.num_fields)] + assert actual_field_names == expected_field_names + assert result.type.mode == "dense" + assert result.type.type_codes == expected_type_codes + assert result.to_pylist() == py_value + assert expected_type_code_values.equals(result.type_codes) + assert value_offsets.equals(result.offsets) + assert result.field(0).equals(binary) + assert result.field(1).equals(int64) + with pytest.raises(KeyError): + result.field(-1) + with pytest.raises(KeyError): + result.field(2) + + # without field names and type codes + check_result(pa.UnionArray.from_dense(types, value_offsets, + [binary, int64]), + expected_field_names=['0', '1'], + expected_type_codes=[0, 1], + expected_type_code_values=types) + + # with field names + check_result(pa.UnionArray.from_dense(types, value_offsets, + [binary, int64], + ['bin', 'int']), + expected_field_names=['bin', 'int'], + expected_type_codes=[0, 1], + expected_type_code_values=types) + + # with type codes + check_result(pa.UnionArray.from_dense(logical_types, value_offsets, + [binary, int64], + type_codes=[11, 13]), + 
expected_field_names=['0', '1'], + expected_type_codes=[11, 13], + expected_type_code_values=logical_types) + + # with field names and type codes + check_result(pa.UnionArray.from_dense(logical_types, value_offsets, + [binary, int64], + ['bin', 'int'], [11, 13]), + expected_field_names=['bin', 'int'], + expected_type_codes=[11, 13], + expected_type_code_values=logical_types) + + # Bad type ids + arr = pa.UnionArray.from_dense(logical_types, value_offsets, + [binary, int64]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64], + type_codes=[11, 13]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + + # Offset larger than child size + bad_offsets = pa.array([0, 0, 1, 2, 1, 2, 4], type='int32') + arr = pa.UnionArray.from_dense(types, bad_offsets, [binary, int64]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + + +def test_union_from_sparse(): + binary = pa.array([b'a', b' ', b'b', b'c', b' ', b' ', b'd'], + type='binary') + int64 = pa.array([0, 1, 0, 0, 2, 3, 0], type='int64') + types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') + logical_types = pa.array([11, 13, 11, 11, 13, 13, 11], type='int8') + py_value = [b'a', 1, b'b', b'c', 2, 3, b'd'] + + def check_result(result, expected_field_names, expected_type_codes, + expected_type_code_values): + result.validate(full=True) + assert result.to_pylist() == py_value + actual_field_names = [result.type[i].name + for i in range(result.type.num_fields)] + assert actual_field_names == expected_field_names + assert result.type.mode == "sparse" + assert result.type.type_codes == expected_type_codes + assert expected_type_code_values.equals(result.type_codes) + assert result.field(0).equals(binary) + assert result.field(1).equals(int64) + with pytest.raises(pa.ArrowTypeError): + result.offsets + with pytest.raises(KeyError): + result.field(-1) + with pytest.raises(KeyError): + result.field(2) + + # without field names and type codes + check_result(pa.UnionArray.from_sparse(types, [binary, int64]), + expected_field_names=['0', '1'], + expected_type_codes=[0, 1], + expected_type_code_values=types) + + # with field names + check_result(pa.UnionArray.from_sparse(types, [binary, int64], + ['bin', 'int']), + expected_field_names=['bin', 'int'], + expected_type_codes=[0, 1], + expected_type_code_values=types) + + # with type codes + check_result(pa.UnionArray.from_sparse(logical_types, [binary, int64], + type_codes=[11, 13]), + expected_field_names=['0', '1'], + expected_type_codes=[11, 13], + expected_type_code_values=logical_types) + + # with field names and type codes + check_result(pa.UnionArray.from_sparse(logical_types, [binary, int64], + ['bin', 'int'], + [11, 13]), + expected_field_names=['bin', 'int'], + expected_type_codes=[11, 13], + expected_type_code_values=logical_types) + + # Bad type ids + arr = pa.UnionArray.from_sparse(logical_types, [binary, int64]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + arr = pa.UnionArray.from_sparse(types, [binary, int64], + type_codes=[11, 13]) + with pytest.raises(pa.ArrowInvalid): + arr.validate(full=True) + + # Invalid child length + with pytest.raises(pa.ArrowInvalid): + arr = pa.UnionArray.from_sparse(logical_types, [binary, int64[1:]]) + + +def test_union_array_to_pylist_with_nulls(): + # ARROW-9556 + arr = pa.UnionArray.from_sparse( + pa.array([0, 1, 0, 0, 1], type=pa.int8()), + [ + pa.array([0.0, 1.1, None, 3.3, 4.4]), + pa.array([True, None, False, True, False]), + 
] + ) + assert arr.to_pylist() == [0.0, None, None, 3.3, False] + + arr = pa.UnionArray.from_dense( + pa.array([0, 1, 0, 0, 0, 1, 1], type=pa.int8()), + pa.array([0, 0, 1, 2, 3, 1, 2], type=pa.int32()), + [ + pa.array([0.0, 1.1, None, 3.3]), + pa.array([True, None, False]) + ] + ) + assert arr.to_pylist() == [0.0, True, 1.1, None, 3.3, None, False] + + +def test_union_array_slice(): + # ARROW-2314 + arr = pa.UnionArray.from_sparse(pa.array([0, 0, 1, 1], type=pa.int8()), + [pa.array(["a", "b", "c", "d"]), + pa.array([1, 2, 3, 4])]) + assert arr[1:].to_pylist() == ["b", 3, 4] + + binary = pa.array([b'a', b'b', b'c', b'd'], type='binary') + int64 = pa.array([1, 2, 3], type='int64') + types = pa.array([0, 1, 0, 0, 1, 1, 0], type='int8') + value_offsets = pa.array([0, 0, 2, 1, 1, 2, 3], type='int32') + + arr = pa.UnionArray.from_dense(types, value_offsets, [binary, int64]) + lst = arr.to_pylist() + for i in range(len(arr)): + for j in range(i, len(arr)): + assert arr[i:j].to_pylist() == lst[i:j] + + +def _check_cast_case(case, *, safe=True, check_array_construction=True): + in_data, in_type, out_data, out_type = case + if isinstance(out_data, pa.Array): + assert out_data.type == out_type + expected = out_data + else: + expected = pa.array(out_data, type=out_type) + + # check casting an already created array + if isinstance(in_data, pa.Array): + assert in_data.type == in_type + in_arr = in_data + else: + in_arr = pa.array(in_data, type=in_type) + casted = in_arr.cast(out_type, safe=safe) + casted.validate(full=True) + assert casted.equals(expected) + + # constructing an array with out type which optionally involves casting + # for more see ARROW-1949 + if check_array_construction: + in_arr = pa.array(in_data, type=out_type, safe=safe) + assert in_arr.equals(expected) + + +def test_cast_integers_safe(): + safe_cases = [ + (np.array([0, 1, 2, 3], dtype='i1'), 'int8', + np.array([0, 1, 2, 3], dtype='i4'), pa.int32()), + (np.array([0, 1, 2, 3], dtype='i1'), 'int8', + np.array([0, 1, 2, 3], dtype='u4'), pa.uint16()), + (np.array([0, 1, 2, 3], dtype='i1'), 'int8', + np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()), + (np.array([0, 1, 2, 3], dtype='i1'), 'int8', + np.array([0, 1, 2, 3], dtype='f8'), pa.float64()) + ] + + for case in safe_cases: + _check_cast_case(case) + + unsafe_cases = [ + (np.array([50000], dtype='i4'), 'int32', 'int16'), + (np.array([70000], dtype='i4'), 'int32', 'uint16'), + (np.array([-1], dtype='i4'), 'int32', 'uint16'), + (np.array([50000], dtype='u2'), 'uint16', 'int16') + ] + for in_data, in_type, out_type in unsafe_cases: + in_arr = pa.array(in_data, type=in_type) + + with pytest.raises(pa.ArrowInvalid): + in_arr.cast(out_type) + + +def test_cast_none(): + # ARROW-3735: Ensure that calling cast(None) doesn't segfault. 
+ arr = pa.array([1, 2, 3]) + + with pytest.raises(TypeError): + arr.cast(None) + + +def test_cast_list_to_primitive(): + # ARROW-8070: cast segfaults on unsupported cast from list to utf8 + arr = pa.array([[1, 2], [3, 4]]) + with pytest.raises(NotImplementedError): + arr.cast(pa.int8()) + + arr = pa.array([[b"a", b"b"], [b"c"]], pa.list_(pa.binary())) + with pytest.raises(NotImplementedError): + arr.cast(pa.binary()) + + +def test_slice_chunked_array_zero_chunks(): + # ARROW-8911 + arr = pa.chunked_array([], type='int8') + assert arr.num_chunks == 0 + + result = arr[:] + assert result.equals(arr) + + # Do not crash + arr[:5] + + +def test_cast_chunked_array(): + arrays = [pa.array([1, 2, 3]), pa.array([4, 5, 6])] + carr = pa.chunked_array(arrays) + + target = pa.float64() + casted = carr.cast(target) + expected = pa.chunked_array([x.cast(target) for x in arrays]) + assert casted.equals(expected) + + +def test_cast_chunked_array_empty(): + # ARROW-8142 + for typ1, typ2 in [(pa.dictionary(pa.int8(), pa.string()), pa.string()), + (pa.int64(), pa.int32())]: + + arr = pa.chunked_array([], type=typ1) + result = arr.cast(typ2) + expected = pa.chunked_array([], type=typ2) + assert result.equals(expected) + + +def test_chunked_array_data_warns(): + with pytest.warns(FutureWarning): + res = pa.chunked_array([[]]).data + assert isinstance(res, pa.ChunkedArray) + + +def test_cast_integers_unsafe(): + # We let NumPy do the unsafe casting. + # Note that NEP50 in the NumPy spec no longer allows + # the np.array() constructor to pass the dtype directly + # if it results in an unsafe cast. + unsafe_cases = [ + (np.array([50000], dtype='i4'), 'int32', + np.array([50000]).astype(dtype='i2'), pa.int16()), + (np.array([70000], dtype='i4'), 'int32', + np.array([70000]).astype(dtype='u2'), pa.uint16()), + (np.array([-1], dtype='i4'), 'int32', + np.array([-1]).astype(dtype='u2'), pa.uint16()), + (np.array([50000], dtype='u2'), pa.uint16(), + np.array([50000]).astype(dtype='i2'), pa.int16()) + ] + + for case in unsafe_cases: + _check_cast_case(case, safe=False) + + +def test_floating_point_truncate_safe(): + safe_cases = [ + (np.array([1.0, 2.0, 3.0], dtype='float32'), 'float32', + np.array([1, 2, 3], dtype='i4'), pa.int32()), + (np.array([1.0, 2.0, 3.0], dtype='float64'), 'float64', + np.array([1, 2, 3], dtype='i4'), pa.int32()), + (np.array([-10.0, 20.0, -30.0], dtype='float64'), 'float64', + np.array([-10, 20, -30], dtype='i4'), pa.int32()), + ] + for case in safe_cases: + _check_cast_case(case, safe=True) + + +def test_floating_point_truncate_unsafe(): + unsafe_cases = [ + (np.array([1.1, 2.2, 3.3], dtype='float32'), 'float32', + np.array([1, 2, 3], dtype='i4'), pa.int32()), + (np.array([1.1, 2.2, 3.3], dtype='float64'), 'float64', + np.array([1, 2, 3], dtype='i4'), pa.int32()), + (np.array([-10.1, 20.2, -30.3], dtype='float64'), 'float64', + np.array([-10, 20, -30], dtype='i4'), pa.int32()), + ] + for case in unsafe_cases: + # test safe casting raises + with pytest.raises(pa.ArrowInvalid, match='truncated'): + _check_cast_case(case, safe=True) + + # test unsafe casting truncates + _check_cast_case(case, safe=False) + + +def test_decimal_to_int_safe(): + safe_cases = [ + ( + [decimal.Decimal("123456"), None, decimal.Decimal("-912345")], + pa.decimal128(32, 5), + [123456, None, -912345], + pa.int32() + ), + ( + [decimal.Decimal("1234"), None, decimal.Decimal("-9123")], + pa.decimal128(19, 10), + [1234, None, -9123], + pa.int16() + ), + ( + [decimal.Decimal("123"), None, decimal.Decimal("-91")], + 
pa.decimal128(19, 10), + [123, None, -91], + pa.int8() + ), + ] + for case in safe_cases: + _check_cast_case(case) + _check_cast_case(case, safe=True) + + +def test_decimal_to_int_value_out_of_bounds(): + out_of_bounds_cases = [ + ( + np.array([ + decimal.Decimal("1234567890123"), + None, + decimal.Decimal("-912345678901234") + ]), + pa.decimal128(32, 5), + [1912276171, None, -135950322], + pa.int32() + ), + ( + [decimal.Decimal("123456"), None, decimal.Decimal("-912345678")], + pa.decimal128(32, 5), + [-7616, None, -19022], + pa.int16() + ), + ( + [decimal.Decimal("1234"), None, decimal.Decimal("-9123")], + pa.decimal128(32, 5), + [-46, None, 93], + pa.int8() + ), + ] + + for case in out_of_bounds_cases: + # test safe casting raises + with pytest.raises(pa.ArrowInvalid, + match='Integer value out of bounds'): + _check_cast_case(case) + + # XXX `safe=False` can be ignored when constructing an array + # from a sequence of Python objects (ARROW-8567) + _check_cast_case(case, safe=False, check_array_construction=False) + + +def test_decimal_to_int_non_integer(): + non_integer_cases = [ + ( + [ + decimal.Decimal("123456.21"), + None, + decimal.Decimal("-912345.13") + ], + pa.decimal128(32, 5), + [123456, None, -912345], + pa.int32() + ), + ( + [decimal.Decimal("1234.134"), None, decimal.Decimal("-9123.1")], + pa.decimal128(19, 10), + [1234, None, -9123], + pa.int16() + ), + ( + [decimal.Decimal("123.1451"), None, decimal.Decimal("-91.21")], + pa.decimal128(19, 10), + [123, None, -91], + pa.int8() + ), + ] + + for case in non_integer_cases: + # test safe casting raises + msg_regexp = 'Rescaling Decimal128 value would cause data loss' + with pytest.raises(pa.ArrowInvalid, match=msg_regexp): + _check_cast_case(case) + + _check_cast_case(case, safe=False) + + +def test_decimal_to_decimal(): + arr = pa.array( + [decimal.Decimal("1234.12"), None], + type=pa.decimal128(19, 10) + ) + result = arr.cast(pa.decimal128(15, 6)) + expected = pa.array( + [decimal.Decimal("1234.12"), None], + type=pa.decimal128(15, 6) + ) + assert result.equals(expected) + + msg_regexp = 'Rescaling Decimal128 value would cause data loss' + with pytest.raises(pa.ArrowInvalid, match=msg_regexp): + result = arr.cast(pa.decimal128(9, 1)) + + result = arr.cast(pa.decimal128(9, 1), safe=False) + expected = pa.array( + [decimal.Decimal("1234.1"), None], + type=pa.decimal128(9, 1) + ) + assert result.equals(expected) + + with pytest.raises(pa.ArrowInvalid, + match='Decimal value does not fit in precision'): + result = arr.cast(pa.decimal128(5, 2)) + + +def test_safe_cast_nan_to_int_raises(): + arr = pa.array([np.nan, 1.]) + + with pytest.raises(pa.ArrowInvalid, match='truncated'): + arr.cast(pa.int64(), safe=True) + + +def test_cast_signed_to_unsigned(): + safe_cases = [ + (np.array([0, 1, 2, 3], dtype='i1'), pa.uint8(), + np.array([0, 1, 2, 3], dtype='u1'), pa.uint8()), + (np.array([0, 1, 2, 3], dtype='i2'), pa.uint16(), + np.array([0, 1, 2, 3], dtype='u2'), pa.uint16()) + ] + + for case in safe_cases: + _check_cast_case(case) + + +def test_cast_from_null(): + in_data = [None] * 3 + in_type = pa.null() + out_types = [ + pa.null(), + pa.uint8(), + pa.float16(), + pa.utf8(), + pa.binary(), + pa.binary(10), + pa.list_(pa.int16()), + pa.list_(pa.int32(), 4), + pa.large_list(pa.uint8()), + pa.decimal128(19, 4), + pa.timestamp('us'), + pa.timestamp('us', tz='UTC'), + pa.timestamp('us', tz='Europe/Paris'), + pa.duration('us'), + pa.month_day_nano_interval(), + pa.struct([pa.field('a', pa.int32()), + pa.field('b', pa.list_(pa.int8())), + 
pa.field('c', pa.string())]), + pa.dictionary(pa.int32(), pa.string()), + ] + for out_type in out_types: + _check_cast_case((in_data, in_type, in_data, out_type)) + + out_types = [ + + pa.union([pa.field('a', pa.binary(10)), + pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE), + pa.union([pa.field('a', pa.binary(10)), + pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE), + ] + in_arr = pa.array(in_data, type=pa.null()) + for out_type in out_types: + with pytest.raises(NotImplementedError): + in_arr.cast(out_type) + + +def test_cast_string_to_number_roundtrip(): + cases = [ + (pa.array(["1", "127", "-128"]), + pa.array([1, 127, -128], type=pa.int8())), + (pa.array([None, "18446744073709551615"]), + pa.array([None, 18446744073709551615], type=pa.uint64())), + ] + for in_arr, expected in cases: + casted = in_arr.cast(expected.type, safe=True) + casted.validate(full=True) + assert casted.equals(expected) + casted_back = casted.cast(in_arr.type, safe=True) + casted_back.validate(full=True) + assert casted_back.equals(in_arr) + + +def test_cast_dictionary(): + # cast to the value type + arr = pa.array( + ["foo", "bar", None], + type=pa.dictionary(pa.int64(), pa.string()) + ) + expected = pa.array(["foo", "bar", None]) + assert arr.type == pa.dictionary(pa.int64(), pa.string()) + assert arr.cast(pa.string()) == expected + + # cast to a different key type + for key_type in [pa.int8(), pa.int16(), pa.int32()]: + typ = pa.dictionary(key_type, pa.string()) + expected = pa.array( + ["foo", "bar", None], + type=pa.dictionary(key_type, pa.string()) + ) + assert arr.cast(typ) == expected + + # shouldn't crash (ARROW-7077) + with pytest.raises(pa.ArrowInvalid): + arr.cast(pa.int32()) + + +def test_view(): + # ARROW-5992 + arr = pa.array(['foo', 'bar', 'baz'], type=pa.utf8()) + expected = pa.array(['foo', 'bar', 'baz'], type=pa.binary()) + + assert arr.view(pa.binary()).equals(expected) + assert arr.view('binary').equals(expected) + + +def test_unique_simple(): + cases = [ + (pa.array([1, 2, 3, 1, 2, 3]), pa.array([1, 2, 3])), + (pa.array(['foo', None, 'bar', 'foo']), + pa.array(['foo', None, 'bar'])), + (pa.array(['foo', None, 'bar', 'foo'], pa.large_binary()), + pa.array(['foo', None, 'bar'], pa.large_binary())), + ] + for arr, expected in cases: + result = arr.unique() + assert result.equals(expected) + result = pa.chunked_array([arr]).unique() + assert result.equals(expected) + + +def test_value_counts_simple(): + cases = [ + (pa.array([1, 2, 3, 1, 2, 3]), + pa.array([1, 2, 3]), + pa.array([2, 2, 2], type=pa.int64())), + (pa.array(['foo', None, 'bar', 'foo']), + pa.array(['foo', None, 'bar']), + pa.array([2, 1, 1], type=pa.int64())), + (pa.array(['foo', None, 'bar', 'foo'], pa.large_binary()), + pa.array(['foo', None, 'bar'], pa.large_binary()), + pa.array([2, 1, 1], type=pa.int64())), + ] + for arr, expected_values, expected_counts in cases: + for arr_in in (arr, pa.chunked_array([arr])): + result = arr_in.value_counts() + assert result.type.equals( + pa.struct([pa.field("values", arr.type), + pa.field("counts", pa.int64())])) + assert result.field("values").equals(expected_values) + assert result.field("counts").equals(expected_counts) + + +def test_unique_value_counts_dictionary_type(): + indices = pa.array([3, 0, 0, 0, 1, 1, 3, 0, 1, 3, 0, 1]) + dictionary = pa.array(['foo', 'bar', 'baz', 'qux']) + + arr = pa.DictionaryArray.from_arrays(indices, dictionary) + + unique_result = arr.unique() + expected = pa.DictionaryArray.from_arrays(indices.unique(), dictionary) + assert 
unique_result.equals(expected) + + result = arr.value_counts() + assert result.field('values').equals(unique_result) + assert result.field('counts').equals(pa.array([3, 5, 4], type='int64')) + + arr = pa.DictionaryArray.from_arrays( + pa.array([], type='int64'), dictionary) + unique_result = arr.unique() + expected = pa.DictionaryArray.from_arrays(pa.array([], type='int64'), + pa.array([], type='utf8')) + assert unique_result.equals(expected) + + result = arr.value_counts() + assert result.field('values').equals(unique_result) + assert result.field('counts').equals(pa.array([], type='int64')) + + +def test_dictionary_encode_simple(): + cases = [ + (pa.array([1, 2, 3, None, 1, 2, 3]), + pa.DictionaryArray.from_arrays( + pa.array([0, 1, 2, None, 0, 1, 2], type='int32'), + [1, 2, 3])), + (pa.array(['foo', None, 'bar', 'foo']), + pa.DictionaryArray.from_arrays( + pa.array([0, None, 1, 0], type='int32'), + ['foo', 'bar'])), + (pa.array(['foo', None, 'bar', 'foo'], type=pa.large_binary()), + pa.DictionaryArray.from_arrays( + pa.array([0, None, 1, 0], type='int32'), + pa.array(['foo', 'bar'], type=pa.large_binary()))), + ] + for arr, expected in cases: + result = arr.dictionary_encode() + assert result.equals(expected) + result = pa.chunked_array([arr]).dictionary_encode() + assert result.num_chunks == 1 + assert result.chunk(0).equals(expected) + result = pa.chunked_array([], type=arr.type).dictionary_encode() + assert result.num_chunks == 0 + assert result.type == expected.type + + +def test_dictionary_encode_sliced(): + cases = [ + (pa.array([1, 2, 3, None, 1, 2, 3])[1:-1], + pa.DictionaryArray.from_arrays( + pa.array([0, 1, None, 2, 0], type='int32'), + [2, 3, 1])), + (pa.array([None, 'foo', 'bar', 'foo', 'xyzzy'])[1:-1], + pa.DictionaryArray.from_arrays( + pa.array([0, 1, 0], type='int32'), + ['foo', 'bar'])), + (pa.array([None, 'foo', 'bar', 'foo', 'xyzzy'], + type=pa.large_string())[1:-1], + pa.DictionaryArray.from_arrays( + pa.array([0, 1, 0], type='int32'), + pa.array(['foo', 'bar'], type=pa.large_string()))), + ] + for arr, expected in cases: + result = arr.dictionary_encode() + assert result.equals(expected) + result = pa.chunked_array([arr]).dictionary_encode() + assert result.num_chunks == 1 + assert result.type == expected.type + assert result.chunk(0).equals(expected) + result = pa.chunked_array([], type=arr.type).dictionary_encode() + assert result.num_chunks == 0 + assert result.type == expected.type + + # ARROW-9143 dictionary_encode after slice was segfaulting + array = pa.array(['foo', 'bar', 'baz']) + array.slice(1).dictionary_encode() + + +def test_dictionary_encode_zero_length(): + # User-facing experience of ARROW-7008 + arr = pa.array([], type=pa.string()) + encoded = arr.dictionary_encode() + assert len(encoded.dictionary) == 0 + encoded.validate(full=True) + + +def test_dictionary_decode(): + cases = [ + (pa.array([1, 2, 3, None, 1, 2, 3]), + pa.DictionaryArray.from_arrays( + pa.array([0, 1, 2, None, 0, 1, 2], type='int32'), + [1, 2, 3])), + (pa.array(['foo', None, 'bar', 'foo']), + pa.DictionaryArray.from_arrays( + pa.array([0, None, 1, 0], type='int32'), + ['foo', 'bar'])), + (pa.array(['foo', None, 'bar', 'foo'], type=pa.large_binary()), + pa.DictionaryArray.from_arrays( + pa.array([0, None, 1, 0], type='int32'), + pa.array(['foo', 'bar'], type=pa.large_binary()))), + ] + for expected, arr in cases: + result = arr.dictionary_decode() + assert result.equals(expected) + + +def test_cast_time32_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int32'), + 
type=pa.time32('s')) + expected = pa.array([0, 1, 2], type='i4') + + result = arr.cast('i4') + assert result.equals(expected) + + +def test_cast_time64_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int64'), + type=pa.time64('us')) + expected = pa.array([0, 1, 2], type='i8') + + result = arr.cast('i8') + assert result.equals(expected) + + +def test_cast_timestamp_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int64'), + type=pa.timestamp('us')) + expected = pa.array([0, 1, 2], type='i8') + + result = arr.cast('i8') + assert result.equals(expected) + + +def test_cast_date32_to_int(): + arr = pa.array([0, 1, 2], type='i4') + + result1 = arr.cast('date32') + result2 = result1.cast('i4') + + expected1 = pa.array([ + datetime.date(1970, 1, 1), + datetime.date(1970, 1, 2), + datetime.date(1970, 1, 3) + ]).cast('date32') + + assert result1.equals(expected1) + assert result2.equals(arr) + + +def test_cast_duration_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int64'), + type=pa.duration('us')) + expected = pa.array([0, 1, 2], type='i8') + + result = arr.cast('i8') + assert result.equals(expected) + + +def test_cast_binary_to_utf8(): + binary_arr = pa.array([b'foo', b'bar', b'baz'], type=pa.binary()) + utf8_arr = binary_arr.cast(pa.utf8()) + expected = pa.array(['foo', 'bar', 'baz'], type=pa.utf8()) + + assert utf8_arr.equals(expected) + + non_utf8_values = [('mañana').encode('utf-16-le')] + non_utf8_binary = pa.array(non_utf8_values) + assert non_utf8_binary.type == pa.binary() + with pytest.raises(ValueError): + non_utf8_binary.cast(pa.string()) + + non_utf8_all_null = pa.array(non_utf8_values, mask=np.array([True]), + type=pa.binary()) + # No error + casted = non_utf8_all_null.cast(pa.string()) + assert casted.null_count == 1 + + +def test_cast_date64_to_int(): + arr = pa.array(np.array([0, 1, 2], dtype='int64'), + type=pa.date64()) + expected = pa.array([0, 1, 2], type='i8') + + result = arr.cast('i8') + + assert result.equals(expected) + + +def test_date64_from_builtin_datetime(): + val1 = datetime.datetime(2000, 1, 1, 12, 34, 56, 123456) + val2 = datetime.datetime(2000, 1, 1) + result = pa.array([val1, val2], type='date64') + result2 = pa.array([val1.date(), val2.date()], type='date64') + + assert result.equals(result2) + + as_i8 = result.view('int64') + assert as_i8[0].as_py() == as_i8[1].as_py() + + +@pytest.mark.parametrize(('ty', 'values'), [ + ('bool', [True, False, True]), + ('uint8', range(0, 255)), + ('int8', range(0, 128)), + ('uint16', range(0, 10)), + ('int16', range(0, 10)), + ('uint32', range(0, 10)), + ('int32', range(0, 10)), + ('uint64', range(0, 10)), + ('int64', range(0, 10)), + ('float', [0.0, 0.1, 0.2]), + ('double', [0.0, 0.1, 0.2]), + ('string', ['a', 'b', 'c']), + ('binary', [b'a', b'b', b'c']), + (pa.binary(3), [b'abc', b'bcd', b'cde']) +]) +def test_cast_identities(ty, values): + arr = pa.array(values, type=ty) + assert arr.cast(ty).equals(arr) + + +pickle_test_parametrize = pytest.mark.parametrize( + ('data', 'typ'), + [ + ([True, False, True, True], pa.bool_()), + ([1, 2, 4, 6], pa.int64()), + ([1.0, 2.5, None], pa.float64()), + (['a', None, 'b'], pa.string()), + ([], None), + ([[1, 2], [3]], pa.list_(pa.int64())), + ([[4, 5], [6]], pa.large_list(pa.int16())), + ([['a'], None, ['b', 'c']], pa.list_(pa.string())), + ([[1, 2], [3]], pa.list_view(pa.int64())), + ([[4, 5], [6]], pa.large_list_view(pa.int16())), + ([['a'], None, ['b', 'c']], pa.list_view(pa.string())), + ([(1, 'a'), (2, 'c'), None], + pa.struct([pa.field('a', pa.int64()), 
pa.field('b', pa.string())])) + ] +) + + +@pickle_test_parametrize +def test_array_pickle(data, typ, pickle_module): + # Allocate here so that we don't have any Arrow data allocated. + # This is needed to ensure that allocator tests can be reliable. + array = pa.array(data, type=typ) + for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1): + result = pickle_module.loads(pickle_module.dumps(array, proto)) + assert array.equals(result) + + +def test_array_pickle_dictionary(pickle_module): + # not included in the above as dictionary array cannot be created with + # the pa.array function + array = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1], ['a', 'b', 'c']) + for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1): + result = pickle_module.loads(pickle_module.dumps(array, proto)) + assert array.equals(result) + + +@h.settings(suppress_health_check=(h.HealthCheck.too_slow,)) +@h.given( + past.arrays( + past.all_types, + size=st.integers(min_value=0, max_value=10) + ) +) +def test_pickling(pickle_module, arr): + data = pickle_module.dumps(arr) + restored = pickle_module.loads(data) + assert arr.equals(restored) + + +@pickle_test_parametrize +def test_array_pickle_protocol5(data, typ, pickle_module): + # Test zero-copy pickling with protocol 5 (PEP 574) + array = pa.array(data, type=typ) + addresses = [buf.address if buf is not None else 0 + for buf in array.buffers()] + + for proto in range(5, pickle_module.HIGHEST_PROTOCOL + 1): + buffers = [] + pickled = pickle_module.dumps(array, proto, buffer_callback=buffers.append) + result = pickle_module.loads(pickled, buffers=buffers) + assert array.equals(result) + + result_addresses = [buf.address if buf is not None else 0 + for buf in result.buffers()] + assert result_addresses == addresses + + +@pytest.mark.parametrize( + 'narr', + [ + np.arange(10, dtype=np.int64), + np.arange(10, dtype=np.int32), + np.arange(10, dtype=np.int16), + np.arange(10, dtype=np.int8), + np.arange(10, dtype=np.uint64), + np.arange(10, dtype=np.uint32), + np.arange(10, dtype=np.uint16), + np.arange(10, dtype=np.uint8), + np.arange(10, dtype=np.float64), + np.arange(10, dtype=np.float32), + np.arange(10, dtype=np.float16), + ] +) +def test_to_numpy_roundtrip(narr): + arr = pa.array(narr) + assert narr.dtype == arr.to_numpy().dtype + np.testing.assert_array_equal(narr, arr.to_numpy()) + np.testing.assert_array_equal(narr[:6], arr[:6].to_numpy()) + np.testing.assert_array_equal(narr[2:], arr[2:].to_numpy()) + np.testing.assert_array_equal(narr[2:6], arr[2:6].to_numpy()) + + +def test_array_uint64_from_py_over_range(): + arr = pa.array([2 ** 63], type=pa.uint64()) + expected = pa.array(np.array([2 ** 63], dtype='u8')) + assert arr.equals(expected) + + +def test_array_conversions_no_sentinel_values(): + arr = np.array([1, 2, 3, 4], dtype='int8') + refcount = sys.getrefcount(arr) + arr2 = pa.array(arr) # noqa + assert sys.getrefcount(arr) == (refcount + 1) + + assert arr2.type == 'int8' + + arr3 = pa.array(np.array([1, np.nan, 2, 3, np.nan, 4], dtype='float32'), + type='float32') + assert arr3.type == 'float32' + assert arr3.null_count == 0 + + +def test_time32_time64_from_integer(): + # ARROW-4111 + result = pa.array([1, 2, None], type=pa.time32('s')) + expected = pa.array([datetime.time(second=1), + datetime.time(second=2), None], + type=pa.time32('s')) + assert result.equals(expected) + + result = pa.array([1, 2, None], type=pa.time32('ms')) + expected = pa.array([datetime.time(microsecond=1000), + datetime.time(microsecond=2000), None], + type=pa.time32('ms')) + 
assert result.equals(expected) + + result = pa.array([1, 2, None], type=pa.time64('us')) + expected = pa.array([datetime.time(microsecond=1), + datetime.time(microsecond=2), None], + type=pa.time64('us')) + assert result.equals(expected) + + result = pa.array([1000, 2000, None], type=pa.time64('ns')) + expected = pa.array([datetime.time(microsecond=1), + datetime.time(microsecond=2), None], + type=pa.time64('ns')) + assert result.equals(expected) + + +def test_binary_string_pandas_null_sentinels(): + # ARROW-6227 + def _check_case(ty): + arr = pa.array(['string', np.nan], type=ty, from_pandas=True) + expected = pa.array(['string', None], type=ty) + assert arr.equals(expected) + _check_case('binary') + _check_case('utf8') + + +def test_pandas_null_sentinels_raise_error(): + # ARROW-6227 + cases = [ + ([None, np.nan], 'null'), + (['string', np.nan], 'binary'), + (['string', np.nan], 'utf8'), + (['string', np.nan], 'large_binary'), + (['string', np.nan], 'large_utf8'), + ([b'string', np.nan], pa.binary(6)), + ([True, np.nan], pa.bool_()), + ([decimal.Decimal('0'), np.nan], pa.decimal128(12, 2)), + ([0, np.nan], pa.date32()), + ([0, np.nan], pa.date32()), + ([0, np.nan], pa.date64()), + ([0, np.nan], pa.time32('s')), + ([0, np.nan], pa.time64('us')), + ([0, np.nan], pa.timestamp('us')), + ([0, np.nan], pa.duration('us')), + ] + for case, ty in cases: + # Both types of exceptions are raised. May want to clean that up + with pytest.raises((ValueError, TypeError)): + pa.array(case, type=ty) + + # from_pandas option suppresses failure + result = pa.array(case, type=ty, from_pandas=True) + assert result.null_count == (1 if ty != 'null' else 2) + + +@pytest.mark.pandas +def test_pandas_null_sentinels_index(): + # ARROW-7023 - ensure that when passing a pandas Index, "from_pandas" + # semantics are used + import pandas as pd + idx = pd.Index([1, 2, np.nan], dtype=object) + result = pa.array(idx) + expected = pa.array([1, 2, np.nan], from_pandas=True) + assert result.equals(expected) + + +def test_array_roundtrip_from_numpy_datetimeD(): + arr = np.array([None, datetime.date(2017, 4, 4)], dtype='datetime64[D]') + + result = pa.array(arr) + expected = pa.array([None, datetime.date(2017, 4, 4)], type=pa.date32()) + assert result.equals(expected) + result = result.to_numpy(zero_copy_only=False) + np.testing.assert_array_equal(result, arr) + assert result.dtype == arr.dtype + + +def test_array_from_naive_datetimes(): + arr = pa.array([ + None, + datetime.datetime(2017, 4, 4, 12, 11, 10), + datetime.datetime(2018, 1, 1, 0, 2, 0) + ]) + assert arr.type == pa.timestamp('us', tz=None) + + +@pytest.mark.parametrize(('dtype', 'type'), [ + ('datetime64[s]', pa.timestamp('s')), + ('datetime64[ms]', pa.timestamp('ms')), + ('datetime64[us]', pa.timestamp('us')), + ('datetime64[ns]', pa.timestamp('ns')) +]) +def test_array_from_numpy_datetime(dtype, type): + data = [ + None, + datetime.datetime(2017, 4, 4, 12, 11, 10), + datetime.datetime(2018, 1, 1, 0, 2, 0) + ] + + # from numpy array + arr = pa.array(np.array(data, dtype=dtype)) + expected = pa.array(data, type=type) + assert arr.equals(expected) + + # from list of numpy scalars + arr = pa.array(list(np.array(data, dtype=dtype))) + assert arr.equals(expected) + + +def test_array_from_different_numpy_datetime_units_raises(): + data = [ + None, + datetime.datetime(2017, 4, 4, 12, 11, 10), + datetime.datetime(2018, 1, 1, 0, 2, 0) + ] + s = np.array(data, dtype='datetime64[s]') + ms = np.array(data, dtype='datetime64[ms]') + data = list(s[:2]) + list(ms[2:]) + + with 
pytest.raises(pa.ArrowNotImplementedError): + pa.array(data) + + +@pytest.mark.parametrize('unit', ['ns', 'us', 'ms', 's']) +def test_array_from_list_of_timestamps(unit): + n = np.datetime64('NaT', unit) + x = np.datetime64('2017-01-01 01:01:01.111111111', unit) + y = np.datetime64('2018-11-22 12:24:48.111111111', unit) + + a1 = pa.array([n, x, y]) + a2 = pa.array([n, x, y], type=pa.timestamp(unit)) + + assert a1.type == a2.type + assert a1.type.unit == unit + assert a1[0] == a2[0] + + +def test_array_from_timestamp_with_generic_unit(): + n = np.datetime64('NaT') + x = np.datetime64('2017-01-01 01:01:01.111111111') + y = np.datetime64('2018-11-22 12:24:48.111111111') + + with pytest.raises(pa.ArrowNotImplementedError, + match='Unbound or generic datetime64 time unit'): + pa.array([n, x, y]) + + +@pytest.mark.parametrize(('dtype', 'type'), [ + ('timedelta64[s]', pa.duration('s')), + ('timedelta64[ms]', pa.duration('ms')), + ('timedelta64[us]', pa.duration('us')), + ('timedelta64[ns]', pa.duration('ns')) +]) +def test_array_from_numpy_timedelta(dtype, type): + data = [ + None, + datetime.timedelta(1), + datetime.timedelta(0, 1) + ] + + # from numpy array + np_arr = np.array(data, dtype=dtype) + arr = pa.array(np_arr) + assert isinstance(arr, pa.DurationArray) + assert arr.type == type + expected = pa.array(data, type=type) + assert arr.equals(expected) + assert arr.to_pylist() == data + + # from list of numpy scalars + arr = pa.array(list(np.array(data, dtype=dtype))) + assert arr.equals(expected) + assert arr.to_pylist() == data + + +def test_array_from_numpy_timedelta_incorrect_unit(): + # generic (no unit) + td = np.timedelta64(1) + + for data in [[td], np.array([td])]: + with pytest.raises(NotImplementedError): + pa.array(data) + + # unsupported unit + td = np.timedelta64(1, 'M') + for data in [[td], np.array([td])]: + with pytest.raises(NotImplementedError): + pa.array(data) + + +def test_array_from_numpy_ascii(): + arr = np.array(['abcde', 'abc', ''], dtype='|S5') + + arrow_arr = pa.array(arr) + assert arrow_arr.type == 'binary' + expected = pa.array(['abcde', 'abc', ''], type='binary') + assert arrow_arr.equals(expected) + + mask = np.array([False, True, False]) + arrow_arr = pa.array(arr, mask=mask) + expected = pa.array(['abcde', None, ''], type='binary') + assert arrow_arr.equals(expected) + + # Strided variant + arr = np.array(['abcde', 'abc', ''] * 5, dtype='|S5')[::2] + mask = np.array([False, True, False] * 5)[::2] + arrow_arr = pa.array(arr, mask=mask) + + expected = pa.array(['abcde', '', None, 'abcde', '', None, 'abcde', ''], + type='binary') + assert arrow_arr.equals(expected) + + # 0 itemsize + arr = np.array(['', '', ''], dtype='|S0') + arrow_arr = pa.array(arr) + expected = pa.array(['', '', ''], type='binary') + assert arrow_arr.equals(expected) + + +def test_interval_array_from_timedelta(): + data = [ + None, + datetime.timedelta(days=1, seconds=1, microseconds=1, + milliseconds=1, minutes=1, hours=1, weeks=1)] + + # From timedelta (explicit type required) + arr = pa.array(data, pa.month_day_nano_interval()) + assert isinstance(arr, pa.MonthDayNanoIntervalArray) + assert arr.type == pa.month_day_nano_interval() + expected_list = [ + None, + pa.MonthDayNano([0, 8, + (datetime.timedelta(seconds=1, microseconds=1, + milliseconds=1, minutes=1, + hours=1) // + datetime.timedelta(microseconds=1)) * 1000])] + expected = pa.array(expected_list) + assert arr.equals(expected) + assert arr.to_pylist() == expected_list + + +@pytest.mark.pandas +def 
test_interval_array_from_relativedelta(): + # dateutil is dependency of pandas + from dateutil.relativedelta import relativedelta + from pandas import DateOffset + data = [ + None, + relativedelta(years=1, months=1, + days=1, seconds=1, microseconds=1, + minutes=1, hours=1, weeks=1, leapdays=1)] + # Note leapdays are ignored. + + # From relativedelta + arr = pa.array(data) + assert isinstance(arr, pa.MonthDayNanoIntervalArray) + assert arr.type == pa.month_day_nano_interval() + expected_list = [ + None, + pa.MonthDayNano([13, 8, + (datetime.timedelta(seconds=1, microseconds=1, + minutes=1, hours=1) // + datetime.timedelta(microseconds=1)) * 1000])] + expected = pa.array(expected_list) + assert arr.equals(expected) + assert arr.to_pandas().tolist() == [ + None, DateOffset(months=13, days=8, + microseconds=( + datetime.timedelta(seconds=1, microseconds=1, + minutes=1, hours=1) // + datetime.timedelta(microseconds=1)), + nanoseconds=0)] + with pytest.raises(ValueError): + pa.array([DateOffset(years=((1 << 32) // 12), months=100)]) + with pytest.raises(ValueError): + pa.array([DateOffset(weeks=((1 << 32) // 7), days=100)]) + with pytest.raises(ValueError): + pa.array([DateOffset(seconds=((1 << 64) // 1000000000), + nanoseconds=1)]) + with pytest.raises(ValueError): + pa.array([DateOffset(microseconds=((1 << 64) // 100))]) + + +def test_interval_array_from_tuple(): + data = [None, (1, 2, -3)] + + # From timedelta (explicit type required) + arr = pa.array(data, pa.month_day_nano_interval()) + assert isinstance(arr, pa.MonthDayNanoIntervalArray) + assert arr.type == pa.month_day_nano_interval() + expected_list = [ + None, + pa.MonthDayNano([1, 2, -3])] + expected = pa.array(expected_list) + assert arr.equals(expected) + assert arr.to_pylist() == expected_list + + +@pytest.mark.pandas +def test_interval_array_from_dateoffset(): + from pandas.tseries.offsets import DateOffset + data = [ + None, + DateOffset(years=1, months=1, + days=1, seconds=1, microseconds=1, + minutes=1, hours=1, weeks=1, nanoseconds=1), + DateOffset()] + + arr = pa.array(data) + assert isinstance(arr, pa.MonthDayNanoIntervalArray) + assert arr.type == pa.month_day_nano_interval() + expected_list = [ + None, + pa.MonthDayNano([13, 8, 3661000001001]), + pa.MonthDayNano([0, 0, 0])] + expected = pa.array(expected_list) + assert arr.equals(expected) + expected_from_pandas = [ + None, DateOffset(months=13, days=8, + microseconds=( + datetime.timedelta(seconds=1, microseconds=1, + minutes=1, hours=1) // + datetime.timedelta(microseconds=1)), + nanoseconds=1), + DateOffset(months=0, days=0, microseconds=0, nanoseconds=0)] + + assert arr.to_pandas().tolist() == expected_from_pandas + + # nested list array conversion + actual_list = pa.array([data]).to_pandas().tolist() + assert len(actual_list) == 1 + assert list(actual_list[0]) == expected_from_pandas + + +def test_array_from_numpy_unicode(): + dtypes = ['U5'] + + for dtype in dtypes: + arr = np.array(['abcde', 'abc', ''], dtype=dtype) + + arrow_arr = pa.array(arr) + assert arrow_arr.type == 'utf8' + expected = pa.array(['abcde', 'abc', ''], type='utf8') + assert arrow_arr.equals(expected) + + mask = np.array([False, True, False]) + arrow_arr = pa.array(arr, mask=mask) + expected = pa.array(['abcde', None, ''], type='utf8') + assert arrow_arr.equals(expected) + + # Strided variant + arr = np.array(['abcde', 'abc', ''] * 5, dtype=dtype)[::2] + mask = np.array([False, True, False] * 5)[::2] + arrow_arr = pa.array(arr, mask=mask) + + expected = pa.array(['abcde', '', None, 'abcde', '', 
None, + 'abcde', ''], type='utf8') + assert arrow_arr.equals(expected) + + # 0 itemsize + arr = np.array(['', '', ''], dtype='= object.__sizeof__(a) + a.nbytes + a = pa.array([1, None, 3], type='int64') + assert a.nbytes == 8*3 + 1 + assert a.get_total_buffer_size() == 8*3 + 1 + assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes + a = pa.array([[1, 2], None, [3, None, 4, 5]], type=pa.list_(pa.int64())) + assert a.nbytes == 62 + assert a.get_total_buffer_size() == 1 + 4 * 4 + 1 + 6 * 8 + assert sys.getsizeof(a) >= object.__sizeof__(a) + a.nbytes + a = pa.array([[[5, 6, 7]], [[9, 10]]], type=pa.list_(pa.list_(pa.int8()))) + assert a.get_total_buffer_size() == (4 * 3) + (4 * 3) + (1 * 5) + assert a.nbytes == 21 + a = pa.array([[[1, 2], [3, 4]], [[5, 6, 7], None, [8]], [[9, 10]]], + type=pa.list_(pa.list_(pa.int8()))) + a1 = a.slice(1, 2) + assert a1.nbytes == (4 * 2) + 1 + (4 * 4) + (1 * 6) + assert a1.get_total_buffer_size() == (4 * 4) + 1 + (4 * 7) + (1 * 10) + + +def test_nbytes_size(): + a = pa.chunked_array([pa.array([1, None, 3], type=pa.int16()), + pa.array([4, 5, 6], type=pa.int16())]) + assert a.nbytes == 13 + + +def test_invalid_tensor_constructor_repr(): + # ARROW-2638: prevent calling extension class constructors directly + with pytest.raises(TypeError): + repr(pa.Tensor([1])) + + +def test_invalid_tensor_construction(): + with pytest.raises(TypeError): + pa.Tensor() + + +@pytest.mark.parametrize(('offset_type', 'list_type_factory'), + [(pa.int32(), pa.list_), (pa.int64(), pa.large_list)]) +def test_list_array_flatten(offset_type, list_type_factory): + typ2 = list_type_factory( + list_type_factory( + pa.int64() + ) + ) + arr2 = pa.array([ + None, + [ + [1, None, 2], + None, + [3, 4] + ], + [], + [ + [], + [5, 6], + None + ], + [ + [7, 8] + ] + ], type=typ2) + offsets2 = pa.array([0, 0, 3, 3, 6, 7], type=offset_type) + + typ1 = list_type_factory(pa.int64()) + arr1 = pa.array([ + [1, None, 2], + None, + [3, 4], + [], + [5, 6], + None, + [7, 8] + ], type=typ1) + offsets1 = pa.array([0, 3, 3, 5, 5, 7, 7, 9], type=offset_type) + + arr0 = pa.array([ + 1, None, 2, + 3, 4, + 5, 6, + 7, 8 + ], type=pa.int64()) + + assert arr2.flatten().equals(arr1) + assert arr2.offsets.equals(offsets2) + assert arr2.values.equals(arr1) + assert arr1.flatten().equals(arr0) + assert arr1.offsets.equals(offsets1) + assert arr1.values.equals(arr0) + assert arr2.flatten().flatten().equals(arr0) + assert arr2.values.values.equals(arr0) + + +@pytest.mark.parametrize('list_type', [ + pa.list_(pa.int32()), + pa.list_(pa.int32(), list_size=2), + pa.large_list(pa.int32())]) +def test_list_value_parent_indices(list_type): + arr = pa.array( + [ + [0, 1], + None, + [None, None], + [3, 4] + ], type=list_type) + expected = pa.array([0, 0, 2, 2, 3, 3], type=pa.int64()) + assert arr.value_parent_indices().equals(expected) + + +@pytest.mark.parametrize(('offset_type', 'list_type'), + [(pa.int32(), pa.list_(pa.int32())), + (pa.int32(), pa.list_(pa.int32(), list_size=2)), + (pa.int64(), pa.large_list(pa.int32()))]) +def test_list_value_lengths(offset_type, list_type): + + # FixedSizeListArray needs fixed list sizes + if getattr(list_type, "list_size", None): + arr = pa.array( + [ + [0, 1], + None, + [None, None], + [3, 4] + ], type=list_type) + expected = pa.array([2, None, 2, 2], type=offset_type) + + # Otherwise create variable list sizes + else: + arr = pa.array( + [ + [0, 1, 2], + None, + [], + [3, 4] + ], type=list_type) + expected = pa.array([3, None, 0, 2], type=offset_type) + assert 
arr.value_lengths().equals(expected) + + +@pytest.mark.parametrize('list_type_factory', [pa.list_, pa.large_list]) +def test_list_array_flatten_non_canonical(list_type_factory): + # Non-canonical list array (null elements backed by non-empty sublists) + typ = list_type_factory(pa.int64()) + arr = pa.array([[1], [2, 3], [4, 5, 6]], type=typ) + buffers = arr.buffers()[:2] + buffers[0] = pa.py_buffer(b"\x05") # validity bitmap + arr = arr.from_buffers(arr.type, len(arr), buffers, children=[arr.values]) + assert arr.to_pylist() == [[1], None, [4, 5, 6]] + assert arr.offsets.to_pylist() == [0, 1, 3, 6] + + flattened = arr.flatten() + flattened.validate(full=True) + assert flattened.type == typ.value_type + assert flattened.to_pylist() == [1, 4, 5, 6] + + # .values is the physical values array (including masked elements) + assert arr.values.to_pylist() == [1, 2, 3, 4, 5, 6] + + +@pytest.mark.parametrize('klass', [pa.ListArray, pa.LargeListArray]) +def test_list_array_values_offsets_sliced(klass): + # ARROW-7301 + arr = klass.from_arrays(offsets=[0, 3, 4, 6], values=[1, 2, 3, 4, 5, 6]) + assert arr.values.to_pylist() == [1, 2, 3, 4, 5, 6] + assert arr.offsets.to_pylist() == [0, 3, 4, 6] + + # sliced -> values keeps referring to full values buffer, but offsets is + # sliced as well so the offsets correctly point into the full values array + # sliced -> flatten() will return the sliced value array. + arr2 = arr[1:] + assert arr2.values.to_pylist() == [1, 2, 3, 4, 5, 6] + assert arr2.offsets.to_pylist() == [3, 4, 6] + assert arr2.flatten().to_pylist() == [4, 5, 6] + i = arr2.offsets[0].as_py() + j = arr2.offsets[1].as_py() + assert arr2[0].as_py() == arr2.values[i:j].to_pylist() == [4] + + +def test_fixed_size_list_array_flatten(): + typ2 = pa.list_(pa.list_(pa.int64(), 2), 3) + arr2 = pa.array([ + [ + [1, 2], + [3, 4], + [5, 6], + ], + None, + [ + [7, None], + None, + [8, 9] + ], + ], type=typ2) + assert arr2.type.equals(typ2) + + typ1 = pa.list_(pa.int64(), 2) + arr1 = pa.array([ + [1, 2], [3, 4], [5, 6], + [7, None], None, [8, 9] + ], type=typ1) + assert arr1.type.equals(typ1) + assert arr2.flatten().equals(arr1) + + typ0 = pa.int64() + arr0 = pa.array([ + 1, 2, 3, 4, 5, 6, 7, None, 8, 9, + ], type=typ0) + assert arr0.type.equals(typ0) + assert arr1.flatten().equals(arr0) + assert arr2.flatten().flatten().equals(arr0) + + +def test_fixed_size_list_array_flatten_with_slice(): + array = pa.array([[1], [2], [3]], + type=pa.list_(pa.float64(), list_size=1)) + assert array[2:].flatten() == pa.array([3], type=pa.float64()) + + +def test_map_array_values_offsets(): + ty = pa.map_(pa.utf8(), pa.int32()) + ty_values = pa.struct([pa.field("key", pa.utf8(), nullable=False), + pa.field("value", pa.int32())]) + a = pa.array([[('a', 1), ('b', 2)], [('c', 3)]], type=ty) + + assert a.values.type.equals(ty_values) + assert a.values == pa.array([ + {'key': 'a', 'value': 1}, + {'key': 'b', 'value': 2}, + {'key': 'c', 'value': 3}, + ], type=ty_values) + assert a.keys.equals(pa.array(['a', 'b', 'c'])) + assert a.items.equals(pa.array([1, 2, 3], type=pa.int32())) + + assert pa.ListArray.from_arrays(a.offsets, a.keys).equals( + pa.array([['a', 'b'], ['c']])) + assert pa.ListArray.from_arrays(a.offsets, a.items).equals( + pa.array([[1, 2], [3]], type=pa.list_(pa.int32()))) + + with pytest.raises(NotImplementedError): + a.flatten() + + +def test_struct_array_flatten(): + ty = pa.struct([pa.field('x', pa.int16()), + pa.field('y', pa.float32())]) + a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) + xs, ys = 
a.flatten() + assert xs.type == pa.int16() + assert ys.type == pa.float32() + assert xs.to_pylist() == [1, 3, 5] + assert ys.to_pylist() == [2.5, 4.5, 6.5] + xs, ys = a[1:].flatten() + assert xs.to_pylist() == [3, 5] + assert ys.to_pylist() == [4.5, 6.5] + + a = pa.array([(1, 2.5), None, (3, 4.5)], type=ty) + xs, ys = a.flatten() + assert xs.to_pylist() == [1, None, 3] + assert ys.to_pylist() == [2.5, None, 4.5] + xs, ys = a[1:].flatten() + assert xs.to_pylist() == [None, 3] + assert ys.to_pylist() == [None, 4.5] + + a = pa.array([(1, None), (2, 3.5), (None, 4.5)], type=ty) + xs, ys = a.flatten() + assert xs.to_pylist() == [1, 2, None] + assert ys.to_pylist() == [None, 3.5, 4.5] + xs, ys = a[1:].flatten() + assert xs.to_pylist() == [2, None] + assert ys.to_pylist() == [3.5, 4.5] + + a = pa.array([(1, None), None, (None, 2.5)], type=ty) + xs, ys = a.flatten() + assert xs.to_pylist() == [1, None, None] + assert ys.to_pylist() == [None, None, 2.5] + xs, ys = a[1:].flatten() + assert xs.to_pylist() == [None, None] + assert ys.to_pylist() == [None, 2.5] + + +def test_struct_array_field(): + ty = pa.struct([pa.field('x', pa.int16()), + pa.field('y', pa.float32())]) + a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) + + x0 = a.field(0) + y0 = a.field(1) + x1 = a.field(-2) + y1 = a.field(-1) + x2 = a.field('x') + y2 = a.field('y') + + assert isinstance(x0, pa.lib.Int16Array) + assert isinstance(y1, pa.lib.FloatArray) + assert x0.equals(pa.array([1, 3, 5], type=pa.int16())) + assert y0.equals(pa.array([2.5, 4.5, 6.5], type=pa.float32())) + assert x0.equals(x1) + assert x0.equals(x2) + assert y0.equals(y1) + assert y0.equals(y2) + + for invalid_index in [None, pa.int16()]: + with pytest.raises(TypeError): + a.field(invalid_index) + + for invalid_index in [3, -3]: + with pytest.raises(IndexError): + a.field(invalid_index) + + for invalid_name in ['z', '']: + with pytest.raises(KeyError): + a.field(invalid_name) + + +def test_struct_array_flattened_field(): + ty = pa.struct([pa.field('x', pa.int16()), + pa.field('y', pa.float32())]) + a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty, + mask=pa.array([False, True, False])) + + x0 = a._flattened_field(0) + y0 = a._flattened_field(1) + x1 = a._flattened_field(-2) + y1 = a._flattened_field(-1) + x2 = a._flattened_field('x') + y2 = a._flattened_field('y') + + assert isinstance(x0, pa.lib.Int16Array) + assert isinstance(y1, pa.lib.FloatArray) + assert x0.equals(pa.array([1, None, 5], type=pa.int16())) + assert y0.equals(pa.array([2.5, None, 6.5], type=pa.float32())) + assert x0.equals(x1) + assert x0.equals(x2) + assert y0.equals(y1) + assert y0.equals(y2) + + for invalid_index in [None, pa.int16()]: + with pytest.raises(TypeError): + a._flattened_field(invalid_index) + + for invalid_index in [3, -3]: + with pytest.raises(IndexError): + a._flattened_field(invalid_index) + + for invalid_name in ['z', '']: + with pytest.raises(KeyError): + a._flattened_field(invalid_name) + + +def test_empty_cast(): + types = [ + pa.null(), + pa.bool_(), + pa.int8(), + pa.int16(), + pa.int32(), + pa.int64(), + pa.uint8(), + pa.uint16(), + pa.uint32(), + pa.uint64(), + pa.float16(), + pa.float32(), + pa.float64(), + pa.date32(), + pa.date64(), + pa.binary(), + pa.binary(length=4), + pa.string(), + ] + + for (t1, t2) in itertools.product(types, types): + try: + # ARROW-4766: Ensure that supported types conversion don't segfault + # on empty arrays of common types + pa.array([], type=t1).cast(t2) + except (pa.lib.ArrowNotImplementedError, pa.ArrowInvalid): + continue + 
+ +def test_nested_dictionary_array(): + dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b']) + list_arr = pa.ListArray.from_arrays([0, 2, 3], dict_arr) + assert list_arr.to_pylist() == [['a', 'b'], ['a']] + + dict_arr = pa.DictionaryArray.from_arrays([0, 1, 0], ['a', 'b']) + dict_arr2 = pa.DictionaryArray.from_arrays([0, 1, 2, 1, 0], dict_arr) + assert dict_arr2.to_pylist() == ['a', 'b', 'a', 'b', 'a'] + + +def test_array_from_numpy_str_utf8(): + # ARROW-3890 -- in Python 3, NPY_UNICODE arrays are produced, but in Python + # 2 they are NPY_STRING (binary), so we must do UTF-8 validation + vec = np.array(["toto", "tata"]) + vec2 = np.array(["toto", "tata"], dtype=object) + + arr = pa.array(vec, pa.string()) + arr2 = pa.array(vec2, pa.string()) + expected = pa.array(["toto", "tata"]) + assert arr.equals(expected) + assert arr2.equals(expected) + + # with mask, separate code path + mask = np.array([False, False], dtype=bool) + arr = pa.array(vec, pa.string(), mask=mask) + assert arr.equals(expected) + + # UTF8 validation failures + vec = np.array([('mañana').encode('utf-16-le')]) + with pytest.raises(ValueError): + pa.array(vec, pa.string()) + + with pytest.raises(ValueError): + pa.array(vec, pa.string(), mask=np.array([False])) + + +@pytest.mark.slow +@pytest.mark.large_memory +def test_numpy_binary_overflow_to_chunked(): + # ARROW-3762, ARROW-5966 + + # 2^31 + 1 bytes + values = [b'x'] + unicode_values = ['x'] + + # Make 10 unique 1MB strings then repeat them 2048 times + unique_strings = { + i: b'x' * ((1 << 20) - 1) + str(i % 10).encode('utf8') + for i in range(10) + } + unicode_unique_strings = {i: x.decode('utf8') + for i, x in unique_strings.items()} + values += [unique_strings[i % 10] for i in range(1 << 11)] + unicode_values += [unicode_unique_strings[i % 10] + for i in range(1 << 11)] + + for case, ex_type in [(values, pa.binary()), + (unicode_values, pa.utf8())]: + arr = np.array(case) + arrow_arr = pa.array(arr) + arr = None + + assert isinstance(arrow_arr, pa.ChunkedArray) + assert arrow_arr.type == ex_type + + # Split up into 16MB chunks.
128 * 16 = 2048, so 129 + assert arrow_arr.num_chunks == 129 + + value_index = 0 + for i in range(arrow_arr.num_chunks): + chunk = arrow_arr.chunk(i) + for val in chunk: + assert val.as_py() == case[value_index] + value_index += 1 + + +@pytest.mark.large_memory +def test_list_child_overflow_to_chunked(): + kilobyte_string = 'x' * 1024 + two_mega = 2**21 + + vals = [[kilobyte_string]] * (two_mega - 1) + arr = pa.array(vals) + assert isinstance(arr, pa.Array) + assert len(arr) == two_mega - 1 + + vals = [[kilobyte_string]] * two_mega + arr = pa.array(vals) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr) == two_mega + assert len(arr.chunk(0)) == two_mega - 1 + assert len(arr.chunk(1)) == 1 + + +def test_infer_type_masked(): + # ARROW-5208 + ty = pa.infer_type(['foo', 'bar', None, 2], + mask=[False, False, False, True]) + assert ty == pa.utf8() + + # all masked + ty = pa.infer_type(['foo', 'bar', None, 2], + mask=np.array([True, True, True, True])) + assert ty == pa.null() + + # length 0 + assert pa.infer_type([], mask=[]) == pa.null() + + +def test_array_masked(): + # ARROW-5208 + arr = pa.array([4, None, 4, 3.], + mask=np.array([False, True, False, True])) + assert arr.type == pa.int64() + + # ndarray dtype=object argument + arr = pa.array(np.array([4, None, 4, 3.], dtype="O"), + mask=np.array([False, True, False, True])) + assert arr.type == pa.int64() + + +def test_array_supported_masks(): + # ARROW-13883 + arr = pa.array([4, None, 4, 3.], + mask=np.array([False, True, False, True])) + assert arr.to_pylist() == [4, None, 4, None] + + arr = pa.array([4, None, 4, 3], + mask=pa.array([False, True, False, True])) + assert arr.to_pylist() == [4, None, 4, None] + + arr = pa.array([4, None, 4, 3], + mask=[False, True, False, True]) + assert arr.to_pylist() == [4, None, 4, None] + + arr = pa.array([4, 3, None, 3], + mask=[False, True, False, True]) + assert arr.to_pylist() == [4, None, None, None] + + # Non boolean values + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=pa.array([1.0, 2.0, 3.0, 4.0])) + + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=[1.0, 2.0, 3.0, 4.0]) + + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=np.array([1.0, 2.0, 3.0, 4.0])) + + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=pa.array([False, True, False, True], + mask=pa.array([True, True, True, True]))) + + with pytest.raises(pa.ArrowTypeError): + arr = pa.array([4, None, 4, 3], + mask=pa.array([False, None, False, True])) + + # Numpy arrays only accept numpy masks + with pytest.raises(TypeError): + arr = pa.array(np.array([4, None, 4, 3.]), + mask=[True, False, True, False]) + + with pytest.raises(TypeError): + arr = pa.array(np.array([4, None, 4, 3.]), + mask=pa.array([True, False, True, False])) + + +@pytest.mark.pandas +def test_array_supported_pandas_masks(): + import pandas + arr = pa.array(pandas.Series([0, 1], name="a", dtype="int64"), + mask=pandas.Series([True, False], dtype='bool')) + assert arr.to_pylist() == [None, 1] + + +def test_binary_array_masked(): + # ARROW-12431 + masked_basic = pa.array([b'\x05'], type=pa.binary(1), + mask=np.array([False])) + assert [b'\x05'] == masked_basic.to_pylist() + + # Fixed Length Binary + masked = pa.array(np.array([b'\x05']), type=pa.binary(1), + mask=np.array([False])) + assert [b'\x05'] == masked.to_pylist() + + masked_nulls = pa.array(np.array([b'\x05']), type=pa.binary(1), + mask=np.array([True])) + assert [None] ==
masked_nulls.to_pylist() + + # Variable Length Binary + masked = pa.array(np.array([b'\x05']), type=pa.binary(), + mask=np.array([False])) + assert [b'\x05'] == masked.to_pylist() + + masked_nulls = pa.array(np.array([b'\x05']), type=pa.binary(), + mask=np.array([True])) + assert [None] == masked_nulls.to_pylist() + + # Fixed Length Binary, copy + npa = np.array([b'aaa', b'bbb', b'ccc']*10) + arrow_array = pa.array(npa, type=pa.binary(3), + mask=np.array([False, False, False]*10)) + npa[npa == b"bbb"] = b"XXX" + assert ([b'aaa', b'bbb', b'ccc']*10) == arrow_array.to_pylist() + + +def test_binary_array_strided(): + # Masked + nparray = np.array([b"ab", b"cd", b"ef"]) + arrow_array = pa.array(nparray[::2], pa.binary(2), + mask=np.array([False, False])) + assert [b"ab", b"ef"] == arrow_array.to_pylist() + + # Unmasked + nparray = np.array([b"ab", b"cd", b"ef"]) + arrow_array = pa.array(nparray[::2], pa.binary(2)) + assert [b"ab", b"ef"] == arrow_array.to_pylist() + + +def test_array_invalid_mask_raises(): + # ARROW-10742 + cases = [ + ([1, 2], np.array([False, False], dtype="O"), + TypeError, "must be boolean dtype"), + + ([1, 2], np.array([[False], [False]]), + pa.ArrowInvalid, "must be 1D array"), + + ([1, 2, 3], np.array([False, False]), + pa.ArrowInvalid, "different length"), + + (np.array([1, 2]), np.array([False, False], dtype="O"), + TypeError, "must be boolean dtype"), + + (np.array([1, 2]), np.array([[False], [False]]), + ValueError, "must be 1D array"), + + (np.array([1, 2, 3]), np.array([False, False]), + ValueError, "different length"), + ] + for obj, mask, ex, msg in cases: + with pytest.raises(ex, match=msg): + pa.array(obj, mask=mask) + + +def test_array_from_large_pyints(): + # ARROW-5430 + with pytest.raises(OverflowError): + # too large for int64 so dtype must be explicitly provided + pa.array([int(2 ** 63)]) + + +def test_numpy_array_protocol(): + # test the __array__ method on pyarrow.Array + arr = pa.array([1, 2, 3]) + result = np.asarray(arr) + expected = np.array([1, 2, 3], dtype="int64") + np.testing.assert_array_equal(result, expected) + + # this should not raise a deprecation warning with numpy 2.0+ + result = np.array(arr, copy=False) + np.testing.assert_array_equal(result, expected) + + result = np.array(arr, dtype="int64", copy=False) + np.testing.assert_array_equal(result, expected) + + # no zero-copy is possible + arr = pa.array([1, 2, None]) + expected = np.array([1, 2, np.nan], dtype="float64") + result = np.asarray(arr) + np.testing.assert_array_equal(result, expected) + + if Version(np.__version__) < Version("2.0"): + # copy keyword is not strict and not passed down to __array__ + result = np.array(arr, copy=False) + np.testing.assert_array_equal(result, expected) + + result = np.array(arr, dtype="float64", copy=False) + np.testing.assert_array_equal(result, expected) + else: + # starting with numpy 2.0, the copy=False keyword is assumed to be strict + with pytest.raises(ValueError, match="Unable to avoid a copy"): + np.array(arr, copy=False) + + arr = pa.array([1, 2, 3]) + with pytest.raises(ValueError): + np.array(arr, dtype="float64", copy=False) + + # copy=True -> not yet passed by numpy, so we have to call this directly to test + arr = pa.array([1, 2, 3]) + result = arr.__array__(copy=True) + assert result.flags.writeable + + arr = pa.array([1, 2, 3]) + result = arr.__array__(dtype=np.dtype("float64"), copy=True) + assert result.dtype == "float64" + + +def test_array_protocol(): + + class MyArray: + def __init__(self, data): + self.data = data + + def 
__arrow_array__(self, type=None): + return pa.array(self.data, type=type) + + arr = MyArray(np.array([1, 2, 3], dtype='int64')) + result = pa.array(arr) + expected = pa.array([1, 2, 3], type=pa.int64()) + assert result.equals(expected) + result = pa.array(arr, type=pa.int64()) + expected = pa.array([1, 2, 3], type=pa.int64()) + assert result.equals(expected) + result = pa.array(arr, type=pa.float64()) + expected = pa.array([1, 2, 3], type=pa.float64()) + assert result.equals(expected) + + # raise error when passing size or mask keywords + with pytest.raises(ValueError): + pa.array(arr, mask=np.array([True, False, True])) + with pytest.raises(ValueError): + pa.array(arr, size=3) + + # ensure the return value is an Array + class MyArrayInvalid: + def __init__(self, data): + self.data = data + + def __arrow_array__(self, type=None): + return np.array(self.data) + + arr = MyArrayInvalid(np.array([1, 2, 3], dtype='int64')) + with pytest.raises(TypeError): + pa.array(arr) + + # ARROW-7066 - allow ChunkedArray output + # GH-33727 - if num_chunks=1 return Array + class MyArray2: + def __init__(self, data): + self.data = data + + def __arrow_array__(self, type=None): + return pa.chunked_array([self.data], type=type) + + arr = MyArray2(np.array([1, 2, 3], dtype='int64')) + result = pa.array(arr) + expected = pa.array([1, 2, 3], type=pa.int64()) + assert result.equals(expected) + + class MyArray3: + def __init__(self, data1, data2): + self.data1 = data1 + self.data2 = data2 + + def __arrow_array__(self, type=None): + return pa.chunked_array([self.data1, self.data2], type=type) + + np_arr = np.array([1, 2, 3], dtype='int64') + arr = MyArray3(np_arr, np_arr) + result = pa.array(arr) + expected = pa.chunked_array([[1, 2, 3], [1, 2, 3]], type=pa.int64()) + assert result.equals(expected) + + +def test_c_array_protocol(): + class ArrayWrapper: + def __init__(self, data): + self.data = data + + def __arrow_c_array__(self, requested_schema=None): + return self.data.__arrow_c_array__(requested_schema) + + # Can roundtrip through the C array protocol + arr = ArrayWrapper(pa.array([1, 2, 3], type=pa.int64())) + result = pa.array(arr) + assert result == arr.data + + # Will cast to requested type + result = pa.array(arr, type=pa.int32()) + assert result == pa.array([1, 2, 3], type=pa.int32()) + + +def test_concat_array(): + concatenated = pa.concat_arrays( + [pa.array([1, 2]), pa.array([3, 4])]) + assert concatenated.equals(pa.array([1, 2, 3, 4])) + + +def test_concat_array_different_types(): + with pytest.raises(pa.ArrowInvalid): + pa.concat_arrays([pa.array([1]), pa.array([2.])]) + + +def test_concat_array_invalid_type(): + # ARROW-9920 - do not segfault on non-array input + + with pytest.raises(TypeError, match="should contain Array objects"): + pa.concat_arrays([None]) + + arr = pa.chunked_array([[0, 1], [3, 4]]) + with pytest.raises(TypeError, match="should contain Array objects"): + pa.concat_arrays(arr) + + +@pytest.mark.pandas +def test_to_pandas_timezone(): + # https://issues.apache.org/jira/browse/ARROW-6652 + arr = pa.array([1, 2, 3], type=pa.timestamp('s', tz='Europe/Brussels')) + s = arr.to_pandas() + assert s.dt.tz is not None + arr = pa.chunked_array([arr]) + s = arr.to_pandas() + assert s.dt.tz is not None + + +@pytest.mark.pandas +def test_to_pandas_float16_list(): + # https://github.com/apache/arrow/issues/36168 + expected = [[np.float16(1)], [np.float16(2)], [np.float16(3)]] + arr = pa.array(expected) + result = arr.to_pandas() + assert result[0].dtype == "float16" + assert result.tolist() == 
expected + + +def test_array_sort(): + arr = pa.array([5, 7, 35], type=pa.int64()) + sorted_arr = arr.sort("descending") + assert sorted_arr.to_pylist() == [35, 7, 5] + + arr = pa.chunked_array([[1, 2, 3], [4, 5, 6]]) + sorted_arr = arr.sort("descending") + assert sorted_arr.to_pylist() == [6, 5, 4, 3, 2, 1] + + arr = pa.array([5, 7, 35, None], type=pa.int64()) + sorted_arr = arr.sort("descending", null_placement="at_end") + assert sorted_arr.to_pylist() == [35, 7, 5, None] + sorted_arr = arr.sort("descending", null_placement="at_start") + assert sorted_arr.to_pylist() == [None, 35, 7, 5] + + +def test_struct_array_sort(): + arr = pa.StructArray.from_arrays([ + pa.array([5, 7, 7, 35], type=pa.int64()), + pa.array(["foo", "car", "bar", "foobar"]) + ], names=["a", "b"]) + + sorted_arr = arr.sort("descending", by="a") + assert sorted_arr.to_pylist() == [ + {"a": 35, "b": "foobar"}, + {"a": 7, "b": "car"}, + {"a": 7, "b": "bar"}, + {"a": 5, "b": "foo"}, + ] + + arr_with_nulls = pa.StructArray.from_arrays([ + pa.array([5, 7, 7, 35], type=pa.int64()), + pa.array(["foo", "car", "bar", "foobar"]) + ], names=["a", "b"], mask=pa.array([False, False, True, False])) + + sorted_arr = arr_with_nulls.sort( + "descending", by="a", null_placement="at_start") + assert sorted_arr.to_pylist() == [ + None, + {"a": 35, "b": "foobar"}, + {"a": 7, "b": "car"}, + {"a": 5, "b": "foo"}, + ] + + sorted_arr = arr_with_nulls.sort( + "descending", by="a", null_placement="at_end") + assert sorted_arr.to_pylist() == [ + {"a": 35, "b": "foobar"}, + {"a": 7, "b": "car"}, + {"a": 5, "b": "foo"}, + None + ] + + +def test_array_accepts_pyarrow_array(): + arr = pa.array([1, 2, 3]) + result = pa.array(arr) + assert arr == result + + # Test casting to a different type + result = pa.array(arr, type=pa.uint8()) + expected = pa.array([1, 2, 3], type=pa.uint8()) + assert expected == result + assert expected.type == pa.uint8() + + # Test casting with safe keyword + arr = pa.array([2 ** 63 - 1], type=pa.int64()) + + with pytest.raises(pa.ArrowInvalid): + pa.array(arr, type=pa.int32()) + + expected = pa.array([-1], type=pa.int32()) + result = pa.array(arr, type=pa.int32(), safe=False) + assert result == expected + + # Test memory_pool keyword is accepted + result = pa.array(arr, memory_pool=pa.default_memory_pool()) + assert arr == result + + +def check_run_end_encoded(ree_array, run_ends, values, logical_length, physical_length, + physical_offset): + assert ree_array.run_ends.to_pylist() == run_ends + assert ree_array.values.to_pylist() == values + assert len(ree_array) == logical_length + assert ree_array.find_physical_length() == physical_length + assert ree_array.find_physical_offset() == physical_offset + + +def check_run_end_encoded_from_arrays_with_type(ree_type=None): + run_ends = [3, 5, 10, 19] + values = [1, 2, 1, 3] + ree_array = pa.RunEndEncodedArray.from_arrays(run_ends, values, ree_type) + check_run_end_encoded(ree_array, run_ends, values, 19, 4, 0) + + +def test_run_end_encoded_from_arrays(): + check_run_end_encoded_from_arrays_with_type() + for run_end_type in [pa.int16(), pa.int32(), pa.int64()]: + for value_type in [pa.uint32(), pa.int32(), pa.uint64(), pa.int64()]: + ree_type = pa.run_end_encoded(run_end_type, value_type) + check_run_end_encoded_from_arrays_with_type(ree_type) + + +def test_run_end_encoded_from_buffers(): + run_ends = [3, 5, 10, 19] + values = [1, 2, 1, 3] + + ree_type = pa.run_end_encoded(run_end_type=pa.int32(), value_type=pa.uint8()) + length = 19 + buffers = [None] + null_count = 0 + offset = 0 + 
children = [run_ends, values] + + ree_array = pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + null_count, offset, + children) + check_run_end_encoded(ree_array, run_ends, values, 19, 4, 0) + # buffers = [] + ree_array = pa.RunEndEncodedArray.from_buffers(ree_type, length, [], + null_count, offset, + children) + check_run_end_encoded(ree_array, run_ends, values, 19, 4, 0) + # null_count = -1 + ree_array = pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + -1, offset, + children) + check_run_end_encoded(ree_array, run_ends, values, 19, 4, 0) + # offset = 4 + ree_array = pa.RunEndEncodedArray.from_buffers(ree_type, length - 4, buffers, + null_count, 4, children) + check_run_end_encoded(ree_array, run_ends, values, length - 4, 3, 1) + # buffers = [None, None] + with pytest.raises(ValueError): + pa.RunEndEncodedArray.from_buffers(ree_type, length, [None, None], + null_count, offset, children) + # children = None + with pytest.raises(ValueError): + pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + null_count, offset, None) + # len(children) == 1 + with pytest.raises(ValueError): + pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + null_count, offset, [run_ends]) + # null_count = 1 + with pytest.raises(ValueError): + pa.RunEndEncodedArray.from_buffers(ree_type, length, buffers, + 1, offset, children) + + +def test_run_end_encoded_from_array_with_type(): + run_ends = [1, 3, 6] + values = [1, 2, 3] + ree_type = pa.run_end_encoded(pa.int32(), pa.int64()) + expected = pa.RunEndEncodedArray.from_arrays(run_ends, values, + ree_type) + + arr = [1, 2, 2, 3, 3, 3] + result = pa.array(arr, type=ree_type) + assert result.equals(expected) + result = pa.array(np.array(arr), type=ree_type) + assert result.equals(expected) + + ree_type_2 = pa.run_end_encoded(pa.int16(), pa.float32()) + result = pa.array(arr, type=ree_type_2) + assert not result.equals(expected) + expected_2 = pa.RunEndEncodedArray.from_arrays(run_ends, values, + ree_type_2) + assert result.equals(expected_2) + + run_ends = [1, 3, 5, 6] + values = [1, 2, 3, None] + expected = pa.RunEndEncodedArray.from_arrays(run_ends, values, + ree_type) + + arr = [1, 2, 2, 3, 3, None] + result = pa.array(arr, type=ree_type) + assert result.equals(expected) + + run_ends = [1, 3, 4, 5, 6] + values = [1, 2, None, 3, None] + expected = pa.RunEndEncodedArray.from_arrays(run_ends, values, + ree_type) + + mask = pa.array([False, False, False, True, False, True]) + result = pa.array(arr, type=ree_type, mask=mask) + assert result.equals(expected) + + +def test_run_end_encoded_to_numpy(): + arr = [1, 2, 2, 3, 3, 3] + ree_array = pa.array(arr, pa.run_end_encoded(pa.int32(), pa.int64())) + expected = np.array(arr) + + np.testing.assert_array_equal(ree_array.to_numpy(zero_copy_only=False), expected) + + with pytest.raises(pa.ArrowInvalid): + ree_array.to_numpy() + + +@pytest.mark.pandas +def test_run_end_encoded_to_pandas(): + arr = [1, 2, 2, 3, 3, 3] + ree_array = pa.array(arr, pa.run_end_encoded(pa.int32(), pa.int64())) + + assert ree_array.to_pandas().tolist() == arr + + with pytest.raises(pa.ArrowInvalid): + ree_array.to_pandas(zero_copy_only=True) + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory'), + [(pa.ListViewArray, pa.list_view), + (pa.LargeListViewArray, pa.large_list_view)]) +def test_list_view_from_arrays(list_array_type, list_type_factory): + # test in order offsets, similar to ListArray representation + values = [1, 2, 3, 4, 5, 6, None, 7] + offsets = [0, 2, 4, 6] + sizes = [2, 2, 2, 
2] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.to_pylist() == [[1, 2], [3, 4], [5, 6], [None, 7]] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == offsets + assert array.sizes.to_pylist() == sizes + + # with specified type + typ = list_type_factory(pa.field("name", pa.int64())) + result = list_array_type.from_arrays(offsets, sizes, values, typ) + assert result.type == typ + assert result.type.value_field.name == "name" + + # with mismatching type + typ = list_type_factory(pa.binary()) + with pytest.raises(TypeError): + list_array_type.from_arrays(offsets, sizes, values, type=typ) + + # test out of order offsets with overlapping values + values = [1, 2, 3, 4] + offsets = [2, 1, 0] + sizes = [2, 2, 2] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.to_pylist() == [[3, 4], [2, 3], [1, 2]] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == offsets + assert array.sizes.to_pylist() == sizes + + # test null offsets and empty list values + values = [] + offsets = [0, None] + sizes = [0, 0] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.to_pylist() == [[], None] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == [0, 0] + assert array.sizes.to_pylist() == sizes + + # test null sizes and empty list values + values = [] + offsets = [0, 0] + sizes = [None, 0] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.to_pylist() == [None, []] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == offsets + assert array.sizes.to_pylist() == [0, 0] + + # test null bitmask + values = [1, 2] + offsets = [0, 0, 1] + sizes = [1, 0, 1] + mask = pa.array([False, True, False]) + array = list_array_type.from_arrays(offsets, sizes, values, mask=mask) + + assert array.to_pylist() == [[1], None, [2]] + assert array.values.to_pylist() == values + assert array.offsets.to_pylist() == offsets + assert array.sizes.to_pylist() == sizes + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory'), + [(pa.ListViewArray, pa.list_view), + (pa.LargeListViewArray, pa.large_list_view)]) +def test_list_view_from_arrays_fails(list_array_type, list_type_factory): + values = [1, 2] + offsets = [0, 1, None] + sizes = [1, 1, 0] + mask = pa.array([False, False, True]) + + # Ambiguous to specify both validity map and offsets or sizes with nulls + with pytest.raises(pa.lib.ArrowInvalid): + list_array_type.from_arrays(offsets, sizes, values, mask=mask) + + offsets = [0, 1, 1] + array = list_array_type.from_arrays(offsets, sizes, values, mask=mask) + array_slice = array[1:] + + # List offsets and sizes must not be slices if a validity map is specified + with pytest.raises(pa.lib.ArrowInvalid): + list_array_type.from_arrays( + array_slice.offsets, array_slice.sizes, + array_slice.values, mask=array_slice.is_null()) + + +@pytest.mark.parametrize(('list_array_type', 'list_type_factory', 'offset_type'), + [(pa.ListViewArray, pa.list_view, pa.int32()), + (pa.LargeListViewArray, pa.large_list_view, pa.int64())]) +def test_list_view_flatten(list_array_type, list_type_factory, offset_type): + arr0 = pa.array([ + 1, None, 2, + 3, 4, + 5, 6, + 7, 8 + ], type=pa.int64()) + + typ1 = list_type_factory(pa.int64()) + arr1 = pa.array([ + [1, None, 2], + None, + [3, 4], + [], + [5, 6], + None, + [7, 8] + ], type=typ1) + offsets1 = pa.array([0, 3, 3, 5, 5, 7, 7], type=offset_type) + sizes1 = pa.array([3, 0, 2, 
0, 2, 0, 2], type=offset_type) + + typ2 = list_type_factory( + list_type_factory( + pa.int64() + ) + ) + arr2 = pa.array([ + None, + [ + [1, None, 2], + None, + [3, 4] + ], + [], + [ + [], + [5, 6], + None + ], + [ + [7, 8] + ] + ], type=typ2) + offsets2 = pa.array([0, 0, 3, 3, 6], type=offset_type) + sizes2 = pa.array([0, 3, 0, 3, 1], type=offset_type) + + assert arr1.flatten().equals(arr0) + assert arr1.offsets.equals(offsets1) + assert arr1.sizes.equals(sizes1) + assert arr1.values.equals(arr0) + assert arr2.flatten().equals(arr1) + assert arr2.offsets.equals(offsets2) + assert arr2.sizes.equals(sizes2) + assert arr2.values.equals(arr1) + assert arr2.flatten().flatten().equals(arr0) + assert arr2.values.values.equals(arr0) + + # test out of order offsets + values = [1, 2, 3, 4] + offsets = [3, 2, 1, 0] + sizes = [1, 1, 1, 1] + array = list_array_type.from_arrays(offsets, sizes, values) + + assert array.flatten().to_pylist() == [4, 3, 2, 1] + + # test null elements backed by non-empty sublists + mask = pa.array([False, False, False, True]) + array = list_array_type.from_arrays(offsets, sizes, values, mask=mask) + + assert array.flatten().to_pylist() == [4, 3, 2] + assert array.values.to_pylist() == [1, 2, 3, 4] + + +@pytest.mark.parametrize('list_view_type', [pa.ListViewArray, pa.LargeListViewArray]) +def test_list_view_slice(list_view_type): + # sliced -> values keeps referring to full values buffer, but offsets is + # sliced as well so the offsets correctly point into the full values array + # sliced -> flatten() will return the sliced value array. + + array = list_view_type.from_arrays(offsets=[0, 3, 4], sizes=[ + 3, 1, 2], values=[1, 2, 3, 4, 5, 6]) + sliced_array = array[1:] + + assert sliced_array.values.to_pylist() == [1, 2, 3, 4, 5, 6] + assert sliced_array.offsets.to_pylist() == [3, 4] + assert sliced_array.flatten().to_pylist() == [4, 5, 6] + + i = sliced_array.offsets[0].as_py() + j = sliced_array.offsets[1].as_py() + + assert sliced_array[0].as_py() == sliced_array.values[i:j].to_pylist() == [4] diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_builder.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..abc8a0013df37542228b6b73c6ea0c45563ba7b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_builder.py @@ -0,0 +1,86 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import weakref + +import numpy as np + +import pyarrow as pa +from pyarrow.lib import StringBuilder, StringViewBuilder + + +def test_weakref(): + sbuilder = StringBuilder() + wr = weakref.ref(sbuilder) + assert wr() is not None + del sbuilder + assert wr() is None + + +def test_string_builder_append(): + sbuilder = StringBuilder() + sbuilder.append(b"a byte string") + sbuilder.append("a string") + sbuilder.append(np.nan) + sbuilder.append(None) + assert len(sbuilder) == 4 + assert sbuilder.null_count == 2 + arr = sbuilder.finish() + assert len(sbuilder) == 0 + assert isinstance(arr, pa.Array) + assert arr.null_count == 2 + assert arr.type == 'str' + expected = ["a byte string", "a string", None, None] + assert arr.to_pylist() == expected + + +def test_string_builder_append_values(): + sbuilder = StringBuilder() + sbuilder.append_values([np.nan, None, "text", None, "other text"]) + assert sbuilder.null_count == 3 + arr = sbuilder.finish() + assert arr.null_count == 3 + expected = [None, None, "text", None, "other text"] + assert arr.to_pylist() == expected + + +def test_string_builder_append_after_finish(): + sbuilder = StringBuilder() + sbuilder.append_values([np.nan, None, "text", None, "other text"]) + arr = sbuilder.finish() + sbuilder.append("No effect") + expected = [None, None, "text", None, "other text"] + assert arr.to_pylist() == expected + + +def test_string_view_builder(): + builder = StringViewBuilder() + builder.append(b"a byte string") + builder.append("a string") + builder.append("a longer not-inlined string") + builder.append(np.nan) + builder.append_values([None, "text"]) + assert len(builder) == 6 + assert builder.null_count == 2 + arr = builder.finish() + assert isinstance(arr, pa.Array) + assert arr.null_count == 2 + assert arr.type == 'string_view' + expected = [ + "a byte string", "a string", "a longer not-inlined string", None, None, "text" + ] + assert arr.to_pylist() == expected diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf41c3c14b6e9a019eb28474e346a5c5686b19b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cffi.py @@ -0,0 +1,707 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
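+
+"""Tests for the Arrow C Data / C Device / C Stream interfaces and the
+PyCapsule-based protocols (__arrow_c_schema__, __arrow_c_array__,
+__arrow_c_stream__). (Descriptive module docstring added for readability;
+not in the upstream file.)"""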
+ +import contextlib +import ctypes +import gc + +import pyarrow as pa +try: + from pyarrow.cffi import ffi +except ImportError: + ffi = None + +import pytest + +try: + import pandas as pd + import pandas.testing as tm +except ImportError: + pd = tm = None + + +needs_cffi = pytest.mark.skipif(ffi is None, + reason="test needs cffi package installed") + +assert_schema_released = pytest.raises( + ValueError, match="Cannot import released ArrowSchema") + +assert_array_released = pytest.raises( + ValueError, match="Cannot import released ArrowArray") + +assert_stream_released = pytest.raises( + ValueError, match="Cannot import released ArrowArrayStream") + + +def PyCapsule_IsValid(capsule, name): + return ctypes.pythonapi.PyCapsule_IsValid(ctypes.py_object(capsule), name) == 1 + + +@contextlib.contextmanager +def registered_extension_type(ext_type): + pa.register_extension_type(ext_type) + try: + yield + finally: + pa.unregister_extension_type(ext_type.extension_name) + + +class ParamExtType(pa.ExtensionType): + + def __init__(self, width): + self._width = width + super().__init__(pa.binary(width), + "pyarrow.tests.test_cffi.ParamExtType") + + @property + def width(self): + return self._width + + def __arrow_ext_serialize__(self): + return str(self.width).encode() + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + width = int(serialized.decode()) + return cls(width) + + +def make_schema(): + return pa.schema([('ints', pa.list_(pa.int32()))], + metadata={b'key1': b'value1'}) + + +def make_extension_schema(): + return pa.schema([('ext', ParamExtType(3))], + metadata={b'key1': b'value1'}) + + +def make_extension_storage_schema(): + # Should be kept in sync with make_extension_schema + return pa.schema([('ext', ParamExtType(3).storage_type)], + metadata={b'key1': b'value1'}) + + +def make_batch(): + return pa.record_batch([[[1], [2, 42]]], make_schema()) + + +def make_extension_batch(): + schema = make_extension_schema() + ext_col = schema[0].type.wrap_array(pa.array([b"foo", b"bar"], + type=pa.binary(3))) + return pa.record_batch([ext_col], schema) + + +def make_batches(): + schema = make_schema() + return [ + pa.record_batch([[[1], [2, 42]]], schema), + pa.record_batch([[None, [], [5, 6]]], schema), + ] + + +def make_serialized(schema, batches): + with pa.BufferOutputStream() as sink: + with pa.ipc.new_stream(sink, schema) as out: + for batch in batches: + out.write(batch) + return sink.getvalue() + + +@needs_cffi +def test_export_import_type(): + c_schema = ffi.new("struct ArrowSchema*") + ptr_schema = int(ffi.cast("uintptr_t", c_schema)) + + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + typ = pa.list_(pa.int32()) + typ._export_to_c(ptr_schema) + assert pa.total_allocated_bytes() > old_allocated + # Delete and recreate C++ object from exported pointer + del typ + assert pa.total_allocated_bytes() > old_allocated + typ_new = pa.DataType._import_from_c(ptr_schema) + assert typ_new == pa.list_(pa.int32()) + assert pa.total_allocated_bytes() == old_allocated + # Now released + with assert_schema_released: + pa.DataType._import_from_c(ptr_schema) + + # Invalid format string + pa.int32()._export_to_c(ptr_schema) + bad_format = ffi.new("char[]", b"zzz") + c_schema.format = bad_format + with pytest.raises(ValueError, + match="Invalid or unsupported format string"): + pa.DataType._import_from_c(ptr_schema) + # Now released + with assert_schema_released: + pa.DataType._import_from_c(ptr_schema) + + 
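+# Illustrative sketch (not part of the upstream suite): the export/import
+# round-trip that test_export_import_type above exercises, condensed into a
+# single hypothetical helper. It reuses the module-level `ffi` and `pa`
+# imports and the same private _export_to_c / _import_from_c calls shown in
+# that test.
+def _roundtrip_type_via_c_interface(typ):
+    # Allocate an ArrowSchema struct, export the type into it, then
+    # re-import it; importing consumes (releases) the exported struct, so a
+    # second import from the same pointer would raise.
+    c_schema = ffi.new("struct ArrowSchema*")
+    ptr_schema = int(ffi.cast("uintptr_t", c_schema))
+    typ._export_to_c(ptr_schema)
+    return pa.DataType._import_from_c(ptr_schema)
+
+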
+@needs_cffi +def test_export_import_field(): + c_schema = ffi.new("struct ArrowSchema*") + ptr_schema = int(ffi.cast("uintptr_t", c_schema)) + + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + field = pa.field("test", pa.list_(pa.int32()), nullable=True) + field._export_to_c(ptr_schema) + assert pa.total_allocated_bytes() > old_allocated + # Delete and recreate C++ object from exported pointer + del field + assert pa.total_allocated_bytes() > old_allocated + + field_new = pa.Field._import_from_c(ptr_schema) + assert field_new == pa.field("test", pa.list_(pa.int32()), nullable=True) + assert pa.total_allocated_bytes() == old_allocated + + # Now released + with assert_schema_released: + pa.Field._import_from_c(ptr_schema) + + +def check_export_import_array(array_type, exporter, importer): + c_schema = ffi.new("struct ArrowSchema*") + ptr_schema = int(ffi.cast("uintptr_t", c_schema)) + c_array = ffi.new(f"struct {array_type}*") + ptr_array = int(ffi.cast("uintptr_t", c_array)) + + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + # Type is known up front + typ = pa.list_(pa.int32()) + arr = pa.array([[1], [2, 42]], type=typ) + py_value = arr.to_pylist() + exporter(arr, ptr_array) + assert pa.total_allocated_bytes() > old_allocated + # Delete recreate C++ object from exported pointer + del arr + arr_new = importer(ptr_array, typ) + assert arr_new.to_pylist() == py_value + assert arr_new.type == pa.list_(pa.int32()) + assert pa.total_allocated_bytes() > old_allocated + del arr_new, typ + assert pa.total_allocated_bytes() == old_allocated + # Now released + with assert_array_released: + importer(ptr_array, pa.list_(pa.int32())) + + # Type is exported and imported at the same time + arr = pa.array([[1], [2, 42]], type=pa.list_(pa.int32())) + py_value = arr.to_pylist() + exporter(arr, ptr_array, ptr_schema) + # Delete and recreate C++ objects from exported pointers + del arr + arr_new = importer(ptr_array, ptr_schema) + assert arr_new.to_pylist() == py_value + assert arr_new.type == pa.list_(pa.int32()) + assert pa.total_allocated_bytes() > old_allocated + del arr_new + assert pa.total_allocated_bytes() == old_allocated + # Now released + with assert_schema_released: + importer(ptr_array, ptr_schema) + + +@needs_cffi +def test_export_import_array(): + check_export_import_array( + "ArrowArray", + pa.Array._export_to_c, + pa.Array._import_from_c, + ) + + +@needs_cffi +def test_export_import_device_array(): + check_export_import_array( + "ArrowDeviceArray", + pa.Array._export_to_c_device, + pa.Array._import_from_c_device, + ) + + # verify exported struct + c_array = ffi.new("struct ArrowDeviceArray*") + ptr_array = int(ffi.cast("uintptr_t", c_array)) + arr = pa.array([[1], [2, 42]], type=pa.list_(pa.int32())) + arr._export_to_c_device(ptr_array) + + assert c_array.device_type == 1 # ARROW_DEVICE_CPU 1 + assert c_array.device_id == -1 + assert c_array.array.length == 2 + + +def check_export_import_schema(schema_factory, expected_schema_factory=None): + if expected_schema_factory is None: + expected_schema_factory = schema_factory + + c_schema = ffi.new("struct ArrowSchema*") + ptr_schema = int(ffi.cast("uintptr_t", c_schema)) + + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + schema_factory()._export_to_c(ptr_schema) + assert pa.total_allocated_bytes() > old_allocated + # Delete and recreate C++ object from exported 
pointer + schema_new = pa.Schema._import_from_c(ptr_schema) + assert schema_new == expected_schema_factory() + assert pa.total_allocated_bytes() == old_allocated + del schema_new + assert pa.total_allocated_bytes() == old_allocated + # Now released + with assert_schema_released: + pa.Schema._import_from_c(ptr_schema) + + # Not a struct type + pa.int32()._export_to_c(ptr_schema) + with pytest.raises(ValueError, + match="ArrowSchema describes non-struct type"): + pa.Schema._import_from_c(ptr_schema) + # Now released + with assert_schema_released: + pa.Schema._import_from_c(ptr_schema) + + +@needs_cffi +def test_export_import_schema(): + check_export_import_schema(make_schema) + + +@needs_cffi +def test_export_import_schema_with_extension(): + # Extension type is unregistered => the storage type is imported + check_export_import_schema(make_extension_schema, + make_extension_storage_schema) + + # Extension type is registered => the extension type is imported + with registered_extension_type(ParamExtType(1)): + check_export_import_schema(make_extension_schema) + + +@needs_cffi +def test_export_import_schema_float_pointer(): + # Previous versions of the R Arrow library used to pass pointer + # values as a double. + c_schema = ffi.new("struct ArrowSchema*") + ptr_schema = int(ffi.cast("uintptr_t", c_schema)) + + match = "Passing a pointer value as a float is unsafe" + with pytest.warns(UserWarning, match=match): + make_schema()._export_to_c(float(ptr_schema)) + with pytest.warns(UserWarning, match=match): + schema_new = pa.Schema._import_from_c(float(ptr_schema)) + assert schema_new == make_schema() + + +def check_export_import_batch(array_type, exporter, importer, batch_factory): + c_schema = ffi.new("struct ArrowSchema*") + ptr_schema = int(ffi.cast("uintptr_t", c_schema)) + c_array = ffi.new(f"struct {array_type}*") + ptr_array = int(ffi.cast("uintptr_t", c_array)) + + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + # Schema is known up front + batch = batch_factory() + schema = batch.schema + py_value = batch.to_pydict() + exporter(batch, ptr_array) + assert pa.total_allocated_bytes() > old_allocated + # Delete and recreate C++ object from exported pointer + del batch + batch_new = importer(ptr_array, schema) + assert batch_new.to_pydict() == py_value + assert batch_new.schema == schema + assert pa.total_allocated_bytes() > old_allocated + del batch_new, schema + assert pa.total_allocated_bytes() == old_allocated + # Now released + with assert_array_released: + importer(ptr_array, make_schema()) + + # Type is exported and imported at the same time + batch = batch_factory() + py_value = batch.to_pydict() + batch._export_to_c(ptr_array, ptr_schema) + # Delete and recreate C++ objects from exported pointers + del batch + batch_new = importer(ptr_array, ptr_schema) + assert batch_new.to_pydict() == py_value + assert batch_new.schema == batch_factory().schema + assert pa.total_allocated_bytes() > old_allocated + del batch_new + assert pa.total_allocated_bytes() == old_allocated + # Now released + with assert_schema_released: + importer(ptr_array, ptr_schema) + + # Not a struct type + pa.int32()._export_to_c(ptr_schema) + batch_factory()._export_to_c(ptr_array) + with pytest.raises(ValueError, + match="ArrowSchema describes non-struct type"): + importer(ptr_array, ptr_schema) + # Now released + with assert_schema_released: + importer(ptr_array, ptr_schema) + + +@needs_cffi +def test_export_import_batch(): + check_export_import_batch( + 
"ArrowArray", + pa.RecordBatch._export_to_c, + pa.RecordBatch._import_from_c, + make_batch, + ) + + +@needs_cffi +def test_export_import_batch_with_extension(): + with registered_extension_type(ParamExtType(1)): + check_export_import_batch( + "ArrowArray", + pa.RecordBatch._export_to_c, + pa.RecordBatch._import_from_c, + make_extension_batch, + ) + + +@needs_cffi +def test_export_import_device_batch(): + check_export_import_batch( + "ArrowDeviceArray", + pa.RecordBatch._export_to_c_device, + pa.RecordBatch._import_from_c_device, + make_batch, + ) + + # verify exported struct + c_array = ffi.new("struct ArrowDeviceArray*") + ptr_array = int(ffi.cast("uintptr_t", c_array)) + batch = make_batch() + batch._export_to_c_device(ptr_array) + assert c_array.device_type == 1 # ARROW_DEVICE_CPU 1 + assert c_array.device_id == -1 + assert c_array.array.length == 2 + + +def _export_import_batch_reader(ptr_stream, reader_factory): + # Prepare input + batches = make_batches() + schema = batches[0].schema + + reader = reader_factory(schema, batches) + reader._export_to_c(ptr_stream) + # Delete and recreate C++ object from exported pointer + del reader, batches + + reader_new = pa.RecordBatchReader._import_from_c(ptr_stream) + assert reader_new.schema == schema + got_batches = list(reader_new) + del reader_new + assert got_batches == make_batches() + + # Test read_pandas() + if pd is not None: + batches = make_batches() + schema = batches[0].schema + expected_df = pa.Table.from_batches(batches).to_pandas() + + reader = reader_factory(schema, batches) + reader._export_to_c(ptr_stream) + del reader, batches + + reader_new = pa.RecordBatchReader._import_from_c(ptr_stream) + got_df = reader_new.read_pandas() + del reader_new + tm.assert_frame_equal(expected_df, got_df) + + +def make_ipc_stream_reader(schema, batches): + return pa.ipc.open_stream(make_serialized(schema, batches)) + + +def make_py_record_batch_reader(schema, batches): + return pa.RecordBatchReader.from_batches(schema, batches) + + +@needs_cffi +@pytest.mark.parametrize('reader_factory', + [make_ipc_stream_reader, + make_py_record_batch_reader]) +def test_export_import_batch_reader(reader_factory): + c_stream = ffi.new("struct ArrowArrayStream*") + ptr_stream = int(ffi.cast("uintptr_t", c_stream)) + + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + _export_import_batch_reader(ptr_stream, reader_factory) + + assert pa.total_allocated_bytes() == old_allocated + + # Now released + with assert_stream_released: + pa.RecordBatchReader._import_from_c(ptr_stream) + + +@needs_cffi +def test_export_import_exception_reader(): + # See: https://github.com/apache/arrow/issues/37164 + c_stream = ffi.new("struct ArrowArrayStream*") + ptr_stream = int(ffi.cast("uintptr_t", c_stream)) + + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + def gen(): + if True: + try: + raise ValueError('foo') + except ValueError as e: + raise NotImplementedError('bar') from e + else: + yield from make_batches() + + original = pa.RecordBatchReader.from_batches(make_schema(), gen()) + original._export_to_c(ptr_stream) + + reader = pa.RecordBatchReader._import_from_c(ptr_stream) + with pytest.raises(OSError) as exc_info: + reader.read_next_batch() + + # inner *and* outer exception should be present + assert 'ValueError: foo' in str(exc_info.value) + assert 'NotImplementedError: bar' in str(exc_info.value) + # Stacktrace containing line of the raise statement + 
assert 'raise ValueError(\'foo\')' in str(exc_info.value) + + assert pa.total_allocated_bytes() == old_allocated + + +@needs_cffi +def test_imported_batch_reader_error(): + c_stream = ffi.new("struct ArrowArrayStream*") + ptr_stream = int(ffi.cast("uintptr_t", c_stream)) + + schema = pa.schema([('foo', pa.int32())]) + batches = [pa.record_batch([[1, 2, 3]], schema=schema), + pa.record_batch([[4, 5, 6]], schema=schema)] + buf = make_serialized(schema, batches) + + # Open a corrupt/incomplete stream and export it + reader = pa.ipc.open_stream(buf[:-16]) + reader._export_to_c(ptr_stream) + del reader + + reader_new = pa.RecordBatchReader._import_from_c(ptr_stream) + batch = reader_new.read_next_batch() + assert batch == batches[0] + with pytest.raises(OSError, + match="Expected to be able to read 16 bytes " + "for message body, got 8"): + reader_new.read_next_batch() + + # Again, but call read_all() + reader = pa.ipc.open_stream(buf[:-16]) + reader._export_to_c(ptr_stream) + del reader + + reader_new = pa.RecordBatchReader._import_from_c(ptr_stream) + with pytest.raises(OSError, + match="Expected to be able to read 16 bytes " + "for message body, got 8"): + reader_new.read_all() + + +@pytest.mark.parametrize('obj', [pa.int32(), pa.field('foo', pa.int32()), + pa.schema({'foo': pa.int32()})], + ids=['type', 'field', 'schema']) +def test_roundtrip_schema_capsule(obj): + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + capsule = obj.__arrow_c_schema__() + assert PyCapsule_IsValid(capsule, b"arrow_schema") == 1 + assert pa.total_allocated_bytes() > old_allocated + obj_out = type(obj)._import_from_c_capsule(capsule) + assert obj_out == obj + + assert pa.total_allocated_bytes() == old_allocated + + capsule = obj.__arrow_c_schema__() + + assert pa.total_allocated_bytes() > old_allocated + del capsule + assert pa.total_allocated_bytes() == old_allocated + + +@pytest.mark.parametrize('arr,schema_accessor,bad_type,good_type', [ + (pa.array(['a', 'b', 'c']), lambda x: x.type, pa.int32(), pa.string()), + ( + pa.record_batch([pa.array(['a', 'b', 'c'])], names=['x']), + lambda x: x.schema, + pa.schema({'x': pa.int32()}), + pa.schema({'x': pa.string()}) + ), +], ids=['array', 'record_batch']) +def test_roundtrip_array_capsule(arr, schema_accessor, bad_type, good_type): + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + import_array = type(arr)._import_from_c_capsule + + schema_capsule, capsule = arr.__arrow_c_array__() + assert PyCapsule_IsValid(schema_capsule, b"arrow_schema") == 1 + assert PyCapsule_IsValid(capsule, b"arrow_array") == 1 + arr_out = import_array(schema_capsule, capsule) + assert arr_out.equals(arr) + + assert pa.total_allocated_bytes() > old_allocated + del arr_out + + assert pa.total_allocated_bytes() == old_allocated + + capsule = arr.__arrow_c_array__() + + assert pa.total_allocated_bytes() > old_allocated + del capsule + assert pa.total_allocated_bytes() == old_allocated + + with pytest.raises(ValueError, + match=r"Could not cast.* string to requested .* int32"): + arr.__arrow_c_array__(bad_type.__arrow_c_schema__()) + + schema_capsule, array_capsule = arr.__arrow_c_array__( + good_type.__arrow_c_schema__()) + arr_out = import_array(schema_capsule, array_capsule) + assert schema_accessor(arr_out) == good_type + + +# TODO: implement requested_schema for stream +@pytest.mark.parametrize('constructor', [ + pa.RecordBatchReader.from_batches, + # Use a lambda because we 
need to re-order the parameters + lambda schema, batches: pa.Table.from_batches(batches, schema), +], ids=['recordbatchreader', 'table']) +def test_roundtrip_reader_capsule(constructor): + batches = make_batches() + schema = batches[0].schema + + gc.collect() # Make sure no Arrow data dangles in a ref cycle + old_allocated = pa.total_allocated_bytes() + + obj = constructor(schema, batches) + + capsule = obj.__arrow_c_stream__() + assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1 + imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule) + assert imported_reader.schema == schema + imported_batches = list(imported_reader) + assert len(imported_batches) == len(batches) + for batch, expected in zip(imported_batches, batches): + assert batch.equals(expected) + + del obj, imported_reader, batch, expected, imported_batches + + assert pa.total_allocated_bytes() == old_allocated + + obj = constructor(schema, batches) + + bad_schema = pa.schema({'ints': pa.int32()}) + with pytest.raises(pa.lib.ArrowTypeError, match="Field 0 cannot be cast"): + obj.__arrow_c_stream__(bad_schema.__arrow_c_schema__()) + + # Can work with matching schema + matching_schema = pa.schema({'ints': pa.list_(pa.int32())}) + capsule = obj.__arrow_c_stream__(matching_schema.__arrow_c_schema__()) + imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule) + assert imported_reader.schema == matching_schema + for batch, expected in zip(imported_reader, batches): + assert batch.equals(expected) + + +def test_roundtrip_batch_reader_capsule_requested_schema(): + batch = make_batch() + requested_schema = pa.schema([('ints', pa.list_(pa.int64()))]) + requested_capsule = requested_schema.__arrow_c_schema__() + batch_as_requested = batch.cast(requested_schema) + + capsule = batch.__arrow_c_stream__(requested_capsule) + assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1 + imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule) + assert imported_reader.schema == requested_schema + assert imported_reader.read_next_batch().equals(batch_as_requested) + with pytest.raises(StopIteration): + imported_reader.read_next_batch() + + +def test_roundtrip_batch_reader_capsule(): + batch = make_batch() + + capsule = batch.__arrow_c_stream__() + assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1 + imported_reader = pa.RecordBatchReader._import_from_c_capsule(capsule) + assert imported_reader.schema == batch.schema + assert imported_reader.read_next_batch().equals(batch) + with pytest.raises(StopIteration): + imported_reader.read_next_batch() + + +def test_roundtrip_chunked_array_capsule(): + chunked = pa.chunked_array([pa.array(["a", "b", "c"])]) + + capsule = chunked.__arrow_c_stream__() + assert PyCapsule_IsValid(capsule, b"arrow_array_stream") == 1 + imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule) + assert imported_chunked.type == chunked.type + assert imported_chunked == chunked + + +def test_roundtrip_chunked_array_capsule_requested_schema(): + chunked = pa.chunked_array([pa.array(["a", "b", "c"])]) + + # Requesting the same type should work + requested_capsule = chunked.type.__arrow_c_schema__() + capsule = chunked.__arrow_c_stream__(requested_capsule) + imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule) + assert imported_chunked == chunked + + # Casting to something else should error if not possible + requested_type = pa.binary() + requested_capsule = requested_type.__arrow_c_schema__() + capsule = 
chunked.__arrow_c_stream__(requested_capsule) + imported_chunked = pa.ChunkedArray._import_from_c_capsule(capsule) + assert imported_chunked == chunked.cast(pa.binary()) + + requested_type = pa.int64() + requested_capsule = requested_type.__arrow_c_schema__() + with pytest.raises( + ValueError, match="Could not cast string to requested type int64" + ): + chunked.__arrow_c_stream__(requested_capsule) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_compute.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_compute.py new file mode 100644 index 0000000000000000000000000000000000000000..98cbd920b509b59205192f28a3159faf8acdda6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_compute.py @@ -0,0 +1,3683 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from collections import namedtuple +import datetime +import decimal +from functools import lru_cache, partial +import inspect +import itertools +import math +import os +import pytest +import random +import sys +import textwrap + +import numpy as np + +try: + import pandas as pd +except ImportError: + pd = None + +import pyarrow as pa +import pyarrow.compute as pc +from pyarrow.lib import ArrowNotImplementedError +from pyarrow.tests import util + +try: + import pyarrow.substrait as pas +except ImportError: + pas = None + +all_array_types = [ + ('bool', [True, False, False, True, True]), + ('uint8', np.arange(5)), + ('int8', np.arange(5)), + ('uint16', np.arange(5)), + ('int16', np.arange(5)), + ('uint32', np.arange(5)), + ('int32', np.arange(5)), + ('uint64', np.arange(5, 10)), + ('int64', np.arange(5, 10)), + ('float', np.arange(0, 0.5, 0.1)), + ('double', np.arange(0, 0.5, 0.1)), + ('string', ['a', 'b', None, 'ddd', 'ee']), + ('binary', [b'a', b'b', b'c', b'ddd', b'ee']), + (pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']), + (pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]), + (pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]), + (pa.struct([('a', pa.int8()), ('b', pa.int8())]), [ + {'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]), +] + +exported_functions = [ + func for (name, func) in sorted(pc.__dict__.items()) + if hasattr(func, '__arrow_compute_function__')] + +exported_option_classes = [ + cls for (name, cls) in sorted(pc.__dict__.items()) + if (isinstance(cls, type) and + cls is not pc.FunctionOptions and + issubclass(cls, pc.FunctionOptions))] + +numerical_arrow_types = [ + pa.int8(), + pa.int16(), + pa.int64(), + pa.uint8(), + pa.uint16(), + pa.uint64(), + pa.float32(), + pa.float64() +] + + +def test_exported_functions(): + # Check that all exported concrete functions can be called with + # the right number of arguments. + # Note that unregistered functions (e.g. 
with a mismatching name) + # will raise KeyError. + functions = exported_functions + assert len(functions) >= 10 + for func in functions: + desc = func.__arrow_compute_function__ + if desc['options_required']: + # Skip this function as it will fail with a different error + # message if we don't pass an options instance. + continue + arity = desc['arity'] + if arity == 0: + continue + if arity is Ellipsis: + args = [object()] * 3 + else: + args = [object()] * arity + with pytest.raises(TypeError, + match="Got unexpected argument type " + " for compute function"): + func(*args) + + +def test_hash_aggregate_not_exported(): + # Ensure we are not leaking hash aggregate functions + # which are not callable by themselves. + for func in exported_functions: + arrow_f = pc.get_function(func.__arrow_compute_function__["name"]) + assert arrow_f.kind != "hash_aggregate" + + +def test_exported_option_classes(): + classes = exported_option_classes + assert len(classes) >= 10 + for cls in classes: + # Option classes must have an introspectable constructor signature, + # and that signature should not have any *args or **kwargs. + sig = inspect.signature(cls) + for param in sig.parameters.values(): + assert param.kind not in (param.VAR_POSITIONAL, + param.VAR_KEYWORD) + + +@pytest.mark.filterwarnings( + "ignore:pyarrow.CumulativeSumOptions is deprecated as of 14.0" +) +def test_option_class_equality(): + options = [ + pc.ArraySortOptions(), + pc.AssumeTimezoneOptions("UTC"), + pc.CastOptions.safe(pa.int8()), + pc.CountOptions(), + pc.DayOfWeekOptions(count_from_zero=False, week_start=0), + pc.DictionaryEncodeOptions(), + pc.RunEndEncodeOptions(), + pc.ElementWiseAggregateOptions(skip_nulls=True), + pc.ExtractRegexOptions("pattern"), + pc.FilterOptions(), + pc.IndexOptions(pa.scalar(1)), + pc.JoinOptions(), + pc.ListSliceOptions(0, -1, 1, True), + pc.MakeStructOptions(["field", "names"], + field_nullability=[True, True], + field_metadata=[pa.KeyValueMetadata({"a": "1"}), + pa.KeyValueMetadata({"b": "2"})]), + pc.MapLookupOptions(pa.scalar(1), "first"), + pc.MatchSubstringOptions("pattern"), + pc.ModeOptions(), + pc.NullOptions(), + pc.PadOptions(5), + pc.PairwiseOptions(period=1), + pc.PartitionNthOptions(1, null_placement="at_start"), + pc.CumulativeOptions(start=None, skip_nulls=False), + pc.QuantileOptions(), + pc.RandomOptions(), + pc.RankOptions(sort_keys="ascending", + null_placement="at_start", tiebreaker="max"), + pc.ReplaceSliceOptions(0, 1, "a"), + pc.ReplaceSubstringOptions("a", "b"), + pc.RoundOptions(2, "towards_infinity"), + pc.RoundBinaryOptions("towards_infinity"), + pc.RoundTemporalOptions(1, "second", week_starts_monday=True), + pc.RoundToMultipleOptions(100, "towards_infinity"), + pc.ScalarAggregateOptions(), + pc.SelectKOptions(0, sort_keys=[("b", "ascending")]), + pc.SetLookupOptions(pa.array([1])), + pc.SliceOptions(0, 1, 1), + pc.SortOptions([("dummy", "descending")], null_placement="at_start"), + pc.SplitOptions(), + pc.SplitPatternOptions("pattern"), + pc.StrftimeOptions(), + pc.StrptimeOptions("%Y", "s", True), + pc.StructFieldOptions(indices=[]), + pc.TakeOptions(), + pc.TDigestOptions(), + pc.TrimOptions(" "), + pc.Utf8NormalizeOptions("NFKC"), + pc.VarianceOptions(), + pc.WeekOptions(week_starts_monday=True, count_from_zero=False, + first_week_is_fully_in_year=False), + ] + # Timezone database might not be installed on Windows + if sys.platform != "win32" or util.windows_has_tzdata(): + options.append(pc.AssumeTimezoneOptions("Europe/Ljubljana")) + + classes = {type(option) for 
option in options} + + for cls in exported_option_classes: + # Timezone database might not be installed on Windows + if ( + cls not in classes + and (sys.platform != "win32" or util.windows_has_tzdata()) + and cls != pc.AssumeTimezoneOptions + ): + try: + options.append(cls()) + except TypeError: + pytest.fail(f"Options class is not tested: {cls}") + + for option in options: + assert option == option + assert repr(option).startswith(option.__class__.__name__) + buf = option.serialize() + deserialized = pc.FunctionOptions.deserialize(buf) + assert option == deserialized + # TODO remove the check under the if statement and the filterwarnings + # mark when the deprecated class CumulativeSumOptions is removed. + if repr(option).startswith("CumulativeSumOptions"): + assert repr(deserialized).startswith("CumulativeOptions") + else: + assert repr(option) == repr(deserialized) + for option1, option2 in zip(options, options[1:]): + assert option1 != option2 + + assert repr(pc.IndexOptions(pa.scalar(1))) == "IndexOptions(value=int64:1)" + assert repr(pc.ArraySortOptions()) == \ + "ArraySortOptions(order=Ascending, null_placement=AtEnd)" + + +def test_list_functions(): + assert len(pc.list_functions()) > 10 + assert "add" in pc.list_functions() + + +def _check_get_function(name, expected_func_cls, expected_ker_cls, + min_num_kernels=1): + func = pc.get_function(name) + assert isinstance(func, expected_func_cls) + n = func.num_kernels + assert n >= min_num_kernels + assert n == len(func.kernels) + assert all(isinstance(ker, expected_ker_cls) for ker in func.kernels) + + +def test_get_function_scalar(): + _check_get_function("add", pc.ScalarFunction, pc.ScalarKernel, 8) + + +def test_get_function_vector(): + _check_get_function("unique", pc.VectorFunction, pc.VectorKernel, 8) + + +def test_get_function_scalar_aggregate(): + _check_get_function("mean", pc.ScalarAggregateFunction, + pc.ScalarAggregateKernel, 8) + + +def test_get_function_hash_aggregate(): + _check_get_function("hash_sum", pc.HashAggregateFunction, + pc.HashAggregateKernel, 1) + + +def test_call_function_with_memory_pool(): + arr = pa.array(["foo", "bar", "baz"]) + indices = np.array([2, 2, 1]) + result1 = arr.take(indices) + result2 = pc.call_function('take', [arr, indices], + memory_pool=pa.default_memory_pool()) + expected = pa.array(["baz", "baz", "bar"]) + assert result1.equals(expected) + assert result2.equals(expected) + + result3 = pc.take(arr, indices, memory_pool=pa.default_memory_pool()) + assert result3.equals(expected) + + +def test_pickle_functions(pickle_module): + # Pickle registered functions + for name in pc.list_functions(): + func = pc.get_function(name) + reconstructed = pickle_module.loads(pickle_module.dumps(func)) + assert type(reconstructed) is type(func) + assert reconstructed.name == func.name + assert reconstructed.arity == func.arity + assert reconstructed.num_kernels == func.num_kernels + + +def test_pickle_global_functions(pickle_module): + # Pickle global wrappers (manual or automatic) of registered functions + for name in pc.list_functions(): + try: + func = getattr(pc, name) + except AttributeError: + # hash_aggregate functions are not exported as callables. 
+ continue + reconstructed = pickle_module.loads(pickle_module.dumps(func)) + assert reconstructed is func + + +def test_function_attributes(): + # Sanity check attributes of registered functions + for name in pc.list_functions(): + func = pc.get_function(name) + assert isinstance(func, pc.Function) + assert func.name == name + kernels = func.kernels + assert func.num_kernels == len(kernels) + assert all(isinstance(ker, pc.Kernel) for ker in kernels) + repr(func) + for ker in kernels: + repr(ker) + + +def test_input_type_conversion(): + # Automatic array conversion from Python + arr = pc.add([1, 2], [4, None]) + assert arr.to_pylist() == [5, None] + # Automatic scalar conversion from Python + arr = pc.add([1, 2], 4) + assert arr.to_pylist() == [5, 6] + # Other scalar type + assert pc.equal(["foo", "bar", None], + "foo").to_pylist() == [True, False, None] + + +@pytest.mark.parametrize('arrow_type', numerical_arrow_types) +def test_sum_array(arrow_type): + arr = pa.array([1, 2, 3, 4], type=arrow_type) + assert arr.sum().as_py() == 10 + assert pc.sum(arr).as_py() == 10 + + arr = pa.array([1, 2, 3, 4, None], type=arrow_type) + assert arr.sum().as_py() == 10 + assert pc.sum(arr).as_py() == 10 + + arr = pa.array([None], type=arrow_type) + assert arr.sum().as_py() is None # noqa: E711 + assert pc.sum(arr).as_py() is None # noqa: E711 + assert arr.sum(min_count=0).as_py() == 0 + assert pc.sum(arr, min_count=0).as_py() == 0 + + arr = pa.array([], type=arrow_type) + assert arr.sum().as_py() is None # noqa: E711 + assert arr.sum(min_count=0).as_py() == 0 + assert pc.sum(arr, min_count=0).as_py() == 0 + + +@pytest.mark.parametrize('arrow_type', numerical_arrow_types) +def test_sum_chunked_array(arrow_type): + arr = pa.chunked_array([pa.array([1, 2, 3, 4], type=arrow_type)]) + assert pc.sum(arr).as_py() == 10 + + arr = pa.chunked_array([ + pa.array([1, 2], type=arrow_type), pa.array([3, 4], type=arrow_type) + ]) + assert pc.sum(arr).as_py() == 10 + + arr = pa.chunked_array([ + pa.array([1, 2], type=arrow_type), + pa.array([], type=arrow_type), + pa.array([3, 4], type=arrow_type) + ]) + assert pc.sum(arr).as_py() == 10 + + arr = pa.chunked_array((), type=arrow_type) + assert arr.num_chunks == 0 + assert pc.sum(arr).as_py() is None # noqa: E711 + assert pc.sum(arr, min_count=0).as_py() == 0 + + +def test_mode_array(): + # ARROW-9917 + arr = pa.array([1, 1, 3, 4, 3, 5], type='int64') + mode = pc.mode(arr) + assert len(mode) == 1 + assert mode[0].as_py() == {"mode": 1, "count": 2} + + mode = pc.mode(arr, n=2) + assert len(mode) == 2 + assert mode[0].as_py() == {"mode": 1, "count": 2} + assert mode[1].as_py() == {"mode": 3, "count": 2} + + arr = pa.array([], type='int64') + assert len(pc.mode(arr)) == 0 + + arr = pa.array([1, 1, 3, 4, 3, None], type='int64') + mode = pc.mode(arr, skip_nulls=False) + assert len(mode) == 0 + mode = pc.mode(arr, min_count=6) + assert len(mode) == 0 + mode = pc.mode(arr, skip_nulls=False, min_count=5) + assert len(mode) == 0 + + arr = pa.array([True, False]) + mode = pc.mode(arr, n=2) + assert len(mode) == 2 + assert mode[0].as_py() == {"mode": False, "count": 1} + assert mode[1].as_py() == {"mode": True, "count": 1} + + +def test_mode_chunked_array(): + # ARROW-9917 + arr = pa.chunked_array([pa.array([1, 1, 3, 4, 3, 5], type='int64')]) + mode = pc.mode(arr) + assert len(mode) == 1 + assert mode[0].as_py() == {"mode": 1, "count": 2} + + mode = pc.mode(arr, n=2) + assert len(mode) == 2 + assert mode[0].as_py() == {"mode": 1, "count": 2} + assert mode[1].as_py() == {"mode": 3, 
"count": 2} + + arr = pa.chunked_array((), type='int64') + assert arr.num_chunks == 0 + assert len(pc.mode(arr)) == 0 + + +def test_empty_chunked_array(): + msg = "cannot construct ChunkedArray from empty vector and omitted type" + with pytest.raises(pa.ArrowInvalid, match=msg): + pa.chunked_array([]) + + pa.chunked_array([], type=pa.int8()) + + +def test_variance(): + data = [1, 2, 3, 4, 5, 6, 7, 8] + assert pc.variance(data).as_py() == 5.25 + assert pc.variance(data, ddof=0).as_py() == 5.25 + assert pc.variance(data, ddof=1).as_py() == 6.0 + + +def test_count_substring(): + for (ty, offset) in [(pa.string(), pa.int32()), + (pa.large_string(), pa.int64())]: + arr = pa.array(["ab", "cab", "abcab", "ba", "AB", None], type=ty) + + result = pc.count_substring(arr, "ab") + expected = pa.array([1, 1, 2, 0, 0, None], type=offset) + assert expected == result + + result = pc.count_substring(arr, "ab", ignore_case=True) + expected = pa.array([1, 1, 2, 0, 1, None], type=offset) + assert expected == result + + +def test_count_substring_regex(): + for (ty, offset) in [(pa.string(), pa.int32()), + (pa.large_string(), pa.int64())]: + arr = pa.array(["ab", "cab", "baAacaa", "ba", "AB", None], type=ty) + + result = pc.count_substring_regex(arr, "a+") + expected = pa.array([1, 1, 3, 1, 0, None], type=offset) + assert expected.equals(result) + + result = pc.count_substring_regex(arr, "a+", ignore_case=True) + expected = pa.array([1, 1, 2, 1, 1, None], type=offset) + assert expected.equals(result) + + +def test_find_substring(): + for ty in [pa.string(), pa.binary(), pa.large_string(), pa.large_binary()]: + arr = pa.array(["ab", "cab", "ba", None], type=ty) + result = pc.find_substring(arr, "ab") + assert result.to_pylist() == [0, 1, -1, None] + + result = pc.find_substring_regex(arr, "a?b") + assert result.to_pylist() == [0, 1, 0, None] + + arr = pa.array(["ab*", "cAB*", "ba", "aB?"], type=ty) + result = pc.find_substring(arr, "aB*", ignore_case=True) + assert result.to_pylist() == [0, 1, -1, -1] + + result = pc.find_substring_regex(arr, "a?b", ignore_case=True) + assert result.to_pylist() == [0, 1, 0, 0] + + +def test_match_like(): + arr = pa.array(["ab", "ba%", "ba", "ca%d", None]) + result = pc.match_like(arr, r"_a\%%") + expected = pa.array([False, True, False, True, None]) + assert expected.equals(result) + + arr = pa.array(["aB", "bA%", "ba", "ca%d", None]) + result = pc.match_like(arr, r"_a\%%", ignore_case=True) + expected = pa.array([False, True, False, True, None]) + assert expected.equals(result) + result = pc.match_like(arr, r"_a\%%", ignore_case=False) + expected = pa.array([False, False, False, True, None]) + assert expected.equals(result) + + +def test_match_substring(): + arr = pa.array(["ab", "abc", "ba", None]) + result = pc.match_substring(arr, "ab") + expected = pa.array([True, True, False, None]) + assert expected.equals(result) + + arr = pa.array(["áB", "Ábc", "ba", None]) + result = pc.match_substring(arr, "áb", ignore_case=True) + expected = pa.array([True, True, False, None]) + assert expected.equals(result) + result = pc.match_substring(arr, "áb", ignore_case=False) + expected = pa.array([False, False, False, None]) + assert expected.equals(result) + + +def test_match_substring_regex(): + arr = pa.array(["ab", "abc", "ba", "c", None]) + result = pc.match_substring_regex(arr, "^a?b") + expected = pa.array([True, True, True, False, None]) + assert expected.equals(result) + + arr = pa.array(["aB", "Abc", "BA", "c", None]) + result = pc.match_substring_regex(arr, "^a?b", 
ignore_case=True) + expected = pa.array([True, True, True, False, None]) + assert expected.equals(result) + result = pc.match_substring_regex(arr, "^a?b", ignore_case=False) + expected = pa.array([False, False, False, False, None]) + assert expected.equals(result) + + +def test_trim(): + # \u3000 is unicode whitespace + arr = pa.array([" foo", None, " \u3000foo bar \t"]) + result = pc.utf8_trim_whitespace(arr) + expected = pa.array(["foo", None, "foo bar"]) + assert expected.equals(result) + + arr = pa.array([" foo", None, " \u3000foo bar \t"]) + result = pc.ascii_trim_whitespace(arr) + expected = pa.array(["foo", None, "\u3000foo bar"]) + assert expected.equals(result) + + arr = pa.array([" foo", None, " \u3000foo bar \t"]) + result = pc.utf8_trim(arr, characters=' f\u3000') + expected = pa.array(["oo", None, "oo bar \t"]) + assert expected.equals(result) + # Positional option + result = pc.utf8_trim(arr, ' f\u3000') + expected = pa.array(["oo", None, "oo bar \t"]) + assert expected.equals(result) + + +def test_slice_compatibility(): + arr = pa.array(["", "𝑓", "𝑓ö", "𝑓öõ", "𝑓öõḍ", "𝑓öõḍš"]) + for start in range(-6, 6): + for stop in itertools.chain(range(-6, 6), [None]): + for step in [-3, -2, -1, 1, 2, 3]: + expected = pa.array([k.as_py()[start:stop:step] + for k in arr]) + result = pc.utf8_slice_codeunits( + arr, start=start, stop=stop, step=step) + assert expected.equals(result) + # Positional options + assert pc.utf8_slice_codeunits(arr, + start, stop, step) == result + + +def test_binary_slice_compatibility(): + data = [b"", b"a", b"a\xff", b"ab\x00", b"abc\xfb", b"ab\xf2de"] + arr = pa.array(data) + for start, stop, step in itertools.product(range(-6, 6), + range(-6, 6), + range(-3, 4)): + if step == 0: + continue + expected = pa.array([k.as_py()[start:stop:step] + for k in arr]) + result = pc.binary_slice( + arr, start=start, stop=stop, step=step) + assert expected.equals(result) + # Positional options + assert pc.binary_slice(arr, start, stop, step) == result + # Fixed size binary input / output + for item in data: + fsb_scalar = pa.scalar(item, type=pa.binary(len(item))) + expected = item[start:stop:step] + actual = pc.binary_slice(fsb_scalar, start, stop, step) + assert actual.type == pa.binary(len(expected)) + assert actual.as_py() == expected + + +def test_split_pattern(): + arr = pa.array(["-foo---bar--", "---foo---b"]) + result = pc.split_pattern(arr, pattern="---") + expected = pa.array([["-foo", "bar--"], ["", "foo", "b"]]) + assert expected.equals(result) + + result = pc.split_pattern(arr, "---", max_splits=1) + expected = pa.array([["-foo", "bar--"], ["", "foo---b"]]) + assert expected.equals(result) + + result = pc.split_pattern(arr, "---", max_splits=1, reverse=True) + expected = pa.array([["-foo", "bar--"], ["---foo", "b"]]) + assert expected.equals(result) + + +def test_split_whitespace_utf8(): + arr = pa.array(["foo bar", " foo \u3000\tb"]) + result = pc.utf8_split_whitespace(arr) + expected = pa.array([["foo", "bar"], ["", "foo", "b"]]) + assert expected.equals(result) + + result = pc.utf8_split_whitespace(arr, max_splits=1) + expected = pa.array([["foo", "bar"], ["", "foo \u3000\tb"]]) + assert expected.equals(result) + + result = pc.utf8_split_whitespace(arr, max_splits=1, reverse=True) + expected = pa.array([["foo", "bar"], [" foo", "b"]]) + assert expected.equals(result) + + +def test_split_whitespace_ascii(): + arr = pa.array(["foo bar", " foo \u3000\tb"]) + result = pc.ascii_split_whitespace(arr) + expected = pa.array([["foo", "bar"], ["", "foo", "\u3000", 
"b"]]) + assert expected.equals(result) + + result = pc.ascii_split_whitespace(arr, max_splits=1) + expected = pa.array([["foo", "bar"], ["", "foo \u3000\tb"]]) + assert expected.equals(result) + + result = pc.ascii_split_whitespace(arr, max_splits=1, reverse=True) + expected = pa.array([["foo", "bar"], [" foo \u3000", "b"]]) + assert expected.equals(result) + + +def test_split_pattern_regex(): + arr = pa.array(["-foo---bar--", "---foo---b"]) + result = pc.split_pattern_regex(arr, pattern="-+") + expected = pa.array([["", "foo", "bar", ""], ["", "foo", "b"]]) + assert expected.equals(result) + + result = pc.split_pattern_regex(arr, "-+", max_splits=1) + expected = pa.array([["", "foo---bar--"], ["", "foo---b"]]) + assert expected.equals(result) + + with pytest.raises(NotImplementedError, + match="Cannot split in reverse with regex"): + result = pc.split_pattern_regex( + arr, pattern="---", max_splits=1, reverse=True) + + +def test_min_max(): + # An example generated function wrapper with possible options + data = [4, 5, 6, None, 1] + s = pc.min_max(data) + assert s.as_py() == {'min': 1, 'max': 6} + s = pc.min_max(data, options=pc.ScalarAggregateOptions()) + assert s.as_py() == {'min': 1, 'max': 6} + s = pc.min_max(data, options=pc.ScalarAggregateOptions(skip_nulls=True)) + assert s.as_py() == {'min': 1, 'max': 6} + s = pc.min_max(data, options=pc.ScalarAggregateOptions(skip_nulls=False)) + assert s.as_py() == {'min': None, 'max': None} + + # Options as dict of kwargs + s = pc.min_max(data, options={'skip_nulls': False}) + assert s.as_py() == {'min': None, 'max': None} + # Options as named functions arguments + s = pc.min_max(data, skip_nulls=False) + assert s.as_py() == {'min': None, 'max': None} + + # Both options and named arguments + with pytest.raises(TypeError): + s = pc.min_max( + data, options=pc.ScalarAggregateOptions(), skip_nulls=False) + + # Wrong options type + options = pc.TakeOptions() + with pytest.raises(TypeError): + s = pc.min_max(data, options=options) + + # Missing argument + with pytest.raises(TypeError, match="min_max takes 1 positional"): + s = pc.min_max() + + +def test_any(): + # ARROW-1846 + + options = pc.ScalarAggregateOptions(skip_nulls=False, min_count=0) + + a = pa.array([], type='bool') + assert pc.any(a).as_py() is None + assert pc.any(a, min_count=0).as_py() is False + assert pc.any(a, options=options).as_py() is False + + a = pa.array([False, None, True]) + assert pc.any(a).as_py() is True + assert pc.any(a, options=options).as_py() is True + + a = pa.array([False, None, False]) + assert pc.any(a).as_py() is False + assert pc.any(a, options=options).as_py() is None + + +def test_all(): + # ARROW-10301 + + options = pc.ScalarAggregateOptions(skip_nulls=False, min_count=0) + + a = pa.array([], type='bool') + assert pc.all(a).as_py() is None + assert pc.all(a, min_count=0).as_py() is True + assert pc.all(a, options=options).as_py() is True + + a = pa.array([False, True]) + assert pc.all(a).as_py() is False + assert pc.all(a, options=options).as_py() is False + + a = pa.array([True, None]) + assert pc.all(a).as_py() is True + assert pc.all(a, options=options).as_py() is None + + a = pa.chunked_array([[True], [True, None]]) + assert pc.all(a).as_py() is True + assert pc.all(a, options=options).as_py() is None + + a = pa.chunked_array([[True], [False]]) + assert pc.all(a).as_py() is False + assert pc.all(a, options=options).as_py() is False + + +def test_is_valid(): + # An example generated function wrapper without options + data = [4, 5, None] + assert 
pc.is_valid(data).to_pylist() == [True, True, False] + + with pytest.raises(TypeError): + pc.is_valid(data, options=None) + + +def test_generated_docstrings(): + # With options + assert pc.min_max.__doc__ == textwrap.dedent("""\ + Compute the minimum and maximum values of a numeric array. + + Null values are ignored by default. + This can be changed through ScalarAggregateOptions. + + Parameters + ---------- + array : Array-like + Argument to compute function. + skip_nulls : bool, default True + Whether to skip (ignore) nulls in the input. + If False, any null in the input forces the output to null. + min_count : int, default 1 + Minimum number of non-null values in the input. If the number + of non-null values is below `min_count`, the output is null. + options : pyarrow.compute.ScalarAggregateOptions, optional + Alternative way of passing options. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """) + # Without options + assert pc.add.__doc__ == textwrap.dedent("""\ + Add the arguments element-wise. + + Results will wrap around on integer overflow. + Use function "add_checked" if you want overflow + to return an error. + + Parameters + ---------- + x : Array-like or scalar-like + Argument to compute function. + y : Array-like or scalar-like + Argument to compute function. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """) + # Varargs with options + assert pc.min_element_wise.__doc__ == textwrap.dedent("""\ + Find the element-wise minimum value. + + Nulls are ignored (by default) or propagated. + NaN is preferred over null, but not over any valid value. + + Parameters + ---------- + *args : Array-like or scalar-like + Argument to compute function. + skip_nulls : bool, default True + Whether to skip (ignore) nulls in the input. + If False, any null in the input forces the output to null. + options : pyarrow.compute.ElementWiseAggregateOptions, optional + Alternative way of passing options. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + """) + assert pc.filter.__doc__ == textwrap.dedent("""\ + Filter with a boolean selection filter. + + The output is populated with values from the input at positions + where the selection filter is non-zero. Nulls in the selection filter + are handled based on FilterOptions. + + Parameters + ---------- + input : Array-like or scalar-like + Argument to compute function. + selection_filter : Array-like or scalar-like + Argument to compute function. + null_selection_behavior : str, default "drop" + How to handle nulls in the selection filter. + Accepted values are "drop", "emit_null". + options : pyarrow.compute.FilterOptions, optional + Alternative way of passing options. + memory_pool : pyarrow.MemoryPool, optional + If not passed, will allocate memory from the default memory pool. + + Examples + -------- + >>> import pyarrow as pa + >>> arr = pa.array(["a", "b", "c", None, "e"]) + >>> mask = pa.array([True, False, None, False, True]) + >>> arr.filter(mask) + + [ + "a", + "e" + ] + >>> arr.filter(mask, null_selection_behavior='emit_null') + + [ + "a", + null, + "e" + ] + """) + + +def test_generated_signatures(): + # The self-documentation provided by signatures should show acceptable + # options and their default values. 
+ + # Without options + sig = inspect.signature(pc.add) + assert str(sig) == "(x, y, /, *, memory_pool=None)" + # With options + sig = inspect.signature(pc.min_max) + assert str(sig) == ("(array, /, *, skip_nulls=True, min_count=1, " + "options=None, memory_pool=None)") + # With positional options + sig = inspect.signature(pc.quantile) + assert str(sig) == ("(array, /, q=0.5, *, interpolation='linear', " + "skip_nulls=True, min_count=0, " + "options=None, memory_pool=None)") + # Varargs with options + sig = inspect.signature(pc.binary_join_element_wise) + assert str(sig) == ("(*strings, null_handling='emit_null', " + "null_replacement='', options=None, " + "memory_pool=None)") + # Varargs without options + sig = inspect.signature(pc.choose) + assert str(sig) == "(indices, /, *values, memory_pool=None)" + # Nullary with options + sig = inspect.signature(pc.random) + assert str(sig) == ("(n, *, initializer='system', " + "options=None, memory_pool=None)") + + +# We use isprintable to find about codepoints that Python doesn't know, but +# utf8proc does (or in a future version of Python the other way around). +# These codepoints cannot be compared between Arrow and the Python +# implementation. +@lru_cache() +def find_new_unicode_codepoints(): + new = set() + characters = [chr(c) for c in range(0x80, 0x11000) + if not (0xD800 <= c < 0xE000)] + is_printable = pc.utf8_is_printable(pa.array(characters)).to_pylist() + for i, c in enumerate(characters): + if is_printable[i] != c.isprintable(): + new.add(ord(c)) + return new + + +# Python claims there are not alpha, not sure why, they are in +# gc='Other Letter': https://graphemica.com/%E1%B3%B2 +unknown_issue_is_alpha = {0x1cf2, 0x1cf3} +# utf8proc does not know if codepoints are lower case +utf8proc_issue_is_lower = { + 0xaa, 0xba, 0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4, + 0x2b5, 0x2b6, 0x2b7, 0x2b8, 0x2c0, 0x2c1, 0x2e0, + 0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x37a, 0x1d2c, 0x1d2d, + 0x1d2e, 0x1d2f, 0x1d30, 0x1d31, 0x1d32, 0x1d33, + 0x1d34, 0x1d35, 0x1d36, 0x1d37, 0x1d38, 0x1d39, + 0x1d3a, 0x1d3b, 0x1d3c, 0x1d3d, 0x1d3e, 0x1d3f, + 0x1d40, 0x1d41, 0x1d42, 0x1d43, 0x1d44, 0x1d45, + 0x1d46, 0x1d47, 0x1d48, 0x1d49, 0x1d4a, 0x1d4b, + 0x1d4c, 0x1d4d, 0x1d4e, 0x1d4f, 0x1d50, 0x1d51, + 0x1d52, 0x1d53, 0x1d54, 0x1d55, 0x1d56, 0x1d57, + 0x1d58, 0x1d59, 0x1d5a, 0x1d5b, 0x1d5c, 0x1d5d, + 0x1d5e, 0x1d5f, 0x1d60, 0x1d61, 0x1d62, 0x1d63, + 0x1d64, 0x1d65, 0x1d66, 0x1d67, 0x1d68, 0x1d69, + 0x1d6a, 0x1d78, 0x1d9b, 0x1d9c, 0x1d9d, 0x1d9e, + 0x1d9f, 0x1da0, 0x1da1, 0x1da2, 0x1da3, 0x1da4, + 0x1da5, 0x1da6, 0x1da7, 0x1da8, 0x1da9, 0x1daa, + 0x1dab, 0x1dac, 0x1dad, 0x1dae, 0x1daf, 0x1db0, + 0x1db1, 0x1db2, 0x1db3, 0x1db4, 0x1db5, 0x1db6, + 0x1db7, 0x1db8, 0x1db9, 0x1dba, 0x1dbb, 0x1dbc, + 0x1dbd, 0x1dbe, 0x1dbf, 0x2071, 0x207f, 0x2090, + 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, + 0x2097, 0x2098, 0x2099, 0x209a, 0x209b, 0x209c, + 0x2c7c, 0x2c7d, 0xa69c, 0xa69d, 0xa770, 0xa7f8, + 0xa7f9, 0xab5c, 0xab5d, 0xab5e, 0xab5f, } +# utf8proc does not store if a codepoint is numeric +numeric_info_missing = { + 0x3405, 0x3483, 0x382a, 0x3b4d, 0x4e00, 0x4e03, + 0x4e07, 0x4e09, 0x4e5d, 0x4e8c, 0x4e94, 0x4e96, + 0x4ebf, 0x4ec0, 0x4edf, 0x4ee8, 0x4f0d, 0x4f70, + 0x5104, 0x5146, 0x5169, 0x516b, 0x516d, 0x5341, + 0x5343, 0x5344, 0x5345, 0x534c, 0x53c1, 0x53c2, + 0x53c3, 0x53c4, 0x56db, 0x58f1, 0x58f9, 0x5e7a, + 0x5efe, 0x5eff, 0x5f0c, 0x5f0d, 0x5f0e, 0x5f10, + 0x62fe, 0x634c, 0x67d2, 0x6f06, 0x7396, 0x767e, + 0x8086, 0x842c, 0x8cae, 0x8cb3, 0x8d30, 0x9621, + 0x9646, 0x964c, 0x9678, 0x96f6, 
0xf96b, 0xf973, + 0xf978, 0xf9b2, 0xf9d1, 0xf9d3, 0xf9fd, 0x10fc5, + 0x10fc6, 0x10fc7, 0x10fc8, 0x10fc9, 0x10fca, + 0x10fcb, } +# utf8proc has no no digit/numeric information +digit_info_missing = { + 0xb2, 0xb3, 0xb9, 0x1369, 0x136a, 0x136b, 0x136c, + 0x136d, 0x136e, 0x136f, 0x1370, 0x1371, 0x19da, 0x2070, + 0x2074, 0x2075, 0x2076, 0x2077, 0x2078, 0x2079, 0x2080, + 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087, + 0x2088, 0x2089, 0x2460, 0x2461, 0x2462, 0x2463, 0x2464, + 0x2465, 0x2466, 0x2467, 0x2468, 0x2474, 0x2475, 0x2476, + 0x2477, 0x2478, 0x2479, 0x247a, 0x247b, 0x247c, 0x2488, + 0x2489, 0x248a, 0x248b, 0x248c, 0x248d, 0x248e, 0x248f, + 0x2490, 0x24ea, 0x24f5, 0x24f6, 0x24f7, 0x24f8, 0x24f9, + 0x24fa, 0x24fb, 0x24fc, 0x24fd, 0x24ff, 0x2776, 0x2777, + 0x2778, 0x2779, 0x277a, 0x277b, 0x277c, 0x277d, 0x277e, + 0x2780, 0x2781, 0x2782, 0x2783, 0x2784, 0x2785, 0x2786, + 0x2787, 0x2788, 0x278a, 0x278b, 0x278c, 0x278d, 0x278e, + 0x278f, 0x2790, 0x2791, 0x2792, 0x10a40, 0x10a41, + 0x10a42, 0x10a43, 0x10e60, 0x10e61, 0x10e62, 0x10e63, + 0x10e64, 0x10e65, 0x10e66, 0x10e67, 0x10e68, } +numeric_info_missing = { + 0x3405, 0x3483, 0x382a, 0x3b4d, 0x4e00, 0x4e03, + 0x4e07, 0x4e09, 0x4e5d, 0x4e8c, 0x4e94, 0x4e96, + 0x4ebf, 0x4ec0, 0x4edf, 0x4ee8, 0x4f0d, 0x4f70, + 0x5104, 0x5146, 0x5169, 0x516b, 0x516d, 0x5341, + 0x5343, 0x5344, 0x5345, 0x534c, 0x53c1, 0x53c2, + 0x53c3, 0x53c4, 0x56db, 0x58f1, 0x58f9, 0x5e7a, + 0x5efe, 0x5eff, 0x5f0c, 0x5f0d, 0x5f0e, 0x5f10, + 0x62fe, 0x634c, 0x67d2, 0x6f06, 0x7396, 0x767e, + 0x8086, 0x842c, 0x8cae, 0x8cb3, 0x8d30, 0x9621, + 0x9646, 0x964c, 0x9678, 0x96f6, 0xf96b, 0xf973, + 0xf978, 0xf9b2, 0xf9d1, 0xf9d3, 0xf9fd, } + +codepoints_ignore = { + 'is_alnum': numeric_info_missing | digit_info_missing | + unknown_issue_is_alpha, + 'is_alpha': unknown_issue_is_alpha, + 'is_digit': digit_info_missing, + 'is_numeric': numeric_info_missing, + 'is_lower': utf8proc_issue_is_lower +} + + +@pytest.mark.parametrize('function_name', ['is_alnum', 'is_alpha', + 'is_ascii', 'is_decimal', + 'is_digit', 'is_lower', + 'is_numeric', 'is_printable', + 'is_space', 'is_upper', ]) +@pytest.mark.parametrize('variant', ['ascii', 'utf8']) +def test_string_py_compat_boolean(function_name, variant): + arrow_name = variant + "_" + function_name + py_name = function_name.replace('_', '') + ignore = codepoints_ignore.get(function_name, set()) | \ + find_new_unicode_codepoints() + for i in range(128 if ascii else 0x11000): + if i in range(0xD800, 0xE000): + continue # bug? 
pyarrow doesn't allow utf16 surrogates + # the issues we know of, we skip + if i in ignore: + continue + # Compare results with the equivalent Python predicate + # (except "is_space" where functions are known to be incompatible) + c = chr(i) + if hasattr(pc, arrow_name) and function_name != 'is_space': + ar = pa.array([c]) + arrow_func = getattr(pc, arrow_name) + assert arrow_func(ar)[0].as_py() == getattr(c, py_name)() + + + def test_pad(): + arr = pa.array([None, 'a', 'abcd']) + assert pc.ascii_center(arr, width=3).tolist() == [None, ' a ', 'abcd'] + assert pc.ascii_lpad(arr, width=3).tolist() == [None, '  a', 'abcd'] + assert pc.ascii_rpad(arr, width=3).tolist() == [None, 'a  ', 'abcd'] + assert pc.ascii_center(arr, 3).tolist() == [None, ' a ', 'abcd'] + assert pc.ascii_lpad(arr, 3).tolist() == [None, '  a', 'abcd'] + assert pc.ascii_rpad(arr, 3).tolist() == [None, 'a  ', 'abcd'] + + arr = pa.array([None, 'á', 'abcd']) + assert pc.utf8_center(arr, width=3).tolist() == [None, ' á ', 'abcd'] + assert pc.utf8_lpad(arr, width=3).tolist() == [None, '  á', 'abcd'] + assert pc.utf8_rpad(arr, width=3).tolist() == [None, 'á  ', 'abcd'] + assert pc.utf8_center(arr, 3).tolist() == [None, ' á ', 'abcd'] + assert pc.utf8_lpad(arr, 3).tolist() == [None, '  á', 'abcd'] + assert pc.utf8_rpad(arr, 3).tolist() == [None, 'á  ', 'abcd'] + + + @pytest.mark.pandas + def test_replace_slice(): + offsets = range(-3, 4) + + arr = pa.array([None, '', 'a', 'ab', 'abc', 'abcd', 'abcde']) + series = arr.to_pandas() + for start in offsets: + for stop in offsets: + expected = series.str.slice_replace(start, stop, 'XX') + actual = pc.binary_replace_slice( + arr, start=start, stop=stop, replacement='XX') + assert actual.tolist() == expected.tolist() + # Positional options + assert pc.binary_replace_slice(arr, start, stop, 'XX') == actual + + arr = pa.array([None, '', 'π', 'πb', 'πbθ', 'πbθd', 'πbθde']) + series = arr.to_pandas() + for start in offsets: + for stop in offsets: + expected = series.str.slice_replace(start, stop, 'XX') + actual = pc.utf8_replace_slice( + arr, start=start, stop=stop, replacement='XX') + assert actual.tolist() == expected.tolist() + + + def test_replace_plain(): + data = pa.array(['foozfoo', 'food', None]) + ar = pc.replace_substring(data, pattern='foo', replacement='bar') + assert ar.tolist() == ['barzbar', 'bard', None] + ar = pc.replace_substring(data, 'foo', 'bar') + assert ar.tolist() == ['barzbar', 'bard', None] + + ar = pc.replace_substring(data, pattern='foo', replacement='bar', + max_replacements=1) + assert ar.tolist() == ['barzfoo', 'bard', None] + ar = pc.replace_substring(data, 'foo', 'bar', max_replacements=1) + assert ar.tolist() == ['barzfoo', 'bard', None] + + + def test_replace_regex(): + data = pa.array(['foo', 'mood', None]) + expected = ['f00', 'm00d', None] + ar = pc.replace_substring_regex(data, pattern='(.)oo', replacement=r'\100') + assert ar.tolist() == expected + ar = pc.replace_substring_regex(data, '(.)oo', replacement=r'\100') + assert ar.tolist() == expected + ar = pc.replace_substring_regex(data, '(.)oo', r'\100') + assert ar.tolist() == expected + + + def test_extract_regex(): + ar = pa.array(['a1', 'zb2z']) + expected = [{'letter': 'a', 'digit': '1'}, {'letter': 'b', 'digit': '2'}] + struct = pc.extract_regex(ar, pattern=r'(?P<letter>[ab])(?P<digit>\d)') + assert struct.tolist() == expected + struct = pc.extract_regex(ar, r'(?P<letter>[ab])(?P<digit>\d)') + assert struct.tolist() == expected + + + def test_binary_join(): + ar_list = pa.array([['foo', 'bar'], None, []]) + expected = pa.array(['foo-bar', 
None, '']) + assert pc.binary_join(ar_list, '-').equals(expected) + + separator_array = pa.array(['1', '2'], type=pa.binary()) + expected = pa.array(['a1b', 'c2d'], type=pa.binary()) + ar_list = pa.array([['a', 'b'], ['c', 'd']], type=pa.list_(pa.binary())) + assert pc.binary_join(ar_list, separator_array).equals(expected) + + +def test_binary_join_element_wise(): + null = pa.scalar(None, type=pa.string()) + arrs = [[None, 'a', 'b'], ['c', None, 'd'], [None, '-', '--']] + assert pc.binary_join_element_wise(*arrs).to_pylist() == \ + [None, None, 'b--d'] + assert pc.binary_join_element_wise('a', 'b', '-').as_py() == 'a-b' + assert pc.binary_join_element_wise('a', null, '-').as_py() is None + assert pc.binary_join_element_wise('a', 'b', null).as_py() is None + + skip = pc.JoinOptions(null_handling='skip') + assert pc.binary_join_element_wise(*arrs, options=skip).to_pylist() == \ + [None, 'a', 'b--d'] + assert pc.binary_join_element_wise( + 'a', 'b', '-', options=skip).as_py() == 'a-b' + assert pc.binary_join_element_wise( + 'a', null, '-', options=skip).as_py() == 'a' + assert pc.binary_join_element_wise( + 'a', 'b', null, options=skip).as_py() is None + + replace = pc.JoinOptions(null_handling='replace', null_replacement='spam') + assert pc.binary_join_element_wise(*arrs, options=replace).to_pylist() == \ + [None, 'a-spam', 'b--d'] + assert pc.binary_join_element_wise( + 'a', 'b', '-', options=replace).as_py() == 'a-b' + assert pc.binary_join_element_wise( + 'a', null, '-', options=replace).as_py() == 'a-spam' + assert pc.binary_join_element_wise( + 'a', 'b', null, options=replace).as_py() is None + + +@pytest.mark.parametrize(('ty', 'values'), all_array_types) +def test_take(ty, values): + arr = pa.array(values, type=ty) + for indices_type in [pa.int8(), pa.int64()]: + indices = pa.array([0, 4, 2, None], type=indices_type) + result = arr.take(indices) + result.validate() + expected = pa.array([values[0], values[4], values[2], None], type=ty) + assert result.equals(expected) + + # empty indices + indices = pa.array([], type=indices_type) + result = arr.take(indices) + result.validate() + expected = pa.array([], type=ty) + assert result.equals(expected) + + indices = pa.array([2, 5]) + with pytest.raises(IndexError): + arr.take(indices) + + indices = pa.array([2, -1]) + with pytest.raises(IndexError): + arr.take(indices) + + +def test_take_indices_types(): + arr = pa.array(range(5)) + + for indices_type in ['uint8', 'int8', 'uint16', 'int16', + 'uint32', 'int32', 'uint64', 'int64']: + indices = pa.array([0, 4, 2, None], type=indices_type) + result = arr.take(indices) + result.validate() + expected = pa.array([0, 4, 2, None]) + assert result.equals(expected) + + for indices_type in [pa.float32(), pa.float64()]: + indices = pa.array([0, 4, 2], type=indices_type) + with pytest.raises(NotImplementedError): + arr.take(indices) + + +def test_take_on_chunked_array(): + # ARROW-9504 + arr = pa.chunked_array([ + [ + "a", + "b", + "c", + "d", + "e" + ], + [ + "f", + "g", + "h", + "i", + "j" + ] + ]) + + indices = np.array([0, 5, 1, 6, 9, 2]) + result = arr.take(indices) + expected = pa.chunked_array([["a", "f", "b", "g", "j", "c"]]) + assert result.equals(expected) + + indices = pa.chunked_array([[1], [9, 2]]) + result = arr.take(indices) + expected = pa.chunked_array([ + [ + "b" + ], + [ + "j", + "c" + ] + ]) + assert result.equals(expected) + + +@pytest.mark.parametrize('ordered', [False, True]) +def test_take_dictionary(ordered): + arr = pa.DictionaryArray.from_arrays([0, 1, 2, 0, 1, 2], ['a', 'b', 
'c'], + ordered=ordered) + result = arr.take(pa.array([0, 1, 3])) + result.validate() + assert result.to_pylist() == ['a', 'b', 'a'] + assert result.dictionary.to_pylist() == ['a', 'b', 'c'] + assert result.type.ordered is ordered + + +def test_take_null_type(): + # ARROW-10027 + arr = pa.array([None] * 10) + chunked_arr = pa.chunked_array([[None] * 5] * 2) + batch = pa.record_batch([arr], names=['a']) + table = pa.table({'a': arr}) + + indices = pa.array([1, 3, 7, None]) + assert len(arr.take(indices)) == 4 + assert len(chunked_arr.take(indices)) == 4 + assert len(batch.take(indices).column(0)) == 4 + assert len(table.take(indices).column(0)) == 4 + + +@pytest.mark.parametrize(('ty', 'values'), all_array_types) +def test_drop_null(ty, values): + arr = pa.array(values, type=ty) + result = arr.drop_null() + result.validate(full=True) + indices = [i for i in range(len(arr)) if arr[i].is_valid] + expected = arr.take(pa.array(indices)) + assert result.equals(expected) + + +def test_drop_null_chunked_array(): + arr = pa.chunked_array([["a", None], ["c", "d", None], [None], []]) + expected_drop = pa.chunked_array([["a"], ["c", "d"], [], []]) + + result = arr.drop_null() + assert result.equals(expected_drop) + + +def test_drop_null_record_batch(): + batch = pa.record_batch( + [pa.array(["a", None, "c", "d", None])], names=["a'"]) + result = batch.drop_null() + expected = pa.record_batch([pa.array(["a", "c", "d"])], names=["a'"]) + assert result.equals(expected) + + batch = pa.record_batch( + [pa.array(["a", None, "c", "d", None]), + pa.array([None, None, "c", None, "e"])], names=["a'", "b'"]) + + result = batch.drop_null() + expected = pa.record_batch( + [pa.array(["c"]), pa.array(["c"])], names=["a'", "b'"]) + assert result.equals(expected) + + +def test_drop_null_table(): + table = pa.table([pa.array(["a", None, "c", "d", None])], names=["a"]) + expected = pa.table([pa.array(["a", "c", "d"])], names=["a"]) + result = table.drop_null() + assert result.equals(expected) + + table = pa.table([pa.chunked_array([["a", None], ["c", "d", None]]), + pa.chunked_array([["a", None], [None, "d", None]]), + pa.chunked_array([["a"], ["b"], [None], ["d", None]])], + names=["a", "b", "c"]) + expected = pa.table([pa.array(["a", "d"]), + pa.array(["a", "d"]), + pa.array(["a", "d"])], + names=["a", "b", "c"]) + result = table.drop_null() + assert result.equals(expected) + + table = pa.table([pa.chunked_array([["a", "b"], ["c", "d", "e"]]), + pa.chunked_array([["A"], ["B"], [None], ["D", None]]), + pa.chunked_array([["a`", None], ["c`", "d`", None]])], + names=["a", "b", "c"]) + expected = pa.table([pa.array(["a", "d"]), + pa.array(["A", "D"]), + pa.array(["a`", "d`"])], + names=["a", "b", "c"]) + result = table.drop_null() + assert result.equals(expected) + + +def test_drop_null_null_type(): + arr = pa.array([None] * 10) + chunked_arr = pa.chunked_array([[None] * 5] * 2) + batch = pa.record_batch([arr], names=['a']) + table = pa.table({'a': arr}) + + assert len(arr.drop_null()) == 0 + assert len(chunked_arr.drop_null()) == 0 + assert len(batch.drop_null().column(0)) == 0 + assert len(table.drop_null().column(0)) == 0 + + +@pytest.mark.parametrize(('ty', 'values'), all_array_types) +def test_filter(ty, values): + arr = pa.array(values, type=ty) + + mask = pa.array([True, False, False, True, None]) + result = arr.filter(mask, null_selection_behavior='drop') + result.validate() + assert result.equals(pa.array([values[0], values[3]], type=ty)) + result = arr.filter(mask, null_selection_behavior='emit_null') + 
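+ # 'drop' omits positions where the mask is null, while 'emit_null' keeps
+ # them as nulls in the output. A minimal sketch with assumed values
+ # (not part of the parametrized data):
+ #   >>> pa.array([1, 2, 3]).filter(pa.array([True, None, False])).to_pylist()
+ #   [1]
+ #   >>> pa.array([1, 2, 3]).filter(
+ #   ...     pa.array([True, None, False]),
+ #   ...     null_selection_behavior='emit_null').to_pylist()
+ #   [1, None]
+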
result.validate() + assert result.equals(pa.array([values[0], values[3], None], type=ty)) + + # non-boolean dtype + mask = pa.array([0, 1, 0, 1, 0]) + with pytest.raises(NotImplementedError): + arr.filter(mask) + + # wrong length + mask = pa.array([True, False, True]) + with pytest.raises(ValueError, match="must all be the same length"): + arr.filter(mask) + + +def test_filter_chunked_array(): + arr = pa.chunked_array([["a", None], ["c", "d", "e"]]) + expected_drop = pa.chunked_array([["a"], ["e"]]) + expected_null = pa.chunked_array([["a"], [None, "e"]]) + + for mask in [ + # mask is array + pa.array([True, False, None, False, True]), + # mask is chunked array + pa.chunked_array([[True, False, None], [False, True]]), + # mask is python object + [True, False, None, False, True] + ]: + result = arr.filter(mask) + assert result.equals(expected_drop) + result = arr.filter(mask, null_selection_behavior="emit_null") + assert result.equals(expected_null) + + +def test_filter_record_batch(): + batch = pa.record_batch( + [pa.array(["a", None, "c", "d", "e"])], names=["a'"]) + + # mask is array + mask = pa.array([True, False, None, False, True]) + result = batch.filter(mask) + expected = pa.record_batch([pa.array(["a", "e"])], names=["a'"]) + assert result.equals(expected) + + result = batch.filter(mask, null_selection_behavior="emit_null") + expected = pa.record_batch([pa.array(["a", None, "e"])], names=["a'"]) + assert result.equals(expected) + + +def test_filter_table(): + table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"]) + expected_drop = pa.table([pa.array(["a", "e"])], names=["a"]) + expected_null = pa.table([pa.array(["a", None, "e"])], names=["a"]) + + for mask in [ + # mask is array + pa.array([True, False, None, False, True]), + # mask is chunked array + pa.chunked_array([[True, False], [None, False, True]]), + # mask is python object + [True, False, None, False, True] + ]: + result = table.filter(mask) + assert result.equals(expected_drop) + result = table.filter(mask, null_selection_behavior="emit_null") + assert result.equals(expected_null) + + +def test_filter_errors(): + arr = pa.chunked_array([["a", None], ["c", "d", "e"]]) + batch = pa.record_batch( + [pa.array(["a", None, "c", "d", "e"])], names=["a'"]) + table = pa.table([pa.array(["a", None, "c", "d", "e"])], names=["a"]) + + for obj in [arr, batch, table]: + # non-boolean dtype + mask = pa.array([0, 1, 0, 1, 0]) + with pytest.raises(NotImplementedError): + obj.filter(mask) + + # wrong length + mask = pa.array([True, False, True]) + with pytest.raises(pa.ArrowInvalid, + match="must all be the same length"): + obj.filter(mask) + + scalar = pa.scalar(True) + for filt in [batch, table, scalar]: + with pytest.raises(TypeError): + table.filter(filt) + + +def test_filter_null_type(): + # ARROW-10027 + arr = pa.array([None] * 10) + chunked_arr = pa.chunked_array([[None] * 5] * 2) + batch = pa.record_batch([arr], names=['a']) + table = pa.table({'a': arr}) + + mask = pa.array([True, False] * 5) + assert len(arr.filter(mask)) == 5 + assert len(chunked_arr.filter(mask)) == 5 + assert len(batch.filter(mask).column(0)) == 5 + assert len(table.filter(mask).column(0)) == 5 + + +@pytest.mark.parametrize("typ", ["array", "chunked_array"]) +def test_compare_array(typ): + if typ == "array": + def con(values): + return pa.array(values) + else: + def con(values): + return pa.chunked_array([values]) + + arr1 = con([1, 2, 3, 4, None]) + arr2 = con([1, 1, 4, None, 4]) + + result = pc.equal(arr1, arr2) + assert 
result.equals(con([True, False, False, None, None])) + + result = pc.not_equal(arr1, arr2) + assert result.equals(con([False, True, True, None, None])) + + result = pc.less(arr1, arr2) + assert result.equals(con([False, False, True, None, None])) + + result = pc.less_equal(arr1, arr2) + assert result.equals(con([True, False, True, None, None])) + + result = pc.greater(arr1, arr2) + assert result.equals(con([False, True, False, None, None])) + + result = pc.greater_equal(arr1, arr2) + assert result.equals(con([True, True, False, None, None])) + + +@pytest.mark.parametrize("typ", ["array", "chunked_array"]) +def test_compare_string_scalar(typ): + if typ == "array": + def con(values): + return pa.array(values) + else: + def con(values): + return pa.chunked_array([values]) + + arr = con(['a', 'b', 'c', None]) + scalar = pa.scalar('b') + + result = pc.equal(arr, scalar) + assert result.equals(con([False, True, False, None])) + + if typ == "array": + nascalar = pa.scalar(None, type="string") + result = pc.equal(arr, nascalar) + isnull = pc.is_null(result) + assert isnull.equals(con([True, True, True, True])) + + result = pc.not_equal(arr, scalar) + assert result.equals(con([True, False, True, None])) + + result = pc.less(arr, scalar) + assert result.equals(con([True, False, False, None])) + + result = pc.less_equal(arr, scalar) + assert result.equals(con([True, True, False, None])) + + result = pc.greater(arr, scalar) + assert result.equals(con([False, False, True, None])) + + result = pc.greater_equal(arr, scalar) + assert result.equals(con([False, True, True, None])) + + +@pytest.mark.parametrize("typ", ["array", "chunked_array"]) +def test_compare_scalar(typ): + if typ == "array": + def con(values): + return pa.array(values) + else: + def con(values): + return pa.chunked_array([values]) + + arr = con([1, 2, 3, None]) + scalar = pa.scalar(2) + + result = pc.equal(arr, scalar) + assert result.equals(con([False, True, False, None])) + + if typ == "array": + nascalar = pa.scalar(None, type="int64") + result = pc.equal(arr, nascalar) + assert result.to_pylist() == [None, None, None, None] + + result = pc.not_equal(arr, scalar) + assert result.equals(con([True, False, True, None])) + + result = pc.less(arr, scalar) + assert result.equals(con([True, False, False, None])) + + result = pc.less_equal(arr, scalar) + assert result.equals(con([True, True, False, None])) + + result = pc.greater(arr, scalar) + assert result.equals(con([False, False, True, None])) + + result = pc.greater_equal(arr, scalar) + assert result.equals(con([False, True, True, None])) + + +def test_compare_chunked_array_mixed(): + arr = pa.array([1, 2, 3, 4, None]) + arr_chunked = pa.chunked_array([[1, 2, 3], [4, None]]) + arr_chunked2 = pa.chunked_array([[1, 2], [3, 4, None]]) + + expected = pa.chunked_array([[True, True, True, True, None]]) + + for left, right in [ + (arr, arr_chunked), + (arr_chunked, arr), + (arr_chunked, arr_chunked2), + ]: + result = pc.equal(left, right) + assert result.equals(expected) + + +def test_arithmetic_add(): + left = pa.array([1, 2, 3, 4, 5]) + right = pa.array([0, -1, 1, 2, 3]) + result = pc.add(left, right) + expected = pa.array([1, 1, 4, 6, 8]) + assert result.equals(expected) + + +def test_arithmetic_subtract(): + left = pa.array([1, 2, 3, 4, 5]) + right = pa.array([0, -1, 1, 2, 3]) + result = pc.subtract(left, right) + expected = pa.array([1, 3, 2, 2, 2]) + assert result.equals(expected) + + +def test_arithmetic_multiply(): + left = pa.array([1, 2, 3, 4, 5]) + right = pa.array([0, -1, 1, 2, 3]) 
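+ # Like add/subtract above, multiply wraps around on integer overflow;
+ # a checked variant (e.g. "multiply_checked") raises instead. The operands
+ # here stay well inside the int64 range, so no overflow is exercised.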
+ result = pc.multiply(left, right) + expected = pa.array([0, -2, 3, 8, 15]) + assert result.equals(expected) + + +@pytest.mark.parametrize("ty", ["round", "round_to_multiple"]) +def test_round_to_integer(ty): + if ty == "round": + round = pc.round + RoundOptions = partial(pc.RoundOptions, ndigits=0) + elif ty == "round_to_multiple": + round = pc.round_to_multiple + RoundOptions = partial(pc.RoundToMultipleOptions, multiple=1) + + values = [3.2, 3.5, 3.7, 4.5, -3.2, -3.5, -3.7, None] + rmode_and_expected = { + "down": [3, 3, 3, 4, -4, -4, -4, None], + "up": [4, 4, 4, 5, -3, -3, -3, None], + "towards_zero": [3, 3, 3, 4, -3, -3, -3, None], + "towards_infinity": [4, 4, 4, 5, -4, -4, -4, None], + "half_down": [3, 3, 4, 4, -3, -4, -4, None], + "half_up": [3, 4, 4, 5, -3, -3, -4, None], + "half_towards_zero": [3, 3, 4, 4, -3, -3, -4, None], + "half_towards_infinity": [3, 4, 4, 5, -3, -4, -4, None], + "half_to_even": [3, 4, 4, 4, -3, -4, -4, None], + "half_to_odd": [3, 3, 4, 5, -3, -3, -4, None], + } + for round_mode, expected in rmode_and_expected.items(): + options = RoundOptions(round_mode=round_mode) + result = round(values, options=options) + np.testing.assert_array_equal(result, pa.array(expected)) + + +def test_round(): + values = [320, 3.5, 3.075, 4.5, -3.212, -35.1234, -3.045, None] + ndigits_and_expected = { + -2: [300, 0, 0, 0, -0, -0, -0, None], + -1: [320, 0, 0, 0, -0, -40, -0, None], + 0: [320, 4, 3, 5, -3, -35, -3, None], + 1: [320, 3.5, 3.1, 4.5, -3.2, -35.1, -3, None], + 2: [320, 3.5, 3.08, 4.5, -3.21, -35.12, -3.05, None], + } + for ndigits, expected in ndigits_and_expected.items(): + options = pc.RoundOptions(ndigits, "half_towards_infinity") + result = pc.round(values, options=options) + np.testing.assert_allclose(result, pa.array(expected), equal_nan=True) + assert pc.round(values, ndigits, + round_mode="half_towards_infinity") == result + assert pc.round(values, ndigits, "half_towards_infinity") == result + + +def test_round_to_multiple(): + values = [320, 3.5, 3.075, 4.5, -3.212, -35.1234, -3.045, None] + multiple_and_expected = { + 0.05: [320, 3.5, 3.1, 4.5, -3.2, -35.1, -3.05, None], + pa.scalar(0.1): [320, 3.5, 3.1, 4.5, -3.2, -35.1, -3, None], + 2: [320, 4, 4, 4, -4, -36, -4, None], + 10: [320, 0, 0, 0, -0, -40, -0, None], + pa.scalar(100, type=pa.decimal256(10, 4)): + [300, 0, 0, 0, -0, -0, -0, None], + } + for multiple, expected in multiple_and_expected.items(): + options = pc.RoundToMultipleOptions(multiple, "half_towards_infinity") + result = pc.round_to_multiple(values, options=options) + np.testing.assert_allclose(result, pa.array(expected), equal_nan=True) + assert pc.round_to_multiple(values, multiple, + "half_towards_infinity") == result + + for multiple in [0, -2, pa.scalar(-10.4)]: + with pytest.raises(pa.ArrowInvalid, + match="Rounding multiple must be positive"): + pc.round_to_multiple(values, multiple=multiple) + + for multiple in [object, 99999999999999999999999]: + with pytest.raises(TypeError, match="is not a valid multiple type"): + pc.round_to_multiple(values, multiple=multiple) + + +def test_round_binary(): + values = [123.456, 234.567, 345.678, 456.789, 123.456, 234.567, 345.678] + scales = pa.array([-3, -2, -1, 0, 1, 2, 3], pa.int32()) + expected = pa.array( + [0, 200, 350, 457, 123.5, 234.57, 345.678], pa.float64()) + assert pc.round_binary(values, scales) == expected + + expect_zero = pa.scalar(0, pa.float64()) + expect_inf = pa.scalar(10, pa.float64()) + scale = pa.scalar(-1, pa.int32()) + + assert pc.round_binary( + 5.0, scale, 
round_mode="half_towards_zero") == expect_zero + assert pc.round_binary( + 5.0, scale, round_mode="half_towards_infinity") == expect_inf + + +def test_is_null(): + arr = pa.array([1, 2, 3, None]) + result = arr.is_null() + expected = pa.array([False, False, False, True]) + assert result.equals(expected) + assert result.equals(pc.is_null(arr)) + result = arr.is_valid() + expected = pa.array([True, True, True, False]) + assert result.equals(expected) + assert result.equals(pc.is_valid(arr)) + + arr = pa.chunked_array([[1, 2], [3, None]]) + result = arr.is_null() + expected = pa.chunked_array([[False, False], [False, True]]) + assert result.equals(expected) + result = arr.is_valid() + expected = pa.chunked_array([[True, True], [True, False]]) + assert result.equals(expected) + + arr = pa.array([1, 2, 3, None, np.nan]) + result = arr.is_null() + expected = pa.array([False, False, False, True, False]) + assert result.equals(expected) + + result = arr.is_null(nan_is_null=True) + expected = pa.array([False, False, False, True, True]) + assert result.equals(expected) + + +def test_is_nan(): + arr = pa.array([1, 2, 3, None, np.nan]) + result = arr.is_nan() + expected = pa.array([False, False, False, None, True]) + assert result.equals(expected) + + arr = pa.array(["1", "2", None], type=pa.string()) + with pytest.raises( + ArrowNotImplementedError, match="has no kernel matching input types"): + _ = arr.is_nan() + + with pytest.raises( + ArrowNotImplementedError, match="has no kernel matching input types"): + arr = pa.array([b'a', b'bb', None], type=pa.large_binary()) + _ = arr.is_nan() + + +def test_fill_null(): + arr = pa.array([1, 2, None, 4], type=pa.int8()) + fill_value = pa.array([5], type=pa.int8()) + with pytest.raises(pa.ArrowInvalid, + match="Array arguments must all be the same length"): + arr.fill_null(fill_value) + + arr = pa.array([None, None, None, None], type=pa.null()) + fill_value = pa.scalar(None, type=pa.null()) + result = arr.fill_null(fill_value) + expected = pa.array([None, None, None, None]) + assert result.equals(expected) + + arr = pa.array(['a', 'bb', None]) + result = arr.fill_null('ccc') + expected = pa.array(['a', 'bb', 'ccc']) + assert result.equals(expected) + + arr = pa.array([b'a', b'bb', None], type=pa.large_binary()) + result = arr.fill_null('ccc') + expected = pa.array([b'a', b'bb', b'ccc'], type=pa.large_binary()) + assert result.equals(expected) + + arr = pa.array(['a', 'bb', None]) + result = arr.fill_null(None) + expected = pa.array(['a', 'bb', None]) + assert result.equals(expected) + + +@pytest.mark.parametrize('arrow_type', numerical_arrow_types) +def test_fill_null_array(arrow_type): + arr = pa.array([1, 2, None, 4], type=arrow_type) + fill_value = pa.scalar(5, type=arrow_type) + result = arr.fill_null(fill_value) + expected = pa.array([1, 2, 5, 4], type=arrow_type) + assert result.equals(expected) + + # Implicit conversions + result = arr.fill_null(5) + assert result.equals(expected) + + # ARROW-9451: Unsigned integers allow this for some reason + if not pa.types.is_unsigned_integer(arr.type): + with pytest.raises((ValueError, TypeError)): + arr.fill_null('5') + + result = arr.fill_null(pa.scalar(5, type='int8')) + assert result.equals(expected) + + +@pytest.mark.parametrize('arrow_type', numerical_arrow_types) +def test_fill_null_chunked_array(arrow_type): + fill_value = pa.scalar(5, type=arrow_type) + arr = pa.chunked_array([pa.array([None, 2, 3, 4], type=arrow_type)]) + result = arr.fill_null(fill_value) + expected = pa.chunked_array([pa.array([5, 2, 
3, 4], type=arrow_type)]) + assert result.equals(expected) + + arr = pa.chunked_array([ + pa.array([1, 2], type=arrow_type), + pa.array([], type=arrow_type), + pa.array([None, 4], type=arrow_type) + ]) + expected = pa.chunked_array([ + pa.array([1, 2], type=arrow_type), + pa.array([], type=arrow_type), + pa.array([5, 4], type=arrow_type) + ]) + result = arr.fill_null(fill_value) + assert result.equals(expected) + + # Implicit conversions + result = arr.fill_null(5) + assert result.equals(expected) + + result = arr.fill_null(pa.scalar(5, type='int8')) + assert result.equals(expected) + + +def test_logical(): + a = pa.array([True, False, False, None]) + b = pa.array([True, True, False, True]) + + assert pc.and_(a, b) == pa.array([True, False, False, None]) + assert pc.and_kleene(a, b) == pa.array([True, False, False, None]) + + assert pc.or_(a, b) == pa.array([True, True, False, None]) + assert pc.or_kleene(a, b) == pa.array([True, True, False, True]) + + assert pc.xor(a, b) == pa.array([False, True, False, None]) + + assert pc.invert(a) == pa.array([False, True, True, None]) + + +def test_dictionary_decode(): + array = pa.array(["a", "a", "b", "c", "b"]) + dictionary_array = array.dictionary_encode() + dictionary_array_decode = pc.dictionary_decode(dictionary_array) + + assert array != dictionary_array + + assert array == dictionary_array_decode + assert array == pc.dictionary_decode(array) + assert pc.dictionary_encode(dictionary_array) == dictionary_array + + +def test_cast(): + arr = pa.array([1, 2, 3, 4], type='int64') + options = pc.CastOptions(pa.int8()) + + with pytest.raises(TypeError): + pc.cast(arr, target_type=None) + + with pytest.raises(ValueError): + pc.cast(arr, 'int32', options=options) + + with pytest.raises(ValueError): + pc.cast(arr, safe=True, options=options) + + assert pc.cast(arr, options=options) == pa.array( + [1, 2, 3, 4], type='int8') + + arr = pa.array([2 ** 63 - 1], type='int64') + allow_overflow_options = pc.CastOptions( + pa.int32(), allow_int_overflow=True) + + with pytest.raises(pa.ArrowInvalid): + pc.cast(arr, 'int32') + + assert pc.cast(arr, 'int32', safe=False) == pa.array([-1], type='int32') + + assert pc.cast(arr, options=allow_overflow_options) == pa.array( + [-1], type='int32') + + arr = pa.array( + [datetime.datetime(2010, 1, 1), datetime.datetime(2015, 1, 1)]) + expected = pa.array([1262304000000, 1420070400000], type='timestamp[ms]') + assert pc.cast(arr, 'timestamp[ms]') == expected + + arr = pa.array([[1, 2], [3, 4, 5]], type=pa.large_list(pa.int8())) + expected = pa.array([["1", "2"], ["3", "4", "5"]], + type=pa.list_(pa.utf8())) + assert pc.cast(arr, expected.type) == expected + + +@pytest.mark.parametrize('value_type', numerical_arrow_types) +def test_fsl_to_fsl_cast(value_type): + # Different field name and different type. + cast_type = pa.list_(pa.field("element", value_type), 2) + + dtype = pa.int32() + type = pa.list_(pa.field("values", dtype), 2) + + fsl = pa.FixedSizeListArray.from_arrays( + pa.array([1, 2, 3, 4, 5, 6], type=dtype), type=type) + assert cast_type == fsl.cast(cast_type).type + + # Different field name and different type (with null values). + fsl = pa.FixedSizeListArray.from_arrays( + pa.array([1, None, None, 4, 5, 6], type=dtype), type=type) + assert cast_type == fsl.cast(cast_type).type + + # Null FSL type. 
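+ # Here the child storage type is pa.null(), i.e. every value is null; the
+ # cast should still succeed and yield an all-null list of the target value
+ # type, roughly: fixed_size_list<values: null>[2] ->
+ # fixed_size_list<element: value_type>[2] (value_type being the
+ # parametrized numeric type).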
+ dtype = pa.null() + type = pa.list_(pa.field("values", dtype), 2) + fsl = pa.FixedSizeListArray.from_arrays( + pa.array([None, None, None, None, None, None], type=dtype), type=type) + assert cast_type == fsl.cast(cast_type).type + + # Different sized FSL + cast_type = pa.list_(pa.field("element", value_type), 3) + err_msg = 'Size of FixedSizeList is not the same.' + with pytest.raises(pa.lib.ArrowTypeError, match=err_msg): + fsl.cast(cast_type) + + +DecimalTypeTraits = namedtuple('DecimalTypeTraits', + ('name', 'factory', 'max_precision')) + +FloatToDecimalCase = namedtuple('FloatToDecimalCase', + ('precision', 'scale', 'float_val')) + +decimal_type_traits = [DecimalTypeTraits('decimal128', pa.decimal128, 38), + DecimalTypeTraits('decimal256', pa.decimal256, 76)] + + +def largest_scaled_float_not_above(val, scale): + """ + Find the largest float f such as `f * 10**scale <= val` + """ + assert val >= 0 + assert scale >= 0 + float_val = float(val) / 10**scale + if float_val * 10**scale > val: + # Take the float just below... it *should* satisfy + float_val = np.nextafter(float_val, 0.0) + if float_val * 10**scale > val: + float_val = np.nextafter(float_val, 0.0) + assert float_val * 10**scale <= val + return float_val + + +def scaled_float(int_val, scale): + """ + Return a float representation (possibly approximate) of `int_val**-scale` + """ + assert isinstance(int_val, int) + unscaled = decimal.Decimal(int_val) + scaled = unscaled.scaleb(-scale) + float_val = float(scaled) + return float_val + + +def integral_float_to_decimal_cast_cases(float_ty, max_precision): + """ + Return FloatToDecimalCase instances with integral values. + """ + mantissa_digits = 16 + for precision in range(1, max_precision, 3): + for scale in range(0, precision, 2): + yield FloatToDecimalCase(precision, scale, 0.0) + yield FloatToDecimalCase(precision, scale, 1.0) + epsilon = 10**max(precision - mantissa_digits, scale) + abs_maxval = largest_scaled_float_not_above( + 10**precision - epsilon, scale) + yield FloatToDecimalCase(precision, scale, abs_maxval) + + +def real_float_to_decimal_cast_cases(float_ty, max_precision): + """ + Return FloatToDecimalCase instances with real values. + """ + mantissa_digits = 16 + for precision in range(1, max_precision, 3): + for scale in range(0, precision, 2): + epsilon = 2 * 10**max(precision - mantissa_digits, 0) + abs_minval = largest_scaled_float_not_above(epsilon, scale) + abs_maxval = largest_scaled_float_not_above( + 10**precision - epsilon, scale) + yield FloatToDecimalCase(precision, scale, abs_minval) + yield FloatToDecimalCase(precision, scale, abs_maxval) + + +def random_float_to_decimal_cast_cases(float_ty, max_precision): + """ + Return random-generated FloatToDecimalCase instances. 
+ """ + r = random.Random(42) + for precision in range(1, max_precision, 6): + for scale in range(0, precision, 4): + for i in range(20): + unscaled = r.randrange(0, 10**precision) + float_val = scaled_float(unscaled, scale) + assert float_val * 10**scale < 10**precision + yield FloatToDecimalCase(precision, scale, float_val) + + +def check_cast_float_to_decimal(float_ty, float_val, decimal_ty, decimal_ctx, + max_precision): + # Use the Python decimal module to build the expected result + # using the right precision + decimal_ctx.prec = decimal_ty.precision + decimal_ctx.rounding = decimal.ROUND_HALF_EVEN + expected = decimal_ctx.create_decimal_from_float(float_val) + # Round `expected` to `scale` digits after the decimal point + expected = expected.quantize(decimal.Decimal(1).scaleb(-decimal_ty.scale)) + s = pa.scalar(float_val, type=float_ty) + actual = pc.cast(s, decimal_ty).as_py() + if actual != expected: + # Allow the last digit to vary. The tolerance is higher for + # very high precisions as rounding errors can accumulate in + # the iterative algorithm (GH-35576). + diff_digits = abs(actual - expected) * 10**decimal_ty.scale + limit = 2 if decimal_ty.precision < max_precision - 1 else 4 + assert diff_digits <= limit, ( + f"float_val = {float_val!r}, precision={decimal_ty.precision}, " + f"expected = {expected!r}, actual = {actual!r}, " + f"diff_digits = {diff_digits!r}") + + +# Cannot test float32 as case generators above assume float64 +@pytest.mark.parametrize('float_ty', [pa.float64()], ids=str) +@pytest.mark.parametrize('decimal_ty', decimal_type_traits, + ids=lambda v: v.name) +@pytest.mark.parametrize('case_generator', + [integral_float_to_decimal_cast_cases, + real_float_to_decimal_cast_cases, + random_float_to_decimal_cast_cases], + ids=['integrals', 'reals', 'random']) +def test_cast_float_to_decimal(float_ty, decimal_ty, case_generator): + with decimal.localcontext() as ctx: + for case in case_generator(float_ty, decimal_ty.max_precision): + check_cast_float_to_decimal( + float_ty, case.float_val, + decimal_ty.factory(case.precision, case.scale), + ctx, decimal_ty.max_precision) + + +@pytest.mark.parametrize('float_ty', [pa.float32(), pa.float64()], ids=str) +@pytest.mark.parametrize('decimal_traits', decimal_type_traits, + ids=lambda v: v.name) +def test_cast_float_to_decimal_random(float_ty, decimal_traits): + """ + Test float-to-decimal conversion against exactly generated values. + """ + r = random.Random(43) + np_float_ty = { + pa.float32(): np.float32, + pa.float64(): np.float64, + }[float_ty] + mantissa_bits = { + pa.float32(): 24, + pa.float64(): 53, + }[float_ty] + float_exp_min, float_exp_max = { + pa.float32(): (-126, 127), + pa.float64(): (-1022, 1023), + }[float_ty] + mantissa_digits = math.floor(math.log10(2**mantissa_bits)) + max_precision = decimal_traits.max_precision + + with decimal.localcontext() as ctx: + precision = mantissa_digits + ctx.prec = precision + # The scale must be chosen so as + # 1) it's within bounds for the decimal type + # 2) the floating point exponent is within bounds + min_scale = max(-max_precision, + precision + math.ceil(math.log10(2**float_exp_min))) + max_scale = min(max_precision, + math.floor(math.log10(2**float_exp_max))) + for scale in range(min_scale, max_scale): + decimal_ty = decimal_traits.factory(precision, scale) + # We want to random-generate a float from its mantissa bits + # and exponent, and compute the expected value in the + # decimal domain. 
The float exponent has to ensure the + # expected value doesn't overflow and doesn't lose precision. + float_exp = (-mantissa_bits + + math.floor(math.log2(10**(precision - scale)))) + assert float_exp_min <= float_exp <= float_exp_max + for i in range(5): + mantissa = r.randrange(0, 2**mantissa_bits) + float_val = np.ldexp(np_float_ty(mantissa), float_exp) + assert isinstance(float_val, np_float_ty) + # Make sure we compute the exact expected value and + # round by half-to-even when converting to the expected precision. + if float_exp >= 0: + expected = decimal.Decimal(mantissa) * 2**float_exp + else: + expected = decimal.Decimal(mantissa) / 2**-float_exp + expected_as_int = round(expected.scaleb(scale)) + actual = pc.cast( + pa.scalar(float_val, type=float_ty), decimal_ty).as_py() + actual_as_int = round(actual.scaleb(scale)) + # We allow for a minor rounding error between expected and actual + assert abs(actual_as_int - expected_as_int) <= 1 + + +def test_strptime(): + arr = pa.array(["5/1/2020", None, "12/13/1900"]) + + got = pc.strptime(arr, format='%m/%d/%Y', unit='s') + expected = pa.array( + [datetime.datetime(2020, 5, 1), None, datetime.datetime(1900, 12, 13)], + type=pa.timestamp('s')) + assert got == expected + # Positional format + assert pc.strptime(arr, '%m/%d/%Y', unit='s') == got + + expected = pa.array([datetime.datetime(2020, 1, 5), None, None], + type=pa.timestamp('s')) + got = pc.strptime(arr, format='%d/%m/%Y', unit='s', error_is_null=True) + assert got == expected + + with pytest.raises(pa.ArrowInvalid, + match="Failed to parse string: '5/1/2020'"): + pc.strptime(arr, format='%Y-%m-%d', unit='s', error_is_null=False) + + with pytest.raises(pa.ArrowInvalid, + match="Failed to parse string: '5/1/2020'"): + pc.strptime(arr, format='%Y-%m-%d', unit='s') + + got = pc.strptime(arr, format='%Y-%m-%d', unit='s', error_is_null=True) + assert got == pa.array([None, None, None], type=pa.timestamp('s')) + + +@pytest.mark.pandas +@pytest.mark.skipif(sys.platform == "win32" and not util.windows_has_tzdata(), + reason="Timezone database is not installed on Windows") +def test_strftime(): + times = ["2018-03-10 09:00", "2038-01-31 12:23", None] + timezones = ["CET", "UTC", "Europe/Ljubljana"] + + formats = ["%a", "%A", "%w", "%d", "%b", "%B", "%m", "%y", "%Y", "%H", "%I", + "%p", "%M", "%z", "%Z", "%j", "%U", "%W", "%%", "%G", "%V", "%u"] + if sys.platform != "win32": + # Locale-dependent formats don't match on Windows + formats.extend(["%c", "%x", "%X"]) + + for timezone in timezones: + ts = pd.to_datetime(times).tz_localize(timezone) + for unit in ["s", "ms", "us", "ns"]: + tsa = pa.array(ts, type=pa.timestamp(unit, timezone)) + for fmt in formats: + options = pc.StrftimeOptions(fmt) + result = pc.strftime(tsa, options=options) + expected = pa.array(ts.strftime(fmt)) + assert result.equals(expected) + + fmt = "%Y-%m-%dT%H:%M:%S" + + # Default format + tsa = pa.array(ts, type=pa.timestamp("s", timezone)) + result = pc.strftime(tsa, options=pc.StrftimeOptions()) + expected = pa.array(ts.strftime(fmt)) + assert result.equals(expected) + + # Default format plus timezone + tsa = pa.array(ts, type=pa.timestamp("s", timezone)) + result = pc.strftime(tsa, options=pc.StrftimeOptions(fmt + "%Z")) + expected = pa.array(ts.strftime(fmt + "%Z")) + assert result.equals(expected) + + # Pandas %S is equivalent to %S in arrow for unit="s" + tsa = pa.array(ts, type=pa.timestamp("s", timezone)) + options = pc.StrftimeOptions("%S") + result = pc.strftime(tsa, options=options) + expected = 
pa.array(ts.strftime("%S")) + assert result.equals(expected) + + # Pandas %S.%f is equivalent to %S in arrow for unit="us" + tsa = pa.array(ts, type=pa.timestamp("us", timezone)) + options = pc.StrftimeOptions("%S") + result = pc.strftime(tsa, options=options) + expected = pa.array(ts.strftime("%S.%f")) + assert result.equals(expected) + + # Test setting locale + tsa = pa.array(ts, type=pa.timestamp("s", timezone)) + options = pc.StrftimeOptions(fmt, locale="C") + result = pc.strftime(tsa, options=options) + expected = pa.array(ts.strftime(fmt)) + assert result.equals(expected) + + # Test timestamps without timezone + fmt = "%Y-%m-%dT%H:%M:%S" + ts = pd.to_datetime(times) + tsa = pa.array(ts, type=pa.timestamp("s")) + result = pc.strftime(tsa, options=pc.StrftimeOptions(fmt)) + expected = pa.array(ts.strftime(fmt)) + + # Positional format + assert pc.strftime(tsa, fmt) == result + + assert result.equals(expected) + with pytest.raises(pa.ArrowInvalid, + match="Timezone not present, cannot convert to string"): + pc.strftime(tsa, options=pc.StrftimeOptions(fmt + "%Z")) + with pytest.raises(pa.ArrowInvalid, + match="Timezone not present, cannot convert to string"): + pc.strftime(tsa, options=pc.StrftimeOptions(fmt + "%z")) + + +def _check_datetime_components(timestamps, timezone=None): + from pyarrow.vendored.version import Version + + ts = pd.to_datetime(timestamps).tz_localize( + "UTC").tz_convert(timezone).to_series() + tsa = pa.array(ts, pa.timestamp("ns", tz=timezone)) + + subseconds = ((ts.dt.microsecond * 10 ** 3 + + ts.dt.nanosecond) * 10 ** -9).round(9) + iso_calendar_fields = [ + pa.field('iso_year', pa.int64()), + pa.field('iso_week', pa.int64()), + pa.field('iso_day_of_week', pa.int64()) + ] + + if Version(pd.__version__) < Version("1.1.0"): + # https://github.com/pandas-dev/pandas/issues/33206 + iso_year = ts.map(lambda x: x.isocalendar()[0]).astype("int64") + iso_week = ts.map(lambda x: x.isocalendar()[1]).astype("int64") + iso_day = ts.map(lambda x: x.isocalendar()[2]).astype("int64") + else: + # Casting is required because pandas isocalendar returns int32 + # while arrow isocalendar returns int64. 
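+ # For example, 2020-01-01 falls in ISO year 2020, ISO week 1, weekday 3;
+ # pc.iso_calendar returns those fields as an int64
+ # struct<iso_year, iso_week, iso_day_of_week>, while pandas uses narrower
+ # integer dtypes, hence the astype("int64") casts below.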
+ iso_year = ts.dt.isocalendar()["year"].astype("int64") + iso_week = ts.dt.isocalendar()["week"].astype("int64") + iso_day = ts.dt.isocalendar()["day"].astype("int64") + + iso_calendar = pa.StructArray.from_arrays( + [iso_year, iso_week, iso_day], + fields=iso_calendar_fields) + + # Casting is required because pandas with 2.0.0 various numeric + # date/time attributes have dtype int32 (previously int64) + year = ts.dt.year.astype("int64") + month = ts.dt.month.astype("int64") + day = ts.dt.day.astype("int64") + dayofweek = ts.dt.dayofweek.astype("int64") + dayofyear = ts.dt.dayofyear.astype("int64") + quarter = ts.dt.quarter.astype("int64") + hour = ts.dt.hour.astype("int64") + minute = ts.dt.minute.astype("int64") + second = ts.dt.second.values.astype("int64") + microsecond = ts.dt.microsecond.astype("int64") + nanosecond = ts.dt.nanosecond.astype("int64") + + assert pc.year(tsa).equals(pa.array(year)) + assert pc.is_leap_year(tsa).equals(pa.array(ts.dt.is_leap_year)) + assert pc.month(tsa).equals(pa.array(month)) + assert pc.day(tsa).equals(pa.array(day)) + assert pc.day_of_week(tsa).equals(pa.array(dayofweek)) + assert pc.day_of_year(tsa).equals(pa.array(dayofyear)) + assert pc.iso_year(tsa).equals(pa.array(iso_year)) + assert pc.iso_week(tsa).equals(pa.array(iso_week)) + assert pc.iso_calendar(tsa).equals(iso_calendar) + assert pc.quarter(tsa).equals(pa.array(quarter)) + assert pc.hour(tsa).equals(pa.array(hour)) + assert pc.minute(tsa).equals(pa.array(minute)) + assert pc.second(tsa).equals(pa.array(second)) + assert pc.millisecond(tsa).equals(pa.array(microsecond // 10 ** 3)) + assert pc.microsecond(tsa).equals(pa.array(microsecond % 10 ** 3)) + assert pc.nanosecond(tsa).equals(pa.array(nanosecond)) + assert pc.subsecond(tsa).equals(pa.array(subseconds)) + assert pc.local_timestamp(tsa).equals(pa.array(ts.dt.tz_localize(None))) + + if ts.dt.tz: + if ts.dt.tz is datetime.timezone.utc: + # datetime with utc returns None for dst() + is_dst = [False] * len(ts) + else: + is_dst = ts.apply(lambda x: x.dst().seconds > 0) + assert pc.is_dst(tsa).equals(pa.array(is_dst)) + + day_of_week_options = pc.DayOfWeekOptions( + count_from_zero=False, week_start=1) + assert pc.day_of_week(tsa, options=day_of_week_options).equals( + pa.array(dayofweek + 1)) + + week_options = pc.WeekOptions( + week_starts_monday=True, count_from_zero=False, + first_week_is_fully_in_year=False) + assert pc.week(tsa, options=week_options).equals(pa.array(iso_week)) + + +@pytest.mark.pandas +def test_extract_datetime_components(): + timestamps = ["1970-01-01T00:00:59.123456789", + "2000-02-29T23:23:23.999999999", + "2033-05-18T03:33:20.000000000", + "2020-01-01T01:05:05.001", + "2019-12-31T02:10:10.002", + "2019-12-30T03:15:15.003", + "2009-12-31T04:20:20.004132", + "2010-01-01T05:25:25.005321", + "2010-01-03T06:30:30.006163", + "2010-01-04T07:35:35.0", + "2006-01-01T08:40:40.0", + "2005-12-31T09:45:45.0", + "2008-12-28T00:00:00.0", + "2008-12-29T00:00:00.0", + "2012-01-01T01:02:03.0"] + timezones = ["UTC", "US/Central", "Asia/Kolkata", + "Etc/GMT-4", "Etc/GMT+4", "Australia/Broken_Hill"] + + # Test timezone naive timestamp array + _check_datetime_components(timestamps) + + # Test timezone aware timestamp array + if sys.platform == "win32" and not util.windows_has_tzdata(): + pytest.skip('Timezone database is not installed on Windows') + else: + for timezone in timezones: + _check_datetime_components(timestamps, timezone) + + +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) +def 
test_iso_calendar_longer_array(unit): + # https://github.com/apache/arrow/issues/38655 + # ensure correct result for array length > 32 + arr = pa.array([datetime.datetime(2022, 1, 2, 9)]*50, pa.timestamp(unit)) + result = pc.iso_calendar(arr) + expected = pa.StructArray.from_arrays( + [[2021]*50, [52]*50, [7]*50], + names=['iso_year', 'iso_week', 'iso_day_of_week'] + ) + assert result.equals(expected) + + +@pytest.mark.pandas +@pytest.mark.skipif(sys.platform == "win32" and not util.windows_has_tzdata(), + reason="Timezone database is not installed on Windows") +def test_assume_timezone(): + ts_type = pa.timestamp("ns") + timestamps = pd.to_datetime(["1970-01-01T00:00:59.123456789", + "2000-02-29T23:23:23.999999999", + "2033-05-18T03:33:20.000000000", + "2020-01-01T01:05:05.001", + "2019-12-31T02:10:10.002", + "2019-12-30T03:15:15.003", + "2009-12-31T04:20:20.004132", + "2010-01-01T05:25:25.005321", + "2010-01-03T06:30:30.006163", + "2010-01-04T07:35:35.0", + "2006-01-01T08:40:40.0", + "2005-12-31T09:45:45.0", + "2008-12-28T00:00:00.0", + "2008-12-29T00:00:00.0", + "2012-01-01T01:02:03.0"]) + nonexistent = pd.to_datetime(["2015-03-29 02:30:00", + "2015-03-29 03:30:00"]) + ambiguous = pd.to_datetime(["2018-10-28 01:20:00", + "2018-10-28 02:36:00", + "2018-10-28 03:46:00"]) + ambiguous_array = pa.array(ambiguous, type=ts_type) + nonexistent_array = pa.array(nonexistent, type=ts_type) + + for timezone in ["UTC", "US/Central", "Asia/Kolkata"]: + options = pc.AssumeTimezoneOptions(timezone) + ta = pa.array(timestamps, type=ts_type) + expected = timestamps.tz_localize(timezone) + result = pc.assume_timezone(ta, options=options) + assert result.equals(pa.array(expected)) + result = pc.assume_timezone(ta, timezone) # Positional option + assert result.equals(pa.array(expected)) + + ta_zoned = pa.array(timestamps, type=pa.timestamp("ns", timezone)) + with pytest.raises(pa.ArrowInvalid, match="already have a timezone:"): + pc.assume_timezone(ta_zoned, options=options) + + invalid_options = pc.AssumeTimezoneOptions("Europe/Brusselsss") + with pytest.raises(ValueError, match="not found in timezone database"): + pc.assume_timezone(ta, options=invalid_options) + + timezone = "Europe/Brussels" + + options_nonexistent_raise = pc.AssumeTimezoneOptions(timezone) + options_nonexistent_earliest = pc.AssumeTimezoneOptions( + timezone, ambiguous="raise", nonexistent="earliest") + options_nonexistent_latest = pc.AssumeTimezoneOptions( + timezone, ambiguous="raise", nonexistent="latest") + + with pytest.raises(ValueError, + match="Timestamp doesn't exist in " + f"timezone '{timezone}'"): + pc.assume_timezone(nonexistent_array, + options=options_nonexistent_raise) + + expected = pa.array(nonexistent.tz_localize( + timezone, nonexistent="shift_forward")) + result = pc.assume_timezone( + nonexistent_array, options=options_nonexistent_latest) + expected.equals(result) + + expected = pa.array(nonexistent.tz_localize( + timezone, nonexistent="shift_backward")) + result = pc.assume_timezone( + nonexistent_array, options=options_nonexistent_earliest) + expected.equals(result) + + options_ambiguous_raise = pc.AssumeTimezoneOptions(timezone) + options_ambiguous_latest = pc.AssumeTimezoneOptions( + timezone, ambiguous="latest", nonexistent="raise") + options_ambiguous_earliest = pc.AssumeTimezoneOptions( + timezone, ambiguous="earliest", nonexistent="raise") + + with pytest.raises(ValueError, + match="Timestamp is ambiguous in " + f"timezone '{timezone}'"): + pc.assume_timezone(ambiguous_array, 
options=options_ambiguous_raise) + + expected = ambiguous.tz_localize(timezone, ambiguous=[True, True, True]) + result = pc.assume_timezone( + ambiguous_array, options=options_ambiguous_earliest) + result.equals(pa.array(expected)) + + expected = ambiguous.tz_localize(timezone, ambiguous=[False, False, False]) + result = pc.assume_timezone( + ambiguous_array, options=options_ambiguous_latest) + result.equals(pa.array(expected)) + + +def _check_temporal_rounding(ts, values, unit): + unit_shorthand = { + "nanosecond": "ns", + "microsecond": "us", + "millisecond": "ms", + "second": "s", + "minute": "min", + "hour": "h", + "day": "D" + } + greater_unit = { + "nanosecond": "us", + "microsecond": "ms", + "millisecond": "s", + "second": "min", + "minute": "h", + "hour": "d", + } + ta = pa.array(ts) + + for value in values: + frequency = str(value) + unit_shorthand[unit] + options = pc.RoundTemporalOptions(value, unit) + + result = pc.ceil_temporal(ta, options=options).to_pandas() + expected = ts.dt.ceil(frequency) + np.testing.assert_array_equal(result, expected) + + result = pc.floor_temporal(ta, options=options).to_pandas() + expected = ts.dt.floor(frequency) + np.testing.assert_array_equal(result, expected) + + result = pc.round_temporal(ta, options=options).to_pandas() + expected = ts.dt.round(frequency) + np.testing.assert_array_equal(result, expected) + + # Check rounding with calendar_based_origin=True. + # Note: rounding to month is not supported in Pandas so we can't + # approximate this functionality and exclude unit == "day". + if unit != "day": + options = pc.RoundTemporalOptions( + value, unit, calendar_based_origin=True) + origin = ts.dt.floor(greater_unit[unit]) + + if ta.type.tz is None: + result = pc.ceil_temporal(ta, options=options).to_pandas() + expected = (ts - origin).dt.ceil(frequency) + origin + np.testing.assert_array_equal(result, expected) + + result = pc.floor_temporal(ta, options=options).to_pandas() + expected = (ts - origin).dt.floor(frequency) + origin + np.testing.assert_array_equal(result, expected) + + result = pc.round_temporal(ta, options=options).to_pandas() + expected = (ts - origin).dt.round(frequency) + origin + np.testing.assert_array_equal(result, expected) + + # Check RoundTemporalOptions partial defaults + if unit == "day": + result = pc.ceil_temporal(ta, multiple=value).to_pandas() + expected = ts.dt.ceil(frequency) + np.testing.assert_array_equal(result, expected) + + result = pc.floor_temporal(ta, multiple=value).to_pandas() + expected = ts.dt.floor(frequency) + np.testing.assert_array_equal(result, expected) + + result = pc.round_temporal(ta, multiple=value).to_pandas() + expected = ts.dt.round(frequency) + np.testing.assert_array_equal(result, expected) + + # We naively test ceil_is_strictly_greater by adding time unit multiple + # to regular ceiled timestamp if it is equal to the original timestamp. + # This does not work if timestamp is zoned since our logic will not + # account for DST jumps. 
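+ # Concretely: a timestamp that already sits on a boundary is returned
+ # unchanged by plain ceil, whereas ceil_is_strictly_greater=True moves it
+ # to the next multiple; the expected values below emulate that by adding
+ # one multiple wherever ceil(ts) == ts, which is only safe for tz-naive
+ # data.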
+ if ta.type.tz is None: + options = pc.RoundTemporalOptions( + value, unit, ceil_is_strictly_greater=True) + result = pc.ceil_temporal(ta, options=options) + expected = ts.dt.ceil(frequency) + + expected = np.where( + expected == ts, + expected + pd.Timedelta(value, unit_shorthand[unit]), + expected) + np.testing.assert_array_equal(result, expected) + + # Check RoundTemporalOptions defaults + if unit == "day": + frequency = "1D" + + result = pc.ceil_temporal(ta).to_pandas() + expected = ts.dt.ceil(frequency) + np.testing.assert_array_equal(result, expected) + + result = pc.floor_temporal(ta).to_pandas() + expected = ts.dt.floor(frequency) + np.testing.assert_array_equal(result, expected) + + result = pc.round_temporal(ta).to_pandas() + expected = ts.dt.round(frequency) + np.testing.assert_array_equal(result, expected) + + +@pytest.mark.skipif(sys.platform == "win32" and not util.windows_has_tzdata(), + reason="Timezone database is not installed on Windows") +@pytest.mark.parametrize('unit', ("nanosecond", "microsecond", "millisecond", + "second", "minute", "hour", "day")) +@pytest.mark.pandas +def test_round_temporal(unit): + values = (1, 2, 3, 4, 5, 6, 7, 10, 15, 24, 60, 250, 500, 750) + timestamps = [ + "1923-07-07 08:52:35.203790336", + "1931-03-17 10:45:00.641559040", + "1932-06-16 01:16:42.911994368", + "1941-05-27 11:46:43.822831872", + "1943-12-14 07:32:05.424766464", + "1954-04-12 04:31:50.699881472", + "1966-02-12 17:41:28.693282560", + "1967-02-26 05:56:46.922376960", + "1975-11-01 10:55:37.016146432", + "1982-01-21 18:43:44.517366784", + "1992-01-01 00:00:00.100000000", + "1999-12-04 05:55:34.794991104", + "2026-10-26 08:39:00.316686848"] + ts = pd.Series([pd.Timestamp(x, unit="ns") for x in timestamps]) + _check_temporal_rounding(ts, values, unit) + + timezones = ["Asia/Kolkata", "America/New_York", "Etc/GMT-4", "Etc/GMT+4", + "Europe/Brussels", "Pacific/Marquesas", "US/Central", "UTC"] + + for timezone in timezones: + ts_zoned = ts.dt.tz_localize("UTC").dt.tz_convert(timezone) + _check_temporal_rounding(ts_zoned, values, unit) + + +def test_count(): + arr = pa.array([1, 2, 3, None, None]) + assert pc.count(arr).as_py() == 3 + assert pc.count(arr, mode='only_valid').as_py() == 3 + assert pc.count(arr, mode='only_null').as_py() == 2 + assert pc.count(arr, mode='all').as_py() == 5 + assert pc.count(arr, 'all').as_py() == 5 + + with pytest.raises(ValueError, + match='"something else" is not a valid count mode'): + pc.count(arr, 'something else') + + +def test_index(): + arr = pa.array([0, 1, None, 3, 4], type=pa.int64()) + assert pc.index(arr, pa.scalar(0)).as_py() == 0 + assert pc.index(arr, pa.scalar(2, type=pa.int8())).as_py() == -1 + assert pc.index(arr, 4).as_py() == 4 + assert arr.index(3, start=2).as_py() == 3 + assert arr.index(None).as_py() == -1 + + arr = pa.chunked_array([[1, 2], [1, 3]], type=pa.int64()) + assert arr.index(1).as_py() == 0 + assert arr.index(1, start=2).as_py() == 2 + assert arr.index(1, start=1, end=2).as_py() == -1 + + +def check_partition_nth(data, indices, pivot, null_placement): + indices = indices.to_pylist() + assert len(indices) == len(data) + assert sorted(indices) == list(range(len(data))) + until_pivot = [data[indices[i]] for i in range(pivot)] + after_pivot = [data[indices[i]] for i in range(pivot, len(data))] + p = data[indices[pivot]] + if p is None: + if null_placement == "at_start": + assert all(v is None for v in until_pivot) + else: + assert all(v is None for v in after_pivot) + else: + if null_placement == "at_start": + assert all(v 
is None or v <= p for v in until_pivot) + assert all(v >= p for v in after_pivot) + else: + assert all(v <= p for v in until_pivot) + assert all(v is None or v >= p for v in after_pivot) + + +def test_partition_nth(): + data = list(range(100, 140)) + random.shuffle(data) + pivot = 10 + indices = pc.partition_nth_indices(data, pivot=pivot) + check_partition_nth(data, indices, pivot, "at_end") + # Positional pivot argument + assert pc.partition_nth_indices(data, pivot) == indices + + with pytest.raises( + ValueError, + match="'partition_nth_indices' cannot be called without options"): + pc.partition_nth_indices(data) + + +def test_partition_nth_null_placement(): + data = list(range(10)) + [None] * 10 + random.shuffle(data) + + for pivot in (0, 7, 13, 19): + for null_placement in ("at_start", "at_end"): + indices = pc.partition_nth_indices(data, pivot=pivot, + null_placement=null_placement) + check_partition_nth(data, indices, pivot, null_placement) + + +def test_select_k_array(): + def validate_select_k(select_k_indices, arr, order, stable_sort=False): + sorted_indices = pc.sort_indices(arr, sort_keys=[("dummy", order)]) + head_k_indices = sorted_indices.slice(0, len(select_k_indices)) + if stable_sort: + assert select_k_indices == head_k_indices + else: + expected = pc.take(arr, head_k_indices) + actual = pc.take(arr, select_k_indices) + assert actual == expected + + arr = pa.array([1, 2, None, 0]) + for k in [0, 2, 4]: + for order in ["descending", "ascending"]: + result = pc.select_k_unstable( + arr, k=k, sort_keys=[("dummy", order)]) + validate_select_k(result, arr, order) + + result = pc.top_k_unstable(arr, k=k) + validate_select_k(result, arr, "descending") + + result = pc.bottom_k_unstable(arr, k=k) + validate_select_k(result, arr, "ascending") + + result = pc.select_k_unstable( + arr, options=pc.SelectKOptions( + k=2, sort_keys=[("dummy", "descending")]) + ) + validate_select_k(result, arr, "descending") + + result = pc.select_k_unstable( + arr, options=pc.SelectKOptions(k=2, sort_keys=[("dummy", "ascending")]) + ) + validate_select_k(result, arr, "ascending") + + # Position options + assert pc.select_k_unstable(arr, 2, + sort_keys=[("dummy", "ascending")]) == result + assert pc.select_k_unstable(arr, 2, [("dummy", "ascending")]) == result + + +def test_select_k_table(): + def validate_select_k(select_k_indices, tbl, sort_keys, stable_sort=False): + sorted_indices = pc.sort_indices(tbl, sort_keys=sort_keys) + head_k_indices = sorted_indices.slice(0, len(select_k_indices)) + if stable_sort: + assert select_k_indices == head_k_indices + else: + expected = pc.take(tbl, head_k_indices) + actual = pc.take(tbl, select_k_indices) + assert actual == expected + + table = pa.table({"a": [1, 2, 0], "b": [1, 0, 1]}) + for k in [0, 2, 4]: + result = pc.select_k_unstable( + table, k=k, sort_keys=[("a", "ascending")]) + validate_select_k(result, table, sort_keys=[("a", "ascending")]) + + result = pc.select_k_unstable( + table, k=k, sort_keys=[(pc.field("a"), "ascending"), ("b", "ascending")]) + validate_select_k( + result, table, sort_keys=[("a", "ascending"), ("b", "ascending")]) + + result = pc.top_k_unstable(table, k=k, sort_keys=["a"]) + validate_select_k(result, table, sort_keys=[("a", "descending")]) + + result = pc.bottom_k_unstable(table, k=k, sort_keys=["a", "b"]) + validate_select_k( + result, table, sort_keys=[("a", "ascending"), ("b", "ascending")]) + + with pytest.raises( + ValueError, + match="'select_k_unstable' cannot be called without options"): + pc.select_k_unstable(table) + + 
with pytest.raises(ValueError, + match="select_k_unstable requires a nonnegative `k`"): + pc.select_k_unstable(table, k=-1, sort_keys=[("a", "ascending")]) + + with pytest.raises(ValueError, + match="select_k_unstable requires a " + "non-empty `sort_keys`"): + pc.select_k_unstable(table, k=2, sort_keys=[]) + + with pytest.raises(ValueError, match="not a valid sort order"): + pc.select_k_unstable(table, k=k, sort_keys=[("a", "nonscending")]) + + with pytest.raises(ValueError, + match="Invalid sort key column: No match for.*unknown"): + pc.select_k_unstable(table, k=k, sort_keys=[("unknown", "ascending")]) + + +def test_array_sort_indices(): + arr = pa.array([1, 2, None, 0]) + result = pc.array_sort_indices(arr) + assert result.to_pylist() == [3, 0, 1, 2] + result = pc.array_sort_indices(arr, order="ascending") + assert result.to_pylist() == [3, 0, 1, 2] + result = pc.array_sort_indices(arr, order="descending") + assert result.to_pylist() == [1, 0, 3, 2] + result = pc.array_sort_indices(arr, order="descending", + null_placement="at_start") + assert result.to_pylist() == [2, 1, 0, 3] + result = pc.array_sort_indices(arr, "descending", + null_placement="at_start") + assert result.to_pylist() == [2, 1, 0, 3] + + with pytest.raises(ValueError, match="not a valid sort order"): + pc.array_sort_indices(arr, order="nonscending") + + +def test_sort_indices_array(): + arr = pa.array([1, 2, None, 0]) + result = pc.sort_indices(arr) + assert result.to_pylist() == [3, 0, 1, 2] + result = pc.sort_indices(arr, sort_keys=[("dummy", "ascending")]) + assert result.to_pylist() == [3, 0, 1, 2] + result = pc.sort_indices(arr, sort_keys=[("dummy", "descending")]) + assert result.to_pylist() == [1, 0, 3, 2] + result = pc.sort_indices(arr, sort_keys=[("dummy", "descending")], + null_placement="at_start") + assert result.to_pylist() == [2, 1, 0, 3] + # Positional `sort_keys` + result = pc.sort_indices(arr, [("dummy", "descending")], + null_placement="at_start") + assert result.to_pylist() == [2, 1, 0, 3] + # Using SortOptions + result = pc.sort_indices( + arr, options=pc.SortOptions(sort_keys=[("dummy", "descending")]) + ) + assert result.to_pylist() == [1, 0, 3, 2] + result = pc.sort_indices( + arr, options=pc.SortOptions(sort_keys=[("dummy", "descending")], + null_placement="at_start") + ) + assert result.to_pylist() == [2, 1, 0, 3] + + +def test_sort_indices_table(): + table = pa.table({"a": [1, 1, None, 0], "b": [1, 0, 0, 1]}) + + result = pc.sort_indices(table, sort_keys=[("a", "ascending")]) + assert result.to_pylist() == [3, 0, 1, 2] + result = pc.sort_indices(table, sort_keys=[(pc.field("a"), "ascending")], + null_placement="at_start") + assert result.to_pylist() == [2, 3, 0, 1] + + result = pc.sort_indices( + table, sort_keys=[("a", "descending"), ("b", "ascending")] + ) + assert result.to_pylist() == [1, 0, 3, 2] + result = pc.sort_indices( + table, sort_keys=[("a", "descending"), ("b", "ascending")], + null_placement="at_start" + ) + assert result.to_pylist() == [2, 1, 0, 3] + # Positional `sort_keys` + result = pc.sort_indices( + table, [("a", "descending"), ("b", "ascending")], + null_placement="at_start" + ) + assert result.to_pylist() == [2, 1, 0, 3] + + with pytest.raises(ValueError, match="Must specify one or more sort keys"): + pc.sort_indices(table) + + with pytest.raises(ValueError, + match="Invalid sort key column: No match for.*unknown"): + pc.sort_indices(table, sort_keys=[("unknown", "ascending")]) + + with pytest.raises(ValueError, match="not a valid sort order"): + pc.sort_indices(table, 
sort_keys=[("a", "nonscending")]) + + +def test_is_in(): + arr = pa.array([1, 2, None, 1, 2, 3]) + + result = pc.is_in(arr, value_set=pa.array([1, 3, None])) + assert result.to_pylist() == [True, False, True, True, False, True] + + result = pc.is_in(arr, value_set=pa.array([1, 3, None]), skip_nulls=True) + assert result.to_pylist() == [True, False, False, True, False, True] + + result = pc.is_in(arr, value_set=pa.array([1, 3])) + assert result.to_pylist() == [True, False, False, True, False, True] + + result = pc.is_in(arr, value_set=pa.array([1, 3]), skip_nulls=True) + assert result.to_pylist() == [True, False, False, True, False, True] + + +def test_index_in(): + arr = pa.array([1, 2, None, 1, 2, 3]) + + result = pc.index_in(arr, value_set=pa.array([1, 3, None])) + assert result.to_pylist() == [0, None, 2, 0, None, 1] + + result = pc.index_in(arr, value_set=pa.array([1, 3, None]), + skip_nulls=True) + assert result.to_pylist() == [0, None, None, 0, None, 1] + + result = pc.index_in(arr, value_set=pa.array([1, 3])) + assert result.to_pylist() == [0, None, None, 0, None, 1] + + result = pc.index_in(arr, value_set=pa.array([1, 3]), skip_nulls=True) + assert result.to_pylist() == [0, None, None, 0, None, 1] + + # Positional value_set + result = pc.index_in(arr, pa.array([1, 3]), skip_nulls=True) + assert result.to_pylist() == [0, None, None, 0, None, 1] + + +def test_quantile(): + arr = pa.array([1, 2, 3, 4]) + + result = pc.quantile(arr) + assert result.to_pylist() == [2.5] + + result = pc.quantile(arr, interpolation='lower') + assert result.to_pylist() == [2] + result = pc.quantile(arr, interpolation='higher') + assert result.to_pylist() == [3] + result = pc.quantile(arr, interpolation='nearest') + assert result.to_pylist() == [3] + result = pc.quantile(arr, interpolation='midpoint') + assert result.to_pylist() == [2.5] + result = pc.quantile(arr, interpolation='linear') + assert result.to_pylist() == [2.5] + + arr = pa.array([1, 2]) + + result = pc.quantile(arr, q=[0.25, 0.5, 0.75]) + assert result.to_pylist() == [1.25, 1.5, 1.75] + + result = pc.quantile(arr, q=[0.25, 0.5, 0.75], interpolation='lower') + assert result.to_pylist() == [1, 1, 1] + result = pc.quantile(arr, q=[0.25, 0.5, 0.75], interpolation='higher') + assert result.to_pylist() == [2, 2, 2] + result = pc.quantile(arr, q=[0.25, 0.5, 0.75], interpolation='midpoint') + assert result.to_pylist() == [1.5, 1.5, 1.5] + result = pc.quantile(arr, q=[0.25, 0.5, 0.75], interpolation='nearest') + assert result.to_pylist() == [1, 1, 2] + result = pc.quantile(arr, q=[0.25, 0.5, 0.75], interpolation='linear') + assert result.to_pylist() == [1.25, 1.5, 1.75] + + # Positional `q` + result = pc.quantile(arr, [0.25, 0.5, 0.75], interpolation='linear') + assert result.to_pylist() == [1.25, 1.5, 1.75] + + with pytest.raises(ValueError, match="Quantile must be between 0 and 1"): + pc.quantile(arr, q=1.1) + with pytest.raises(ValueError, match="not a valid quantile interpolation"): + pc.quantile(arr, interpolation='zzz') + + +def test_tdigest(): + arr = pa.array([1, 2, 3, 4]) + result = pc.tdigest(arr) + assert result.to_pylist() == [2.5] + + arr = pa.chunked_array([pa.array([1, 2]), pa.array([3, 4])]) + result = pc.tdigest(arr) + assert result.to_pylist() == [2.5] + + arr = pa.array([1, 2, 3, 4]) + result = pc.tdigest(arr, q=[0, 0.5, 1]) + assert result.to_pylist() == [1, 2.5, 4] + + arr = pa.chunked_array([pa.array([1, 2]), pa.array([3, 4])]) + result = pc.tdigest(arr, [0, 0.5, 1]) # positional `q` + assert result.to_pylist() == [1, 2.5, 4] + + 
+def test_fill_null_segfault(): + # ARROW-12672 + arr = pa.array([None], pa.bool_()).fill_null(False) + result = arr.cast(pa.int8()) + assert result == pa.array([0], pa.int8()) + + +def test_min_max_element_wise(): + arr1 = pa.array([1, 2, 3]) + arr2 = pa.array([3, 1, 2]) + arr3 = pa.array([2, 3, None]) + + result = pc.max_element_wise(arr1, arr2) + assert result == pa.array([3, 2, 3]) + result = pc.min_element_wise(arr1, arr2) + assert result == pa.array([1, 1, 2]) + + result = pc.max_element_wise(arr1, arr2, arr3) + assert result == pa.array([3, 3, 3]) + result = pc.min_element_wise(arr1, arr2, arr3) + assert result == pa.array([1, 1, 2]) + + # with specifying the option + result = pc.max_element_wise(arr1, arr3, skip_nulls=True) + assert result == pa.array([2, 3, 3]) + result = pc.min_element_wise(arr1, arr3, skip_nulls=True) + assert result == pa.array([1, 2, 3]) + result = pc.max_element_wise( + arr1, arr3, options=pc.ElementWiseAggregateOptions()) + assert result == pa.array([2, 3, 3]) + result = pc.min_element_wise( + arr1, arr3, options=pc.ElementWiseAggregateOptions()) + assert result == pa.array([1, 2, 3]) + + # not skipping nulls + result = pc.max_element_wise(arr1, arr3, skip_nulls=False) + assert result == pa.array([2, 3, None]) + result = pc.min_element_wise(arr1, arr3, skip_nulls=False) + assert result == pa.array([1, 2, None]) + + +@pytest.mark.parametrize('start', (1.25, 10.5, -10.5)) +@pytest.mark.parametrize('skip_nulls', (True, False)) +def test_cumulative_sum(start, skip_nulls): + # Exact tests (e.g., integral types) + start_int = int(start) + starts = [None, start_int, pa.scalar(start_int, type=pa.int8()), + pa.scalar(start_int, type=pa.int64())] + for strt in starts: + arrays = [ + pa.array([1, 2, 3]), + pa.array([0, None, 20, 30]), + pa.chunked_array([[0, None], [20, 30]]) + ] + expected_arrays = [ + pa.array([1, 3, 6]), + pa.array([0, None, 20, 50]) + if skip_nulls else pa.array([0, None, None, None]), + pa.chunked_array([[0, None, 20, 50]]) + if skip_nulls else pa.chunked_array([[0, None, None, None]]) + ] + for i, arr in enumerate(arrays): + result = pc.cumulative_sum(arr, start=strt, skip_nulls=skip_nulls) + # Add `start` offset to expected array before comparing + expected = pc.add(expected_arrays[i], strt if strt is not None + else 0) + assert result.equals(expected) + + starts = [None, start, pa.scalar(start, type=pa.float32()), + pa.scalar(start, type=pa.float64())] + for strt in starts: + arrays = [ + pa.array([1.125, 2.25, 3.03125]), + pa.array([1, np.nan, 2, -3, 4, 5]), + pa.array([1, np.nan, None, 3, None, 5]) + ] + expected_arrays = [ + np.array([1.125, 3.375, 6.40625]), + np.array([1, np.nan, np.nan, np.nan, np.nan, np.nan]), + np.array([1, np.nan, None, np.nan, None, np.nan]) + if skip_nulls else np.array([1, np.nan, None, None, None, None]) + ] + for i, arr in enumerate(arrays): + result = pc.cumulative_sum(arr, start=strt, skip_nulls=skip_nulls) + # Add `start` offset to expected array before comparing + expected = pc.add(expected_arrays[i], strt if strt is not None + else 0) + np.testing.assert_array_almost_equal(result.to_numpy( + zero_copy_only=False), expected.to_numpy(zero_copy_only=False)) + + for strt in ['a', pa.scalar('arrow'), 1.1]: + with pytest.raises(pa.ArrowInvalid): + pc.cumulative_sum([1, 2, 3], start=strt) + + +@pytest.mark.parametrize('start', (1.25, 10.5, -10.5)) +@pytest.mark.parametrize('skip_nulls', (True, False)) +def test_cumulative_prod(start, skip_nulls): + # Exact tests (e.g., integral types) + start_int = int(start) + 
starts = [None, start_int, pa.scalar(start_int, type=pa.int8()), + pa.scalar(start_int, type=pa.int64())] + for strt in starts: + arrays = [ + pa.array([1, 2, 3]), + pa.array([1, None, 20, 5]), + pa.chunked_array([[1, None], [20, 5]]) + ] + expected_arrays = [ + pa.array([1, 2, 6]), + pa.array([1, None, 20, 100]) + if skip_nulls else pa.array([1, None, None, None]), + pa.chunked_array([[1, None, 20, 100]]) + if skip_nulls else pa.chunked_array([[1, None, None, None]]) + ] + for i, arr in enumerate(arrays): + result = pc.cumulative_prod(arr, start=strt, skip_nulls=skip_nulls) + # Multiply `start` offset to expected array before comparing + expected = pc.multiply(expected_arrays[i], strt if strt is not None + else 1) + assert result.equals(expected) + + starts = [None, start, pa.scalar(start, type=pa.float32()), + pa.scalar(start, type=pa.float64())] + for strt in starts: + arrays = [ + pa.array([1.5, 2.5, 3.5]), + pa.array([1, np.nan, 2, -3, 4, 5]), + pa.array([1, np.nan, None, 3, None, 5]) + ] + expected_arrays = [ + np.array([1.5, 3.75, 13.125]), + np.array([1, np.nan, np.nan, np.nan, np.nan, np.nan]), + np.array([1, np.nan, None, np.nan, None, np.nan]) + if skip_nulls else np.array([1, np.nan, None, None, None, None]) + ] + for i, arr in enumerate(arrays): + result = pc.cumulative_prod(arr, start=strt, skip_nulls=skip_nulls) + # Multiply `start` offset to expected array before comparing + expected = pc.multiply(expected_arrays[i], strt if strt is not None + else 1) + np.testing.assert_array_almost_equal(result.to_numpy( + zero_copy_only=False), expected.to_numpy(zero_copy_only=False)) + + for strt in ['a', pa.scalar('arrow'), 1.1]: + with pytest.raises(pa.ArrowInvalid): + pc.cumulative_prod([1, 2, 3], start=strt) + + +@pytest.mark.parametrize('start', (0.5, 3.5, 6.5)) +@pytest.mark.parametrize('skip_nulls', (True, False)) +def test_cumulative_max(start, skip_nulls): + # Exact tests (e.g., integral types) + start_int = int(start) + starts = [None, start_int, pa.scalar(start_int, type=pa.int8()), + pa.scalar(start_int, type=pa.int64())] + for strt in starts: + arrays = [ + pa.array([2, 1, 3, 5, 4, 6]), + pa.array([2, 1, None, 5, 4, None]), + pa.chunked_array([[2, 1, None], [5, 4, None]]) + ] + expected_arrays = [ + pa.array([2, 2, 3, 5, 5, 6]), + pa.array([2, 2, None, 5, 5, None]) + if skip_nulls else pa.array([2, 2, None, None, None, None]), + pa.chunked_array([[2, 2, None, 5, 5, None]]) + if skip_nulls else + pa.chunked_array([[2, 2, None, None, None, None]]) + ] + for i, arr in enumerate(arrays): + result = pc.cumulative_max(arr, start=strt, skip_nulls=skip_nulls) + # Max `start` offset with expected array before comparing + expected = pc.max_element_wise( + expected_arrays[i], strt if strt is not None else int(-1e9), + skip_nulls=False) + assert result.equals(expected) + + starts = [None, start, pa.scalar(start, type=pa.float32()), + pa.scalar(start, type=pa.float64())] + for strt in starts: + arrays = [ + pa.array([2.5, 1.3, 3.7, 5.1, 4.9, 6.2]), + pa.array([2.5, 1.3, 3.7, np.nan, 4.9, 6.2]), + pa.array([2.5, 1.3, None, np.nan, 4.9, None]) + ] + expected_arrays = [ + np.array([2.5, 2.5, 3.7, 5.1, 5.1, 6.2]), + np.array([2.5, 2.5, 3.7, 3.7, 4.9, 6.2]), + np.array([2.5, 2.5, None, 2.5, 4.9, None]) + if skip_nulls else np.array([2.5, 2.5, None, None, None, None]) + ] + for i, arr in enumerate(arrays): + result = pc.cumulative_max(arr, start=strt, skip_nulls=skip_nulls) + # Max `start` offset with expected array before comparing + expected = pc.max_element_wise( + expected_arrays[i], strt 
if strt is not None else -1e9, + skip_nulls=False) + np.testing.assert_array_almost_equal(result.to_numpy( + zero_copy_only=False), expected.to_numpy(zero_copy_only=False)) + + for strt in ['a', pa.scalar('arrow'), 1.1]: + with pytest.raises(pa.ArrowInvalid): + pc.cumulative_max([1, 2, 3], start=strt) + + +@pytest.mark.parametrize('start', (0.5, 3.5, 6.5)) +@pytest.mark.parametrize('skip_nulls', (True, False)) +def test_cumulative_min(start, skip_nulls): + # Exact tests (e.g., integral types) + start_int = int(start) + starts = [None, start_int, pa.scalar(start_int, type=pa.int8()), + pa.scalar(start_int, type=pa.int64())] + for strt in starts: + arrays = [ + pa.array([5, 6, 4, 2, 3, 1]), + pa.array([5, 6, None, 2, 3, None]), + pa.chunked_array([[5, 6, None], [2, 3, None]]) + ] + expected_arrays = [ + pa.array([5, 5, 4, 2, 2, 1]), + pa.array([5, 5, None, 2, 2, None]) + if skip_nulls else pa.array([5, 5, None, None, None, None]), + pa.chunked_array([[5, 5, None, 2, 2, None]]) + if skip_nulls else + pa.chunked_array([[5, 5, None, None, None, None]]) + ] + for i, arr in enumerate(arrays): + result = pc.cumulative_min(arr, start=strt, skip_nulls=skip_nulls) + # Min `start` offset with expected array before comparing + expected = pc.min_element_wise( + expected_arrays[i], strt if strt is not None else int(1e9), + skip_nulls=False) + assert result.equals(expected) + + starts = [None, start, pa.scalar(start, type=pa.float32()), + pa.scalar(start, type=pa.float64())] + for strt in starts: + arrays = [ + pa.array([5.5, 6.3, 4.7, 2.1, 3.9, 1.2]), + pa.array([5.5, 6.3, 4.7, np.nan, 3.9, 1.2]), + pa.array([5.5, 6.3, None, np.nan, 3.9, None]) + ] + expected_arrays = [ + np.array([5.5, 5.5, 4.7, 2.1, 2.1, 1.2]), + np.array([5.5, 5.5, 4.7, 4.7, 3.9, 1.2]), + np.array([5.5, 5.5, None, 5.5, 3.9, None]) + if skip_nulls else np.array([5.5, 5.5, None, None, None, None]) + ] + for i, arr in enumerate(arrays): + result = pc.cumulative_min(arr, start=strt, skip_nulls=skip_nulls) + # Min `start` offset with expected array before comparing + expected = pc.min_element_wise( + expected_arrays[i], strt if strt is not None else 1e9, + skip_nulls=False) + np.testing.assert_array_almost_equal(result.to_numpy( + zero_copy_only=False), expected.to_numpy(zero_copy_only=False)) + + for strt in ['a', pa.scalar('arrow'), 1.1]: + with pytest.raises(pa.ArrowInvalid): + pc.cumulative_max([1, 2, 3], start=strt) + + +def test_make_struct(): + assert pc.make_struct(1, 'a').as_py() == {'0': 1, '1': 'a'} + + assert pc.make_struct(1, 'a', field_names=['i', 's']).as_py() == { + 'i': 1, 's': 'a'} + + assert pc.make_struct([1, 2, 3], + "a b c".split()) == pa.StructArray.from_arrays([ + [1, 2, 3], + "a b c".split()], names='0 1'.split()) + + with pytest.raises(ValueError, + match="Array arguments must all be the same length"): + pc.make_struct([1, 2, 3, 4], "a b c".split()) + + with pytest.raises(ValueError, match="0 arguments but 2 field names"): + pc.make_struct(field_names=['one', 'two']) + + +def test_map_lookup(): + ty = pa.map_(pa.utf8(), pa.int32()) + arr = pa.array([[('one', 1), ('two', 2)], [('none', 3)], + [], [('one', 5), ('one', 7)], None], type=ty) + result_first = pa.array([1, None, None, 5, None], type=pa.int32()) + result_last = pa.array([1, None, None, 7, None], type=pa.int32()) + result_all = pa.array([[1], None, None, [5, 7], None], + type=pa.list_(pa.int32())) + + assert pc.map_lookup(arr, 'one', 'first') == result_first + assert pc.map_lookup(arr, pa.scalar( + 'one', type=pa.utf8()), 'first') == result_first + assert 
pc.map_lookup(arr, pa.scalar( + 'one', type=pa.utf8()), 'last') == result_last + assert pc.map_lookup(arr, pa.scalar( + 'one', type=pa.utf8()), 'all') == result_all + + +def test_struct_fields_options(): + a = pa.array([4, 5, 6], type=pa.int64()) + b = pa.array(["bar", None, ""]) + c = pa.StructArray.from_arrays([a, b], ["a", "b"]) + arr = pa.StructArray.from_arrays([a, c], ["a", "c"]) + + assert pc.struct_field(arr, '.c.b') == b + assert pc.struct_field(arr, b'.c.b') == b + assert pc.struct_field(arr, ['c', 'b']) == b + assert pc.struct_field(arr, [1, 'b']) == b + assert pc.struct_field(arr, (b'c', 'b')) == b + assert pc.struct_field(arr, pc.field(('c', 'b'))) == b + + assert pc.struct_field(arr, '.a') == a + assert pc.struct_field(arr, ['a']) == a + assert pc.struct_field(arr, 'a') == a + assert pc.struct_field(arr, pc.field(('a',))) == a + + assert pc.struct_field(arr, indices=[1, 1]) == b + assert pc.struct_field(arr, (1, 1)) == b + assert pc.struct_field(arr, [0]) == a + assert pc.struct_field(arr, []) == arr + + with pytest.raises(pa.ArrowInvalid, match="No match for FieldRef"): + pc.struct_field(arr, 'foo') + + with pytest.raises(pa.ArrowInvalid, match="No match for FieldRef"): + pc.struct_field(arr, '.c.foo') + + # drill into a non-struct array and continue to ask for a field + with pytest.raises(pa.ArrowInvalid, match="No match for FieldRef"): + pc.struct_field(arr, '.a.foo') + + # TODO: https://issues.apache.org/jira/browse/ARROW-14853 + # assert pc.struct_field(arr) == arr + + +def test_case_when(): + assert pc.case_when(pc.make_struct([True, False, None], + [False, True, None]), + [1, 2, 3], + [11, 12, 13]) == pa.array([1, 12, None]) + + +def test_list_element(): + element_type = pa.struct([('a', pa.float64()), ('b', pa.int8())]) + list_type = pa.list_(element_type) + l1 = [{'a': .4, 'b': 2}, None, {'a': .2, 'b': 4}, None, {'a': 5.6, 'b': 6}] + l2 = [None, {'a': .52, 'b': 3}, {'a': .7, 'b': 4}, None, {'a': .6, 'b': 8}] + lists = pa.array([l1, l2], list_type) + + index = 1 + result = pa.compute.list_element(lists, index) + expected = pa.array([None, {'a': 0.52, 'b': 3}], element_type) + assert result.equals(expected) + + index = 4 + result = pa.compute.list_element(lists, index) + expected = pa.array([{'a': 5.6, 'b': 6}, {'a': .6, 'b': 8}], element_type) + assert result.equals(expected) + + +def test_count_distinct(): + samples = [datetime.datetime(year=y, month=1, day=1) for y in range(1992, 2092)] + arr = pa.array(samples, pa.timestamp("ns")) + assert pc.count_distinct(arr) == pa.scalar(len(samples), type=pa.int64()) + + +def test_count_distinct_options(): + arr = pa.array([1, 2, 3, None, None]) + assert pc.count_distinct(arr).as_py() == 3 + assert pc.count_distinct(arr, mode='only_valid').as_py() == 3 + assert pc.count_distinct(arr, mode='only_null').as_py() == 1 + assert pc.count_distinct(arr, mode='all').as_py() == 4 + assert pc.count_distinct(arr, 'all').as_py() == 4 + + +def test_utf8_normalize(): + arr = pa.array(["01²3"]) + assert pc.utf8_normalize(arr, form="NFC") == arr + assert pc.utf8_normalize(arr, form="NFKC") == pa.array(["0123"]) + assert pc.utf8_normalize(arr, "NFD") == arr + assert pc.utf8_normalize(arr, "NFKD") == pa.array(["0123"]) + with pytest.raises( + ValueError, + match='"NFZ" is not a valid Unicode normalization form'): + pc.utf8_normalize(arr, form="NFZ") + + +def test_random(): + # (note negative integer initializers are accepted) + for initializer in ['system', 42, -42, b"abcdef"]: + assert pc.random(0, initializer=initializer) == \ + pa.array([], 
type=pa.float64()) + + # System random initialization => outputs all distinct + arrays = [tuple(pc.random(100).to_pylist()) for i in range(10)] + assert len(set(arrays)) == len(arrays) + + arrays = [tuple(pc.random(100, initializer=i % 7).to_pylist()) + for i in range(0, 100)] + assert len(set(arrays)) == 7 + + # Arbitrary hashable objects can be given as initializer + initializers = [object(), (4, 5, 6), "foo"] + initializers.extend(os.urandom(10) for i in range(10)) + arrays = [tuple(pc.random(100, initializer=i).to_pylist()) + for i in initializers] + assert len(set(arrays)) == len(arrays) + + with pytest.raises(TypeError, + match=r"initializer should be 'system', an integer, " + r"or a hashable object; got \[\]"): + pc.random(100, initializer=[]) + + +@pytest.mark.parametrize( + "tiebreaker,expected_values", + [("min", [3, 1, 4, 6, 4, 6, 1]), + ("max", [3, 2, 5, 7, 5, 7, 2]), + ("first", [3, 1, 4, 6, 5, 7, 2]), + ("dense", [2, 1, 3, 4, 3, 4, 1])] +) +def test_rank_options_tiebreaker(tiebreaker, expected_values): + arr = pa.array([1.2, 0.0, 5.3, None, 5.3, None, 0.0]) + rank_options = pc.RankOptions(sort_keys="ascending", + null_placement="at_end", + tiebreaker=tiebreaker) + result = pc.rank(arr, options=rank_options) + expected = pa.array(expected_values, type=pa.uint64()) + assert result.equals(expected) + + +def test_rank_options(): + arr = pa.array([1.2, 0.0, 5.3, None, 5.3, None, 0.0]) + expected = pa.array([3, 1, 4, 6, 5, 7, 2], type=pa.uint64()) + + # Ensure rank can be called without specifying options + result = pc.rank(arr) + assert result.equals(expected) + + # Ensure default RankOptions + result = pc.rank(arr, options=pc.RankOptions()) + assert result.equals(expected) + + # Ensure sort_keys tuple usage + result = pc.rank(arr, options=pc.RankOptions( + sort_keys=[("b", "ascending")]) + ) + assert result.equals(expected) + + result = pc.rank(arr, null_placement="at_start") + expected_at_start = pa.array([5, 3, 6, 1, 7, 2, 4], type=pa.uint64()) + assert result.equals(expected_at_start) + + result = pc.rank(arr, sort_keys="descending") + expected_descending = pa.array([3, 4, 1, 6, 2, 7, 5], type=pa.uint64()) + assert result.equals(expected_descending) + + with pytest.raises(ValueError, + match=r'"NonExisting" is not a valid tiebreaker'): + pc.RankOptions(sort_keys="descending", + null_placement="at_end", + tiebreaker="NonExisting") + + +def create_sample_expressions(): + # We need a schema for substrait conversion + schema = pa.schema([pa.field("i64", pa.int64()), pa.field( + "foo", pa.struct([pa.field("bar", pa.string())]))]) + + # Creates a bunch of sample expressions for testing + # serialization and deserialization. The expressions are categorized + # to reflect certain nuances in Substrait conversion. 
+ a = pc.scalar(1) + b = pc.scalar(1.1) + c = pc.scalar(True) + d = pc.scalar("string") + e = pc.scalar(None) + f = pc.scalar({'a': 1}) + g = pc.scalar(pa.scalar(1)) + h = pc.scalar(np.int64(2)) + j = pc.scalar(False) + + # These expression consist entirely of literals + literal_exprs = [a, b, c, d, e, g, h, j] + + # These expressions include at least one function call + exprs_with_call = [a == b, a != b, a > b, c & j, c | j, ~c, d.is_valid(), + a + b, a - b, a * b, a / b, pc.negate(a), + pc.add(a, b), pc.subtract(a, b), pc.divide(a, b), + pc.multiply(a, b), pc.power(a, a), pc.sqrt(a), + pc.exp(b), pc.cos(b), pc.sin(b), pc.tan(b), + pc.acos(b), pc.atan(b), pc.asin(b), pc.atan2(b, b), + pc.abs(b), pc.sign(a), pc.bit_wise_not(a), + pc.bit_wise_and(a, a), pc.bit_wise_or(a, a), + pc.bit_wise_xor(a, a), pc.is_nan(b), pc.is_finite(b), + pc.coalesce(a, b), + a.cast(pa.int32(), safe=False)] + + # These expressions test out various reference styles and may include function + # calls. Named references are used here. + exprs_with_ref = [pc.field('i64') > 5, pc.field('i64') == 5, + pc.field('i64') == 7, + pc.field(('foo', 'bar')) == 'value', + pc.field('foo', 'bar') == 'value'] + + # Similar to above but these use numeric references instead of string refs + exprs_with_numeric_refs = [pc.field(0) > 5, pc.field(0) == 5, + pc.field(0) == 7, + pc.field((1, 0)) == 'value', + pc.field(1, 0) == 'value'] + + # Expressions that behave uniquely when converting to/from substrait + special_cases = [ + f, # Struct literals lose their field names + a.isin([1, 2, 3]), # isin converts to an or list + pc.field('i64').is_null() # pyarrow always specifies a FunctionOptions + # for is_null which, being the default, is + # dropped on serialization + ] + + all_exprs = literal_exprs.copy() + all_exprs += exprs_with_call + all_exprs += exprs_with_ref + all_exprs += special_cases + + return { + "all": all_exprs, + "literals": literal_exprs, + "calls": exprs_with_call, + "refs": exprs_with_ref, + "numeric_refs": exprs_with_numeric_refs, + "special": special_cases, + "schema": schema + } + +# Tests the Arrow-specific serialization mechanism + + +def test_expression_serialization_arrow(pickle_module): + for expr in create_sample_expressions()["all"]: + assert isinstance(expr, pc.Expression) + restored = pickle_module.loads(pickle_module.dumps(expr)) + assert expr.equals(restored) + + +@pytest.mark.substrait +def test_expression_serialization_substrait(): + + exprs = create_sample_expressions() + schema = exprs["schema"] + + # Basic literals don't change on binding and so they will round + # trip without any change + for expr in exprs["literals"]: + serialized = expr.to_substrait(schema) + deserialized = pc.Expression.from_substrait(serialized) + assert expr.equals(deserialized) + + # Expressions are bound when they get serialized. Since bound + # expressions are not equal to their unbound variants we cannot + # compare the round tripped with the original + for expr in exprs["calls"]: + serialized = expr.to_substrait(schema) + deserialized = pc.Expression.from_substrait(serialized) + # We can't compare the expressions themselves because of the bound + # unbound difference. 
But we can compare the string representation + assert str(deserialized) == str(expr) + serialized_again = deserialized.to_substrait(schema) + deserialized_again = pc.Expression.from_substrait(serialized_again) + assert deserialized.equals(deserialized_again) + + for expr, expr_norm in zip(exprs["refs"], exprs["numeric_refs"]): + serialized = expr.to_substrait(schema) + deserialized = pc.Expression.from_substrait(serialized) + assert str(deserialized) == str(expr_norm) + serialized_again = deserialized.to_substrait(schema) + deserialized_again = pc.Expression.from_substrait(serialized_again) + assert deserialized.equals(deserialized_again) + + # For the special cases we get various wrinkles in serialization but we + # should always get the same thing from round tripping twice + for expr in exprs["special"]: + serialized = expr.to_substrait(schema) + deserialized = pc.Expression.from_substrait(serialized) + serialized_again = deserialized.to_substrait(schema) + deserialized_again = pc.Expression.from_substrait(serialized_again) + assert deserialized.equals(deserialized_again) + + # Special case, we lose the field names of struct literals + f = exprs["special"][0] + serialized = f.to_substrait(schema) + deserialized = pc.Expression.from_substrait(serialized) + assert deserialized.equals(pc.scalar({'': 1})) + + # Special case, is_in converts to a == opt[0] || a == opt[1] ... + a = pc.scalar(1) + expr = a.isin([1, 2, 3]) + target = (a == 1) | (a == 2) | (a == 3) + serialized = expr.to_substrait(schema) + deserialized = pc.Expression.from_substrait(serialized) + # Compare str's here to bypass the bound/unbound difference + assert str(target) == str(deserialized) + serialized_again = deserialized.to_substrait(schema) + deserialized_again = pc.Expression.from_substrait(serialized_again) + assert deserialized.equals(deserialized_again) + + +def test_expression_construction(): + zero = pc.scalar(0) + one = pc.scalar(1) + true = pc.scalar(True) + false = pc.scalar(False) + string = pc.scalar("string") + field = pc.field("field") + nested_mixed_types = pc.field(b"a", 1, "b") + nested_field = pc.field(("nested", "field")) + nested_field2 = pc.field("nested", "field") + + zero | one == string + ~true == false + for typ in ("bool", pa.bool_()): + field.cast(typ) == true + + field.isin([1, 2]) + nested_mixed_types.isin(["foo", "bar"]) + nested_field.isin(["foo", "bar"]) + nested_field2.isin(["foo", "bar"]) + + with pytest.raises(TypeError): + field.isin(1) + + with pytest.raises(pa.ArrowInvalid): + field != object() + + +def test_expression_boolean_operators(): + # https://issues.apache.org/jira/browse/ARROW-11412 + true = pc.scalar(True) + false = pc.scalar(False) + + with pytest.raises(ValueError, match="cannot be evaluated to python True"): + true and false + + with pytest.raises(ValueError, match="cannot be evaluated to python True"): + true or false + + with pytest.raises(ValueError, match="cannot be evaluated to python True"): + bool(true) + + with pytest.raises(ValueError, match="cannot be evaluated to python True"): + not true + + +def test_expression_call_function(): + field = pc.field("field") + + # no options + assert str(pc.hour(field)) == "hour(field)" + + # default options + assert str(pc.round(field)) == "round(field)" + # specified options + assert str(pc.round(field, ndigits=1)) == \ + "round(field, {ndigits=1, round_mode=HALF_TO_EVEN})" + + # Will convert non-expression arguments if possible + assert str(pc.add(field, 1)) == "add(field, 1)" + assert str(pc.add(field, pa.scalar(1))) == 
"add(field, 1)" + + # Invalid pc.scalar input gives original error message + msg = "only other expressions allowed as arguments" + with pytest.raises(TypeError, match=msg): + pc.add(field, object) + + +def test_cast_table_raises(): + table = pa.table({'a': [1, 2]}) + + with pytest.raises(pa.lib.ArrowTypeError): + pc.cast(table, pa.int64()) + + +@pytest.mark.parametrize("start,stop,expected", ( + (0, None, [[1, 2, 3], [4, 5, None], [6, None, None], None]), + (0, 1, [[1], [4], [6], None]), + (0, 2, [[1, 2], [4, 5], [6, None], None]), + (1, 2, [[2], [5], [None], None]), + (2, 4, [[3, None], [None, None], [None, None], None]) +)) +@pytest.mark.parametrize("step", (1, 2)) +@pytest.mark.parametrize("value_type", (pa.string, pa.int16, pa.float64)) +@pytest.mark.parametrize("list_type", (pa.list_, pa.large_list, "fixed")) +def test_list_slice_output_fixed(start, stop, step, expected, value_type, + list_type): + if list_type == "fixed": + arr = pa.array([[1, 2, 3], [4, 5, None], [6, None, None], None], + pa.list_(pa.int8(), 3)).cast(pa.list_(value_type(), 3)) + else: + arr = pa.array([[1, 2, 3], [4, 5], [6], None], + pa.list_(pa.int8())).cast(list_type(value_type())) + + args = arr, start, stop, step, True + if stop is None and list_type != "fixed": + msg = ("Unable to produce FixedSizeListArray from " + "non-FixedSizeListArray without `stop` being set.") + with pytest.raises(pa.ArrowNotImplementedError, match=msg): + pc.list_slice(*args) + else: + result = pc.list_slice(*args) + pylist = result.cast(pa.list_(pa.int8(), + result.type.list_size)).to_pylist() + assert pylist == [e[::step] if e else e for e in expected] + + +@pytest.mark.parametrize("start,stop", ( + (0, None,), + (0, 1,), + (0, 2,), + (1, 2,), + (2, 4,) +)) +@pytest.mark.parametrize("step", (1, 2)) +@pytest.mark.parametrize("value_type", (pa.string, pa.int16, pa.float64)) +@pytest.mark.parametrize("list_type", (pa.list_, pa.large_list, "fixed")) +def test_list_slice_output_variable(start, stop, step, value_type, list_type): + if list_type == "fixed": + data = [[1, 2, 3], [4, 5, None], [6, None, None], None] + arr = pa.array( + data, + pa.list_(pa.int8(), 3)).cast(pa.list_(value_type(), 3)) + else: + data = [[1, 2, 3], [4, 5], [6], None] + arr = pa.array(data, + pa.list_(pa.int8())).cast(list_type(value_type())) + + # Gets same list type (ListArray vs LargeList) + if list_type == "fixed": + list_type = pa.list_ # non fixed output type + + result = pc.list_slice(arr, start, stop, step, + return_fixed_size_list=False) + assert result.type == list_type(value_type()) + + pylist = result.cast(pa.list_(pa.int8())).to_pylist() + + # Variable output slicing follows Python's slice semantics + expected = [d[start:stop:step] if d is not None else None for d in data] + assert pylist == expected + + +@pytest.mark.parametrize("return_fixed_size", (True, False, None)) +@pytest.mark.parametrize("type", ( + lambda: pa.list_(pa.field('col', pa.int8())), + lambda: pa.list_(pa.field('col', pa.int8()), 1), + lambda: pa.large_list(pa.field('col', pa.int8())))) +def test_list_slice_field_names_retained(return_fixed_size, type): + arr = pa.array([[1]], type()) + out = pc.list_slice(arr, 0, 1, return_fixed_size_list=return_fixed_size) + assert arr.type.field(0).name == out.type.field(0).name + + # Verify out type matches in type if return_fixed_size_list==None + if return_fixed_size is None: + assert arr.type == out.type + + +def test_list_slice_bad_parameters(): + arr = pa.array([[1]], pa.list_(pa.int8(), 1)) + msg = r"`start`(.*) should be greater than 0 
and smaller than `stop`(.*)" + with pytest.raises(pa.ArrowInvalid, match=msg): + pc.list_slice(arr, -1, 1) # negative start? + with pytest.raises(pa.ArrowInvalid, match=msg): + pc.list_slice(arr, 2, 1) # start > stop? + + # TODO(ARROW-18281): start==stop -> empty lists + with pytest.raises(pa.ArrowInvalid, match=msg): + pc.list_slice(arr, 0, 0) # start == stop? + + # Step not >= 1 + msg = "`step` must be >= 1, got: " + with pytest.raises(pa.ArrowInvalid, match=msg + "0"): + pc.list_slice(arr, 0, 1, step=0) + with pytest.raises(pa.ArrowInvalid, match=msg + "-1"): + pc.list_slice(arr, 0, 1, step=-1) + + +def check_run_end_encode_decode(run_end_encode_opts=None): + arr = pa.array([1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3]) + encoded = pc.run_end_encode(arr, options=run_end_encode_opts) + decoded = pc.run_end_decode(encoded) + assert decoded.type == arr.type + assert decoded.equals(arr) + + +def test_run_end_encode(): + check_run_end_encode_decode() + check_run_end_encode_decode(pc.RunEndEncodeOptions(pa.int16())) + check_run_end_encode_decode(pc.RunEndEncodeOptions('int32')) + check_run_end_encode_decode(pc.RunEndEncodeOptions(pa.int64())) + + +def test_pairwise_diff(): + arr = pa.array([1, 2, 3, None, 4, 5]) + expected = pa.array([None, 1, 1, None, None, 1]) + result = pa.compute.pairwise_diff(arr, period=1) + assert result.equals(expected) + + arr = pa.array([1, 2, 3, None, 4, 5]) + expected = pa.array([None, None, 2, None, 1, None]) + result = pa.compute.pairwise_diff(arr, period=2) + assert result.equals(expected) + + # negative period + arr = pa.array([1, 2, 3, None, 4, 5], type=pa.int8()) + expected = pa.array([-1, -1, None, None, -1, None], type=pa.int8()) + result = pa.compute.pairwise_diff(arr, period=-1) + assert result.equals(expected) + + # wrap around overflow + arr = pa.array([1, 2, 3, None, 4, 5], type=pa.uint8()) + expected = pa.array([255, 255, None, None, 255, None], type=pa.uint8()) + result = pa.compute.pairwise_diff(arr, period=-1) + assert result.equals(expected) + + # fail on overflow + arr = pa.array([1, 2, 3, None, 4, 5], type=pa.uint8()) + with pytest.raises(pa.ArrowInvalid, + match="overflow"): + pa.compute.pairwise_diff_checked(arr, period=-1) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py new file mode 100644 index 0000000000000000000000000000000000000000..b824b8956437429bb6672c9dbf66774891811a47 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_convert_builtin.py @@ -0,0 +1,2536 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
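+
+# --- Editorial sketch (not part of the upstream file) -----------------------
+# The tests below exercise pa.array() conversion of built-in Python sequences
+# and scalars, including type inference. A minimal, self-contained example of
+# the behaviour under test (the helper name is illustrative only):
+def _example_builtin_conversion():
+    import pyarrow as pa  # local import: this sketch precedes the module imports
+    arr = pa.array([1, None, 3.5])  # mixed int/float infers float64; None -> null
+    assert arr.type == pa.float64()
+    assert arr.null_count == 1
+    return arr
+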
+ +import collections +import datetime +import decimal +import itertools +import math +import re +import sys + +import hypothesis as h +import numpy as np +import pytest + +from pyarrow.pandas_compat import _pandas_api # noqa +import pyarrow as pa +from pyarrow.tests import util +import pyarrow.tests.strategies as past + + +int_type_pairs = [ + (np.int8, pa.int8()), + (np.int16, pa.int16()), + (np.int32, pa.int32()), + (np.int64, pa.int64()), + (np.uint8, pa.uint8()), + (np.uint16, pa.uint16()), + (np.uint32, pa.uint32()), + (np.uint64, pa.uint64())] + + +np_int_types, pa_int_types = zip(*int_type_pairs) + + +class StrangeIterable: + def __init__(self, lst): + self.lst = lst + + def __iter__(self): + return self.lst.__iter__() + + +class MyInt: + def __init__(self, value): + self.value = value + + def __int__(self): + return self.value + + +class MyBrokenInt: + def __int__(self): + 1/0 # MARKER + + +def check_struct_type(ty, expected): + """ + Check a struct type is as expected, but not taking order into account. + """ + assert pa.types.is_struct(ty) + assert set(ty) == set(expected) + + +def test_iterable_types(): + arr1 = pa.array(StrangeIterable([0, 1, 2, 3])) + arr2 = pa.array((0, 1, 2, 3)) + + assert arr1.equals(arr2) + + +def test_empty_iterable(): + arr = pa.array(StrangeIterable([])) + assert len(arr) == 0 + assert arr.null_count == 0 + assert arr.type == pa.null() + assert arr.to_pylist() == [] + + +def test_limited_iterator_types(): + arr1 = pa.array(iter(range(3)), type=pa.int64(), size=3) + arr2 = pa.array((0, 1, 2)) + assert arr1.equals(arr2) + + +def test_limited_iterator_size_overflow(): + arr1 = pa.array(iter(range(3)), type=pa.int64(), size=2) + arr2 = pa.array((0, 1)) + assert arr1.equals(arr2) + + +def test_limited_iterator_size_underflow(): + arr1 = pa.array(iter(range(3)), type=pa.int64(), size=10) + arr2 = pa.array((0, 1, 2)) + assert arr1.equals(arr2) + + +def test_iterator_without_size(): + expected = pa.array((0, 1, 2)) + arr1 = pa.array(iter(range(3))) + assert arr1.equals(expected) + # Same with explicit type + arr1 = pa.array(iter(range(3)), type=pa.int64()) + assert arr1.equals(expected) + + +def test_infinite_iterator(): + expected = pa.array((0, 1, 2)) + arr1 = pa.array(itertools.count(0), size=3) + assert arr1.equals(expected) + # Same with explicit type + arr1 = pa.array(itertools.count(0), type=pa.int64(), size=3) + assert arr1.equals(expected) + + +def test_failing_iterator(): + with pytest.raises(ZeroDivisionError): + pa.array((1 // 0 for x in range(10))) + # ARROW-17253 + with pytest.raises(ZeroDivisionError): + pa.array((1 // 0 for x in range(10)), size=10) + + +class ObjectWithOnlyGetitem: + def __getitem__(self, key): + return 3 + + +def test_object_with_getitem(): + # https://github.com/apache/arrow/issues/34944 + # considered as sequence because of __getitem__, but has no length + with pytest.raises(TypeError, match="has no len()"): + pa.array(ObjectWithOnlyGetitem()) + + +def _as_list(xs): + return xs + + +def _as_tuple(xs): + return tuple(xs) + + +def _as_deque(xs): + # deque is a sequence while neither tuple nor list + return collections.deque(xs) + + +def _as_dict_values(xs): + # a dict values object is not a sequence, just a regular iterable + dct = {k: v for k, v in enumerate(xs)} + return dct.values() + + +def _as_numpy_array(xs): + arr = np.empty(len(xs), dtype=object) + arr[:] = xs + return arr + + +def _as_set(xs): + return set(xs) + + +SEQUENCE_TYPES = [_as_list, _as_tuple, _as_numpy_array] +ITERABLE_TYPES = [_as_set, _as_dict_values] + 
SEQUENCE_TYPES +COLLECTIONS_TYPES = [_as_deque] + ITERABLE_TYPES + +parametrize_with_iterable_types = pytest.mark.parametrize( + "seq", ITERABLE_TYPES +) + +parametrize_with_sequence_types = pytest.mark.parametrize( + "seq", SEQUENCE_TYPES +) + +parametrize_with_collections_types = pytest.mark.parametrize( + "seq", COLLECTIONS_TYPES +) + + +@parametrize_with_collections_types +def test_sequence_types(seq): + arr1 = pa.array(seq([1, 2, 3])) + arr2 = pa.array([1, 2, 3]) + + assert arr1.equals(arr2) + + +@parametrize_with_iterable_types +def test_nested_sequence_types(seq): + arr1 = pa.array([seq([1, 2, 3])]) + arr2 = pa.array([[1, 2, 3]]) + + assert arr1.equals(arr2) + + +@parametrize_with_sequence_types +def test_sequence_boolean(seq): + expected = [True, None, False, None] + arr = pa.array(seq(expected)) + assert len(arr) == 4 + assert arr.null_count == 2 + assert arr.type == pa.bool_() + assert arr.to_pylist() == expected + + +@parametrize_with_sequence_types +def test_sequence_numpy_boolean(seq): + expected = [np.bool_(True), None, np.bool_(False), None] + arr = pa.array(seq(expected)) + assert arr.type == pa.bool_() + assert arr.to_pylist() == [True, None, False, None] + + +@parametrize_with_sequence_types +def test_sequence_mixed_numpy_python_bools(seq): + values = np.array([True, False]) + arr = pa.array(seq([values[0], None, values[1], True, False])) + assert arr.type == pa.bool_() + assert arr.to_pylist() == [True, None, False, True, False] + + +@parametrize_with_collections_types +def test_empty_list(seq): + arr = pa.array(seq([])) + assert len(arr) == 0 + assert arr.null_count == 0 + assert arr.type == pa.null() + assert arr.to_pylist() == [] + + +@parametrize_with_sequence_types +def test_nested_lists(seq): + data = [[], [1, 2], None] + arr = pa.array(seq(data)) + assert len(arr) == 3 + assert arr.null_count == 1 + assert arr.type == pa.list_(pa.int64()) + assert arr.to_pylist() == data + + +@parametrize_with_sequence_types +@pytest.mark.parametrize("factory", [ + pa.list_, pa.large_list, pa.list_view, pa.large_list_view]) +def test_nested_lists_with_explicit_type(seq, factory): + data = [[], [1, 2], None] + arr = pa.array(seq(data), type=factory(pa.int16())) + assert len(arr) == 3 + assert arr.null_count == 1 + assert arr.type == factory(pa.int16()) + assert arr.to_pylist() == data + + +@parametrize_with_collections_types +def test_list_with_non_list(seq): + # List types don't accept non-sequences + with pytest.raises(TypeError): + pa.array(seq([[], [1, 2], 3]), type=pa.list_(pa.int64())) + with pytest.raises(TypeError): + pa.array(seq([[], [1, 2], 3]), type=pa.large_list(pa.int64())) + with pytest.raises(TypeError): + pa.array(seq([[], [1, 2], 3]), type=pa.list_view(pa.int64())) + with pytest.raises(TypeError): + pa.array(seq([[], [1, 2], 3]), type=pa.large_list_view(pa.int64())) + + +@parametrize_with_sequence_types +@pytest.mark.parametrize("factory", [ + pa.list_, pa.large_list, pa.list_view, pa.large_list_view]) +def test_nested_arrays(seq, factory): + arr = pa.array(seq([np.array([], dtype=np.int64), + np.array([1, 2], dtype=np.int64), None]), + type=factory(pa.int64())) + assert len(arr) == 3 + assert arr.null_count == 1 + assert arr.type == factory(pa.int64()) + assert arr.to_pylist() == [[], [1, 2], None] + + +@parametrize_with_sequence_types +def test_nested_fixed_size_list(seq): + # sequence of lists + data = [[1, 2], [3, None], None] + arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2)) + assert len(arr) == 3 + assert arr.null_count == 1 + assert arr.type == 
pa.list_(pa.int64(), 2) + assert arr.to_pylist() == data + + # sequence of numpy arrays + data = [np.array([1, 2], dtype='int64'), np.array([3, 4], dtype='int64'), + None] + arr = pa.array(seq(data), type=pa.list_(pa.int64(), 2)) + assert len(arr) == 3 + assert arr.null_count == 1 + assert arr.type == pa.list_(pa.int64(), 2) + assert arr.to_pylist() == [[1, 2], [3, 4], None] + + # incorrect length of the lists or arrays + data = [[1, 2, 4], [3, None], None] + for data in [[[1, 2, 3]], [np.array([1, 2, 4], dtype='int64')]]: + with pytest.raises( + ValueError, match="Length of item not correct: expected 2"): + pa.array(seq(data), type=pa.list_(pa.int64(), 2)) + + # with list size of 0 + data = [[], [], None] + arr = pa.array(seq(data), type=pa.list_(pa.int64(), 0)) + assert len(arr) == 3 + assert arr.null_count == 1 + assert arr.type == pa.list_(pa.int64(), 0) + assert arr.to_pylist() == [[], [], None] + + +@parametrize_with_sequence_types +def test_sequence_all_none(seq): + arr = pa.array(seq([None, None])) + assert len(arr) == 2 + assert arr.null_count == 2 + assert arr.type == pa.null() + assert arr.to_pylist() == [None, None] + + +@parametrize_with_sequence_types +@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs) +def test_sequence_integer(seq, np_scalar_pa_type): + np_scalar, pa_type = np_scalar_pa_type + expected = [1, None, 3, None, + np.iinfo(np_scalar).min, np.iinfo(np_scalar).max] + arr = pa.array(seq(expected), type=pa_type) + assert len(arr) == 6 + assert arr.null_count == 2 + assert arr.type == pa_type + assert arr.to_pylist() == expected + + +@parametrize_with_collections_types +@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs) +def test_sequence_integer_np_nan(seq, np_scalar_pa_type): + # ARROW-2806: numpy.nan is a double value and thus should produce + # a double array. + _, pa_type = np_scalar_pa_type + with pytest.raises(ValueError): + pa.array(seq([np.nan]), type=pa_type, from_pandas=False) + + arr = pa.array(seq([np.nan]), type=pa_type, from_pandas=True) + expected = [None] + assert len(arr) == 1 + assert arr.null_count == 1 + assert arr.type == pa_type + assert arr.to_pylist() == expected + + +@parametrize_with_sequence_types +@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs) +def test_sequence_integer_nested_np_nan(seq, np_scalar_pa_type): + # ARROW-2806: numpy.nan is a double value and thus should produce + # a double array. 
+ _, pa_type = np_scalar_pa_type + with pytest.raises(ValueError): + pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=False) + + arr = pa.array(seq([[np.nan]]), type=pa.list_(pa_type), from_pandas=True) + expected = [[None]] + assert len(arr) == 1 + assert arr.null_count == 0 + assert arr.type == pa.list_(pa_type) + assert arr.to_pylist() == expected + + +@parametrize_with_sequence_types +def test_sequence_integer_inferred(seq): + expected = [1, None, 3, None] + arr = pa.array(seq(expected)) + assert len(arr) == 4 + assert arr.null_count == 2 + assert arr.type == pa.int64() + assert arr.to_pylist() == expected + + +@parametrize_with_sequence_types +@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs) +def test_sequence_numpy_integer(seq, np_scalar_pa_type): + np_scalar, pa_type = np_scalar_pa_type + expected = [np_scalar(1), None, np_scalar(3), None, + np_scalar(np.iinfo(np_scalar).min), + np_scalar(np.iinfo(np_scalar).max)] + arr = pa.array(seq(expected), type=pa_type) + assert len(arr) == 6 + assert arr.null_count == 2 + assert arr.type == pa_type + assert arr.to_pylist() == expected + + +@parametrize_with_sequence_types +@pytest.mark.parametrize("np_scalar_pa_type", int_type_pairs) +def test_sequence_numpy_integer_inferred(seq, np_scalar_pa_type): + np_scalar, pa_type = np_scalar_pa_type + expected = [np_scalar(1), None, np_scalar(3), None] + expected += [np_scalar(np.iinfo(np_scalar).min), + np_scalar(np.iinfo(np_scalar).max)] + arr = pa.array(seq(expected)) + assert len(arr) == 6 + assert arr.null_count == 2 + assert arr.type == pa_type + assert arr.to_pylist() == expected + + +@parametrize_with_sequence_types +def test_sequence_custom_integers(seq): + expected = [0, 42, 2**33 + 1, -2**63] + data = list(map(MyInt, expected)) + arr = pa.array(seq(data), type=pa.int64()) + assert arr.to_pylist() == expected + + +@parametrize_with_collections_types +def test_broken_integers(seq): + data = [MyBrokenInt()] + with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"): + pa.array(seq(data), type=pa.int64()) + + +def test_numpy_scalars_mixed_type(): + # ARROW-4324 + data = [np.int32(10), np.float32(0.5)] + arr = pa.array(data) + expected = pa.array([10, 0.5], type="float64") + assert arr.equals(expected) + + # ARROW-9490 + data = [np.int8(10), np.float32(0.5)] + arr = pa.array(data) + expected = pa.array([10, 0.5], type="float32") + assert arr.equals(expected) + + +@pytest.mark.xfail(reason="Type inference for uint64 not implemented", + raises=OverflowError) +def test_uint64_max_convert(): + data = [0, np.iinfo(np.uint64).max] + + arr = pa.array(data, type=pa.uint64()) + expected = pa.array(np.array(data, dtype='uint64')) + assert arr.equals(expected) + + arr_inferred = pa.array(data) + assert arr_inferred.equals(expected) + + +@pytest.mark.parametrize("bits", [8, 16, 32, 64]) +def test_signed_integer_overflow(bits): + ty = getattr(pa, "int%d" % bits)() + # XXX ideally would always raise OverflowError + with pytest.raises((OverflowError, pa.ArrowInvalid)): + pa.array([2 ** (bits - 1)], ty) + with pytest.raises((OverflowError, pa.ArrowInvalid)): + pa.array([-2 ** (bits - 1) - 1], ty) + + +@pytest.mark.parametrize("bits", [8, 16, 32, 64]) +def test_unsigned_integer_overflow(bits): + ty = getattr(pa, "uint%d" % bits)() + # XXX ideally would always raise OverflowError + with pytest.raises((OverflowError, pa.ArrowInvalid)): + pa.array([2 ** bits], ty) + with pytest.raises((OverflowError, pa.ArrowInvalid)): + pa.array([-1], ty) + + 
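+
+# --- Editorial sketch (not part of the upstream test suite) ----------------
+# The two overflow tests above probe the representable range of each integer
+# width: a signed N-bit column accepts [-2**(N-1), 2**(N-1) - 1], an unsigned
+# one accepts [0, 2**N - 1], and one step past either bound raises. A minimal
+# int8 demonstration using only the module-level `pa` / `pytest` imports:
+def _example_int8_bounds():
+    pa.array([-128, 127], type=pa.int8())           # both bounds are accepted
+    with pytest.raises((OverflowError, pa.ArrowInvalid)):
+        pa.array([128], type=pa.int8())             # one past the maximum
+    with pytest.raises((OverflowError, pa.ArrowInvalid)):
+        pa.array([-129], type=pa.int8())            # one past the minimum
+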
+@parametrize_with_collections_types +@pytest.mark.parametrize("typ", pa_int_types) +def test_integer_from_string_error(seq, typ): + # ARROW-9451: pa.array(['1'], type=pa.uint32()) should not succeed + with pytest.raises(pa.ArrowInvalid): + pa.array(seq(['1']), type=typ) + + +def test_convert_with_mask(): + data = [1, 2, 3, 4, 5] + mask = np.array([False, True, False, False, True]) + + result = pa.array(data, mask=mask) + expected = pa.array([1, None, 3, 4, None]) + + assert result.equals(expected) + + # Mask wrong length + with pytest.raises(ValueError): + pa.array(data, mask=mask[1:]) + + +def test_garbage_collection(): + import gc + + # Force the cyclic garbage collector to run + gc.collect() + + bytes_before = pa.total_allocated_bytes() + pa.array([1, None, 3, None]) + gc.collect() + assert pa.total_allocated_bytes() == bytes_before + + +def test_sequence_double(): + data = [1.5, 1., None, 2.5, None, None] + arr = pa.array(data) + assert len(arr) == 6 + assert arr.null_count == 3 + assert arr.type == pa.float64() + assert arr.to_pylist() == data + + +def test_double_auto_coerce_from_integer(): + # Done as part of ARROW-2814 + data = [1.5, 1., None, 2.5, None, None] + arr = pa.array(data) + + data2 = [1.5, 1, None, 2.5, None, None] + arr2 = pa.array(data2) + + assert arr.equals(arr2) + + data3 = [1, 1.5, None, 2.5, None, None] + arr3 = pa.array(data3) + + data4 = [1., 1.5, None, 2.5, None, None] + arr4 = pa.array(data4) + + assert arr3.equals(arr4) + + +def test_double_integer_coerce_representable_range(): + valid_values = [1.5, 1, 2, None, 1 << 53, -(1 << 53)] + invalid_values = [1.5, 1, 2, None, (1 << 53) + 1] + invalid_values2 = [1.5, 1, 2, None, -((1 << 53) + 1)] + + # it works + pa.array(valid_values) + + # it fails + with pytest.raises(ValueError): + pa.array(invalid_values) + + with pytest.raises(ValueError): + pa.array(invalid_values2) + + +def test_float32_integer_coerce_representable_range(): + f32 = np.float32 + valid_values = [f32(1.5), 1 << 24, -(1 << 24)] + invalid_values = [f32(1.5), (1 << 24) + 1] + invalid_values2 = [f32(1.5), -((1 << 24) + 1)] + + # it works + pa.array(valid_values, type=pa.float32()) + + # it fails + with pytest.raises(ValueError): + pa.array(invalid_values, type=pa.float32()) + + with pytest.raises(ValueError): + pa.array(invalid_values2, type=pa.float32()) + + +def test_mixed_sequence_errors(): + with pytest.raises(ValueError, match="tried to convert to boolean"): + pa.array([True, 'foo'], type=pa.bool_()) + + with pytest.raises(ValueError, match="tried to convert to float32"): + pa.array([1.5, 'foo'], type=pa.float32()) + + with pytest.raises(ValueError, match="tried to convert to double"): + pa.array([1.5, 'foo']) + + +@parametrize_with_sequence_types +@pytest.mark.parametrize("np_scalar,pa_type", [ + (np.float16, pa.float16()), + (np.float32, pa.float32()), + (np.float64, pa.float64()) +]) +@pytest.mark.parametrize("from_pandas", [True, False]) +def test_sequence_numpy_double(seq, np_scalar, pa_type, from_pandas): + data = [np_scalar(1.5), np_scalar(1), None, np_scalar(2.5), None, np.nan] + arr = pa.array(seq(data), from_pandas=from_pandas) + assert len(arr) == 6 + if from_pandas: + assert arr.null_count == 3 + else: + assert arr.null_count == 2 + if from_pandas: + # The NaN is skipped in type inference, otherwise it forces a + # float64 promotion + assert arr.type == pa_type + else: + assert arr.type == pa.float64() + + assert arr.to_pylist()[:4] == data[:4] + if from_pandas: + assert arr.to_pylist()[5] is None + else: + assert 
np.isnan(arr.to_pylist()[5]) + + +@pytest.mark.parametrize("from_pandas", [True, False]) +@pytest.mark.parametrize("inner_seq", [np.array, list]) +def test_ndarray_nested_numpy_double(from_pandas, inner_seq): + # ARROW-2806 + data = np.array([ + inner_seq([1., 2.]), + inner_seq([1., 2., 3.]), + inner_seq([np.nan]), + None + ], dtype=object) + arr = pa.array(data, from_pandas=from_pandas) + assert len(arr) == 4 + assert arr.null_count == 1 + assert arr.type == pa.list_(pa.float64()) + if from_pandas: + assert arr.to_pylist() == [[1.0, 2.0], [1.0, 2.0, 3.0], [None], None] + else: + np.testing.assert_equal(arr.to_pylist(), + [[1., 2.], [1., 2., 3.], [np.nan], None]) + + +def test_nested_ndarray_in_object_array(): + # ARROW-4350 + arr = np.empty(2, dtype=object) + arr[:] = [np.array([1, 2], dtype=np.int64), + np.array([2, 3], dtype=np.int64)] + + arr2 = np.empty(2, dtype=object) + arr2[0] = [3, 4] + arr2[1] = [5, 6] + + expected_type = pa.list_(pa.list_(pa.int64())) + assert pa.infer_type([arr]) == expected_type + + result = pa.array([arr, arr2]) + expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]], + type=expected_type) + + assert result.equals(expected) + + # test case for len-1 arrays to ensure they are interpreted as + # sublists and not scalars + arr = np.empty(2, dtype=object) + arr[:] = [np.array([1]), np.array([2])] + result = pa.array([arr, arr]) + assert result.to_pylist() == [[[1], [2]], [[1], [2]]] + + +@pytest.mark.xfail(reason=("Type inference for multidimensional ndarray " + "not yet implemented"), + raises=AssertionError) +def test_multidimensional_ndarray_as_nested_list(): + # TODO(wesm): see ARROW-5645 + arr = np.array([[1, 2], [2, 3]], dtype=np.int64) + arr2 = np.array([[3, 4], [5, 6]], dtype=np.int64) + + expected_type = pa.list_(pa.list_(pa.int64())) + assert pa.infer_type([arr]) == expected_type + + result = pa.array([arr, arr2]) + expected = pa.array([[[1, 2], [2, 3]], [[3, 4], [5, 6]]], + type=expected_type) + + assert result.equals(expected) + + +@pytest.mark.parametrize(('data', 'value_type'), [ + ([True, False], pa.bool_()), + ([None, None], pa.null()), + ([1, 2, None], pa.int8()), + ([1, 2., 3., None], pa.float32()), + ([datetime.date.today(), None], pa.date32()), + ([None, datetime.date.today()], pa.date64()), + ([datetime.time(1, 1, 1), None], pa.time32('s')), + ([None, datetime.time(2, 2, 2)], pa.time64('us')), + ([datetime.datetime.now(), None], pa.timestamp('us')), + ([datetime.timedelta(seconds=10)], pa.duration('s')), + ([b"a", b"b"], pa.binary()), + ([b"aaa", b"bbb", b"ccc"], pa.binary(3)), + ([b"a", b"b", b"c"], pa.large_binary()), + (["a", "b", "c"], pa.string()), + (["a", "b", "c"], pa.large_string()), + ( + [{"a": 1, "b": 2}, None, {"a": 5, "b": None}], + pa.struct([('a', pa.int8()), ('b', pa.int16())]) + ) +]) +def test_list_array_from_object_ndarray(data, value_type): + ty = pa.list_(value_type) + ndarray = np.array(data, dtype=object) + arr = pa.array([ndarray], type=ty) + assert arr.type.equals(ty) + assert arr.to_pylist() == [data] + + +@pytest.mark.parametrize(('data', 'value_type'), [ + ([[1, 2], [3]], pa.list_(pa.int64())), + ([[1, 2], [3, 4]], pa.list_(pa.int64(), 2)), + ([[1], [2, 3]], pa.large_list(pa.int64())) +]) +def test_nested_list_array_from_object_ndarray(data, value_type): + ndarray = np.empty(len(data), dtype=object) + ndarray[:] = [np.array(item, dtype=object) for item in data] + + ty = pa.list_(value_type) + arr = pa.array([ndarray], type=ty) + assert arr.type.equals(ty) + assert arr.to_pylist() == [data] + + +def 
test_array_ignore_nan_from_pandas(): + # See ARROW-4324, this reverts logic that was introduced in + # ARROW-2240 + with pytest.raises(ValueError): + pa.array([np.nan, 'str']) + + arr = pa.array([np.nan, 'str'], from_pandas=True) + expected = pa.array([None, 'str']) + assert arr.equals(expected) + + +def test_nested_ndarray_different_dtypes(): + data = [ + np.array([1, 2, 3], dtype='int64'), + None, + np.array([4, 5, 6], dtype='uint32') + ] + + arr = pa.array(data) + expected = pa.array([[1, 2, 3], None, [4, 5, 6]], + type=pa.list_(pa.int64())) + assert arr.equals(expected) + + t2 = pa.list_(pa.uint32()) + arr2 = pa.array(data, type=t2) + expected2 = expected.cast(t2) + assert arr2.equals(expected2) + + +def test_sequence_unicode(): + data = ['foo', 'bar', None, 'mañana'] + arr = pa.array(data) + assert len(arr) == 4 + assert arr.null_count == 1 + assert arr.type == pa.string() + assert arr.to_pylist() == data + + +@pytest.mark.parametrize("ty", [pa.string(), pa.large_string(), pa.string_view()]) +def test_sequence_unicode_explicit_type(ty): + data = ['foo', 'bar', None, 'mañana'] + arr = pa.array(data, type=ty) + assert len(arr) == 4 + assert arr.null_count == 1 + assert arr.type == ty + assert arr.to_pylist() == data + + +def check_array_mixed_unicode_bytes(binary_type, string_type): + values = ['qux', b'foo', bytearray(b'barz')] + b_values = [b'qux', b'foo', b'barz'] + u_values = ['qux', 'foo', 'barz'] + + arr = pa.array(values) + expected = pa.array(b_values, type=pa.binary()) + assert arr.type == pa.binary() + assert arr.equals(expected) + + arr = pa.array(values, type=binary_type) + expected = pa.array(b_values, type=binary_type) + assert arr.type == binary_type + assert arr.equals(expected) + + arr = pa.array(values, type=string_type) + expected = pa.array(u_values, type=string_type) + assert arr.type == string_type + assert arr.equals(expected) + + +def test_array_mixed_unicode_bytes(): + check_array_mixed_unicode_bytes(pa.binary(), pa.string()) + check_array_mixed_unicode_bytes(pa.large_binary(), pa.large_string()) + check_array_mixed_unicode_bytes(pa.binary_view(), pa.string_view()) + + +@pytest.mark.large_memory +@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()]) +def test_large_binary_array(ty): + # Construct a large binary array with more than 4GB of data + s = b"0123456789abcdefghijklmnopqrstuvwxyz" * 10 + nrepeats = math.ceil((2**32 + 5) / len(s)) + data = [s] * nrepeats + arr = pa.array(data, type=ty) + assert isinstance(arr, pa.Array) + assert arr.type == ty + assert len(arr) == nrepeats + + +@pytest.mark.slow +@pytest.mark.large_memory +@pytest.mark.parametrize("ty", [pa.large_binary(), pa.large_string()]) +def test_large_binary_value(ty): + # Construct a large binary array with a single value larger than 4GB + s = b"0123456789abcdefghijklmnopqrstuvwxyz" + nrepeats = math.ceil((2**32 + 5) / len(s)) + arr = pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty) + assert isinstance(arr, pa.Array) + assert arr.type == ty + assert len(arr) == 4 + buf = arr[1].as_buffer() + assert len(buf) == len(s) * nrepeats + + +@pytest.mark.large_memory +@pytest.mark.parametrize("ty", [pa.binary(), pa.string(), pa.string_view()]) +def test_string_too_large(ty): + # Construct a binary array with a single value larger than 4GB + s = b"0123456789abcdefghijklmnopqrstuvwxyz" + nrepeats = math.ceil((2**32 + 5) / len(s)) + with pytest.raises(pa.ArrowCapacityError): + pa.array([b"foo", s * nrepeats, None, b"bar"], type=ty) + + +def test_sequence_bytes(): + u1 = 
b'ma\xc3\xb1ana' + + data = [b'foo', + memoryview(b'dada'), + memoryview(b'd-a-t-a')[::2], # non-contiguous is made contiguous + u1.decode('utf-8'), # unicode gets encoded, + bytearray(b'bar'), + None] + for ty in [None, pa.binary(), pa.large_binary(), pa.binary_view()]: + arr = pa.array(data, type=ty) + assert len(arr) == 6 + assert arr.null_count == 1 + assert arr.type == ty or pa.binary() + assert arr.to_pylist() == [b'foo', b'dada', b'data', u1, b'bar', None] + + +@pytest.mark.parametrize("ty", [pa.string(), pa.large_string(), pa.string_view()]) +def test_sequence_utf8_to_unicode(ty): + # ARROW-1225 + data = [b'foo', None, b'bar'] + arr = pa.array(data, type=ty) + assert arr.type == ty + assert arr[0].as_py() == 'foo' + + # test a non-utf8 unicode string + val = ('mañana').encode('utf-16-le') + with pytest.raises(pa.ArrowInvalid): + pa.array([val], type=ty) + + +def test_sequence_fixed_size_bytes(): + data = [b'foof', None, bytearray(b'barb'), b'2346'] + arr = pa.array(data, type=pa.binary(4)) + assert len(arr) == 4 + assert arr.null_count == 1 + assert arr.type == pa.binary(4) + assert arr.to_pylist() == [b'foof', None, b'barb', b'2346'] + + +def test_fixed_size_bytes_does_not_accept_varying_lengths(): + data = [b'foo', None, b'barb', b'2346'] + with pytest.raises(pa.ArrowInvalid): + pa.array(data, type=pa.binary(4)) + + +def test_fixed_size_binary_length_check(): + # ARROW-10193 + data = [b'\x19h\r\x9e\x00\x00\x00\x00\x01\x9b\x9fA'] + assert len(data[0]) == 12 + ty = pa.binary(12) + arr = pa.array(data, type=ty) + assert arr.to_pylist() == data + + +def test_sequence_date(): + data = [datetime.date(2000, 1, 1), None, datetime.date(1970, 1, 1), + datetime.date(2040, 2, 26)] + arr = pa.array(data) + assert len(arr) == 4 + assert arr.type == pa.date32() + assert arr.null_count == 1 + assert arr[0].as_py() == datetime.date(2000, 1, 1) + assert arr[1].as_py() is None + assert arr[2].as_py() == datetime.date(1970, 1, 1) + assert arr[3].as_py() == datetime.date(2040, 2, 26) + + +@pytest.mark.parametrize('input', + [(pa.date32(), [10957, None]), + (pa.date64(), [10957 * 86400000, None])]) +def test_sequence_explicit_types(input): + t, ex_values = input + data = [datetime.date(2000, 1, 1), None] + arr = pa.array(data, type=t) + arr2 = pa.array(ex_values, type=t) + + for x in [arr, arr2]: + assert len(x) == 2 + assert x.type == t + assert x.null_count == 1 + assert x[0].as_py() == datetime.date(2000, 1, 1) + assert x[1].as_py() is None + + +def test_date32_overflow(): + # Overflow + data3 = [2**32, None] + with pytest.raises((OverflowError, pa.ArrowException)): + pa.array(data3, type=pa.date32()) + + +@pytest.mark.parametrize(('time_type', 'unit', 'int_type'), [ + (pa.time32, 's', 'int32'), + (pa.time32, 'ms', 'int32'), + (pa.time64, 'us', 'int64'), + (pa.time64, 'ns', 'int64'), +]) +def test_sequence_time_with_timezone(time_type, unit, int_type): + def expected_integer_value(t): + # only use with utc time object because it doesn't adjust with the + # offset + units = ['s', 'ms', 'us', 'ns'] + multiplier = 10**(units.index(unit) * 3) + if t is None: + return None + seconds = ( + t.hour * 3600 + + t.minute * 60 + + t.second + + t.microsecond * 10**-6 + ) + return int(seconds * multiplier) + + def expected_time_value(t): + # only use with utc time object because it doesn't adjust with the + # time objects tzdata + if unit == 's': + return t.replace(microsecond=0) + elif unit == 'ms': + return t.replace(microsecond=(t.microsecond // 1000) * 1000) + else: + return t + + # only timezone naive times 
are supported in arrow + data = [ + datetime.time(8, 23, 34, 123456), + datetime.time(5, 0, 0, 1000), + None, + datetime.time(1, 11, 56, 432539), + datetime.time(23, 10, 0, 437699) + ] + + ty = time_type(unit) + arr = pa.array(data, type=ty) + assert len(arr) == 5 + assert arr.type == ty + assert arr.null_count == 1 + + # test that the underlying integers are UTC values + values = arr.cast(int_type) + expected = list(map(expected_integer_value, data)) + assert values.to_pylist() == expected + + # test that the scalars are datetime.time objects with UTC timezone + assert arr[0].as_py() == expected_time_value(data[0]) + assert arr[1].as_py() == expected_time_value(data[1]) + assert arr[2].as_py() is None + assert arr[3].as_py() == expected_time_value(data[3]) + assert arr[4].as_py() == expected_time_value(data[4]) + + def tz(hours, minutes=0): + offset = datetime.timedelta(hours=hours, minutes=minutes) + return datetime.timezone(offset) + + +def test_sequence_timestamp(): + data = [ + datetime.datetime(2007, 7, 13, 1, 23, 34, 123456), + None, + datetime.datetime(2006, 1, 13, 12, 34, 56, 432539), + datetime.datetime(2010, 8, 13, 5, 46, 57, 437699) + ] + arr = pa.array(data) + assert len(arr) == 4 + assert arr.type == pa.timestamp('us') + assert arr.null_count == 1 + assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1, + 23, 34, 123456) + assert arr[1].as_py() is None + assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12, + 34, 56, 432539) + assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5, + 46, 57, 437699) + + +@pytest.mark.parametrize('timezone', [ + None, + 'UTC', + 'Etc/GMT-1', + 'Europe/Budapest', +]) +@pytest.mark.parametrize('unit', [ + 's', + 'ms', + 'us', + 'ns' +]) +def test_sequence_timestamp_with_timezone(timezone, unit): + pytz = pytest.importorskip("pytz") + + def expected_integer_value(dt): + units = ['s', 'ms', 'us', 'ns'] + multiplier = 10**(units.index(unit) * 3) + if dt is None: + return None + else: + # avoid float precision issues + ts = decimal.Decimal(str(dt.timestamp())) + return int(ts * multiplier) + + def expected_datetime_value(dt): + if dt is None: + return None + + if unit == 's': + dt = dt.replace(microsecond=0) + elif unit == 'ms': + dt = dt.replace(microsecond=(dt.microsecond // 1000) * 1000) + + # adjust the timezone + if timezone is None: + # make datetime timezone unaware + return dt.replace(tzinfo=None) + else: + # convert to the expected timezone + return dt.astimezone(pytz.timezone(timezone)) + + data = [ + datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive + pytz.utc.localize( + datetime.datetime(2008, 1, 5, 5, 0, 0, 1000) + ), + None, + pytz.timezone('US/Eastern').localize( + datetime.datetime(2006, 1, 13, 12, 34, 56, 432539) + ), + pytz.timezone('Europe/Moscow').localize( + datetime.datetime(2010, 8, 13, 5, 0, 0, 437699) + ), + ] + utcdata = [ + pytz.utc.localize(data[0]), + data[1], + None, + data[3].astimezone(pytz.utc), + data[4].astimezone(pytz.utc), + ] + + ty = pa.timestamp(unit, tz=timezone) + arr = pa.array(data, type=ty) + assert len(arr) == 5 + assert arr.type == ty + assert arr.null_count == 1 + + # test that the underlying integers are UTC values + values = arr.cast('int64') + expected = list(map(expected_integer_value, utcdata)) + assert values.to_pylist() == expected + + # test that the scalars are datetimes with the correct timezone + for i in range(len(arr)): + assert arr[i].as_py() == expected_datetime_value(utcdata[i]) + + +@pytest.mark.parametrize('timezone', [ + None, + 'UTC', + 'Etc/GMT-1', + 
'Europe/Budapest', +]) +def test_pyarrow_ignore_timezone_environment_variable(monkeypatch, timezone): + # note that any non-empty value will evaluate to true + pytest.importorskip("pytz") + import pytz + + monkeypatch.setenv("PYARROW_IGNORE_TIMEZONE", "1") + data = [ + datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive + pytz.utc.localize( + datetime.datetime(2008, 1, 5, 5, 0, 0, 1000) + ), + pytz.timezone('US/Eastern').localize( + datetime.datetime(2006, 1, 13, 12, 34, 56, 432539) + ), + pytz.timezone('Europe/Moscow').localize( + datetime.datetime(2010, 8, 13, 5, 0, 0, 437699) + ), + ] + + expected = [dt.replace(tzinfo=None) for dt in data] + if timezone is not None: + tzinfo = pytz.timezone(timezone) + expected = [tzinfo.fromutc(dt) for dt in expected] + + ty = pa.timestamp('us', tz=timezone) + arr = pa.array(data, type=ty) + assert arr.to_pylist() == expected + + +def test_sequence_timestamp_with_timezone_inference(): + pytest.importorskip("pytz") + import pytz + + data = [ + datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive + pytz.utc.localize( + datetime.datetime(2008, 1, 5, 5, 0, 0, 1000) + ), + None, + pytz.timezone('US/Eastern').localize( + datetime.datetime(2006, 1, 13, 12, 34, 56, 432539) + ), + pytz.timezone('Europe/Moscow').localize( + datetime.datetime(2010, 8, 13, 5, 0, 0, 437699) + ), + ] + expected = [ + pa.timestamp('us', tz=None), + pa.timestamp('us', tz='UTC'), + pa.timestamp('us', tz=None), + pa.timestamp('us', tz='US/Eastern'), + pa.timestamp('us', tz='Europe/Moscow') + ] + for dt, expected_type in zip(data, expected): + prepended = [dt] + data + arr = pa.array(prepended) + assert arr.type == expected_type + + +def test_sequence_timestamp_with_zoneinfo_timezone_inference(): + pytest.importorskip("zoneinfo") + import zoneinfo + + data = [ + datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive + datetime.datetime(2008, 1, 5, 5, 0, 0, 1000, + tzinfo=datetime.timezone.utc), + None, + datetime.datetime(2006, 1, 13, 12, 34, 56, 432539, + tzinfo=zoneinfo.ZoneInfo(key='US/Eastern')), + datetime.datetime(2010, 8, 13, 5, 0, 0, 437699, + tzinfo=zoneinfo.ZoneInfo(key='Europe/Moscow')), + ] + expected = [ + pa.timestamp('us', tz=None), + pa.timestamp('us', tz='UTC'), + pa.timestamp('us', tz=None), + pa.timestamp('us', tz='US/Eastern'), + pa.timestamp('us', tz='Europe/Moscow') + ] + for dt, expected_type in zip(data, expected): + prepended = [dt] + data + arr = pa.array(prepended) + assert arr.type == expected_type + + +@pytest.mark.pandas +def test_sequence_timestamp_from_mixed_builtin_and_pandas_datetimes(): + pytest.importorskip("pytz") + import pytz + import pandas as pd + + data = [ + pd.Timestamp(1184307814123456123, tz=pytz.timezone('US/Eastern'), + unit='ns'), + datetime.datetime(2007, 7, 13, 8, 23, 34, 123456), # naive + pytz.utc.localize( + datetime.datetime(2008, 1, 5, 5, 0, 0, 1000) + ), + None, + ] + utcdata = [ + data[0].astimezone(pytz.utc), + pytz.utc.localize(data[1]), + data[2].astimezone(pytz.utc), + None, + ] + + arr = pa.array(data) + assert arr.type == pa.timestamp('us', tz='US/Eastern') + + values = arr.cast('int64') + expected = [int(dt.timestamp() * 10**6) if dt else None for dt in utcdata] + assert values.to_pylist() == expected + + +def test_sequence_timestamp_out_of_bounds_nanosecond(): + # https://issues.apache.org/jira/browse/ARROW-9768 + # datetime outside of range supported for nanosecond resolution + data = [datetime.datetime(2262, 4, 12)] + with pytest.raises(ValueError, match="out of bounds"): + pa.array(data, 
type=pa.timestamp('ns')) + + # with microsecond resolution it works fine + arr = pa.array(data, type=pa.timestamp('us')) + assert arr.to_pylist() == data + + # case where the naive is within bounds, but converted to UTC not + tz = datetime.timezone(datetime.timedelta(hours=-1)) + data = [datetime.datetime(2262, 4, 11, 23, tzinfo=tz)] + with pytest.raises(ValueError, match="out of bounds"): + pa.array(data, type=pa.timestamp('ns')) + + arr = pa.array(data, type=pa.timestamp('us')) + assert arr.to_pylist()[0] == datetime.datetime(2262, 4, 12) + + +def test_sequence_numpy_timestamp(): + data = [ + np.datetime64(datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)), + None, + np.datetime64(datetime.datetime(2006, 1, 13, 12, 34, 56, 432539)), + np.datetime64(datetime.datetime(2010, 8, 13, 5, 46, 57, 437699)) + ] + arr = pa.array(data) + assert len(arr) == 4 + assert arr.type == pa.timestamp('us') + assert arr.null_count == 1 + assert arr[0].as_py() == datetime.datetime(2007, 7, 13, 1, + 23, 34, 123456) + assert arr[1].as_py() is None + assert arr[2].as_py() == datetime.datetime(2006, 1, 13, 12, + 34, 56, 432539) + assert arr[3].as_py() == datetime.datetime(2010, 8, 13, 5, + 46, 57, 437699) + + +class MyDate(datetime.date): + pass + + +class MyDatetime(datetime.datetime): + pass + + +class MyTimedelta(datetime.timedelta): + pass + + +def test_datetime_subclassing(): + data = [ + MyDate(2007, 7, 13), + ] + date_type = pa.date32() + arr_date = pa.array(data, type=date_type) + assert len(arr_date) == 1 + assert arr_date.type == date_type + assert arr_date[0].as_py() == datetime.date(2007, 7, 13) + + data = [ + MyDatetime(2007, 7, 13, 1, 23, 34, 123456), + ] + + s = pa.timestamp('s') + ms = pa.timestamp('ms') + us = pa.timestamp('us') + + arr_s = pa.array(data, type=s) + assert len(arr_s) == 1 + assert arr_s.type == s + assert arr_s[0].as_py() == datetime.datetime(2007, 7, 13, 1, + 23, 34, 0) + + arr_ms = pa.array(data, type=ms) + assert len(arr_ms) == 1 + assert arr_ms.type == ms + assert arr_ms[0].as_py() == datetime.datetime(2007, 7, 13, 1, + 23, 34, 123000) + + arr_us = pa.array(data, type=us) + assert len(arr_us) == 1 + assert arr_us.type == us + assert arr_us[0].as_py() == datetime.datetime(2007, 7, 13, 1, + 23, 34, 123456) + + data = [ + MyTimedelta(123, 456, 1002), + ] + + s = pa.duration('s') + ms = pa.duration('ms') + us = pa.duration('us') + + arr_s = pa.array(data) + assert len(arr_s) == 1 + assert arr_s.type == us + assert arr_s[0].as_py() == datetime.timedelta(123, 456, 1002) + + arr_s = pa.array(data, type=s) + assert len(arr_s) == 1 + assert arr_s.type == s + assert arr_s[0].as_py() == datetime.timedelta(123, 456) + + arr_ms = pa.array(data, type=ms) + assert len(arr_ms) == 1 + assert arr_ms.type == ms + assert arr_ms[0].as_py() == datetime.timedelta(123, 456, 1000) + + arr_us = pa.array(data, type=us) + assert len(arr_us) == 1 + assert arr_us.type == us + assert arr_us[0].as_py() == datetime.timedelta(123, 456, 1002) + + +@pytest.mark.xfail(not _pandas_api.have_pandas, + reason="pandas required for nanosecond conversion") +def test_sequence_timestamp_nanoseconds(): + inputs = [ + [datetime.datetime(2007, 7, 13, 1, 23, 34, 123456)], + [MyDatetime(2007, 7, 13, 1, 23, 34, 123456)] + ] + + for data in inputs: + ns = pa.timestamp('ns') + arr_ns = pa.array(data, type=ns) + assert len(arr_ns) == 1 + assert arr_ns.type == ns + assert arr_ns[0].as_py() == datetime.datetime(2007, 7, 13, 1, + 23, 34, 123456) + + +@pytest.mark.pandas +@pytest.mark.skipif(sys.platform == "win32" and not 
util.windows_has_tzdata(), + reason="Timezone database is not installed on Windows") +def test_sequence_timestamp_from_int_with_unit(): + # TODO(wesm): This test might be rewritten to assert the actual behavior + # when pandas is not installed + + data = [1] + + s = pa.timestamp('s') + ms = pa.timestamp('ms') + us = pa.timestamp('us') + ns = pa.timestamp('ns') + + arr_s = pa.array(data, type=s) + assert len(arr_s) == 1 + assert arr_s.type == s + assert repr(arr_s[0]) == ( + "<pyarrow.TimestampScalar: '1970-01-01 00:00:01'>" + ) + assert str(arr_s[0]) == "1970-01-01 00:00:01" + + arr_ms = pa.array(data, type=ms) + assert len(arr_ms) == 1 + assert arr_ms.type == ms + assert repr(arr_ms[0].as_py()) == ( + "datetime.datetime(1970, 1, 1, 0, 0, 0, 1000)" + ) + assert str(arr_ms[0]) == "1970-01-01 00:00:00.001000" + + arr_us = pa.array(data, type=us) + assert len(arr_us) == 1 + assert arr_us.type == us + assert repr(arr_us[0].as_py()) == ( + "datetime.datetime(1970, 1, 1, 0, 0, 0, 1)" + ) + assert str(arr_us[0]) == "1970-01-01 00:00:00.000001" + + arr_ns = pa.array(data, type=ns) + assert len(arr_ns) == 1 + assert arr_ns.type == ns + assert repr(arr_ns[0].as_py()) == ( + "Timestamp('1970-01-01 00:00:00.000000001')" + ) + assert str(arr_ns[0]) == "1970-01-01 00:00:00.000000001" + + expected_exc = TypeError + + class CustomClass(): + pass + + for ty in [ns, pa.date32(), pa.date64()]: + with pytest.raises(expected_exc): + pa.array([1, CustomClass()], type=ty) + + +@pytest.mark.parametrize('np_scalar', [True, False]) +def test_sequence_duration(np_scalar): + td1 = datetime.timedelta(2, 3601, 1) + td2 = datetime.timedelta(1, 100, 1000) + if np_scalar: + data = [np.timedelta64(td1), None, np.timedelta64(td2)] + else: + data = [td1, None, td2] + + arr = pa.array(data) + assert len(arr) == 3 + assert arr.type == pa.duration('us') + assert arr.null_count == 1 + assert arr[0].as_py() == td1 + assert arr[1].as_py() is None + assert arr[2].as_py() == td2 + + +@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns']) +def test_sequence_duration_with_unit(unit): + data = [ + datetime.timedelta(3, 22, 1001), + ] + expected = {'s': datetime.timedelta(3, 22), + 'ms': datetime.timedelta(3, 22, 1000), + 'us': datetime.timedelta(3, 22, 1001), + 'ns': datetime.timedelta(3, 22, 1001)} + + ty = pa.duration(unit) + + arr_s = pa.array(data, type=ty) + assert len(arr_s) == 1 + assert arr_s.type == ty + assert arr_s[0].as_py() == expected[unit] + + +@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns']) +def test_sequence_duration_from_int_with_unit(unit): + data = [5] + + ty = pa.duration(unit) + arr = pa.array(data, type=ty) + assert len(arr) == 1 + assert arr.type == ty + assert arr[0].value == 5 + + +def test_sequence_duration_nested_lists(): + td1 = datetime.timedelta(1, 1, 1000) + td2 = datetime.timedelta(1, 100) + + data = [[td1, None], [td1, td2]] + + arr = pa.array(data) + assert len(arr) == 2 + assert arr.type == pa.list_(pa.duration('us')) + assert arr.to_pylist() == data + + +@pytest.mark.parametrize("factory", [ + pa.list_, pa.large_list, pa.list_view, pa.large_list_view]) +def test_sequence_duration_nested_lists_with_explicit_type(factory): + td1 = datetime.timedelta(1, 1, 1000) + td2 = datetime.timedelta(1, 100) + + data = [[td1, None], [td1, td2]] + + arr = pa.array(data, type=factory(pa.duration('ms'))) + assert len(arr) == 2 + assert arr.type == factory(pa.duration('ms')) + assert arr.to_pylist() == data + + +def test_sequence_duration_nested_lists_numpy(): + td1 = datetime.timedelta(1, 1, 1000) + td2 = datetime.timedelta(1, 100) + + data = 
[[np.timedelta64(td1), None], + [np.timedelta64(td1), np.timedelta64(td2)]] + + arr = pa.array(data) + assert len(arr) == 2 + assert arr.type == pa.list_(pa.duration('us')) + assert arr.to_pylist() == [[td1, None], [td1, td2]] + + data = [np.array([np.timedelta64(td1), None], dtype='timedelta64[us]'), + np.array([np.timedelta64(td1), np.timedelta64(td2)])] + + arr = pa.array(data) + assert len(arr) == 2 + assert arr.type == pa.list_(pa.duration('us')) + assert arr.to_pylist() == [[td1, None], [td1, td2]] + + +def test_sequence_nesting_levels(): + data = [1, 2, None] + arr = pa.array(data) + assert arr.type == pa.int64() + assert arr.to_pylist() == data + + data = [[1], [2], None] + arr = pa.array(data) + assert arr.type == pa.list_(pa.int64()) + assert arr.to_pylist() == data + + data = [[1], [2, 3, 4], [None]] + arr = pa.array(data) + assert arr.type == pa.list_(pa.int64()) + assert arr.to_pylist() == data + + data = [None, [[None, 1]], [[2, 3, 4], None], [None]] + arr = pa.array(data) + assert arr.type == pa.list_(pa.list_(pa.int64())) + assert arr.to_pylist() == data + + exceptions = (pa.ArrowInvalid, pa.ArrowTypeError) + + # Mixed nesting levels are rejected + with pytest.raises(exceptions): + pa.array([1, 2, [1]]) + + with pytest.raises(exceptions): + pa.array([1, 2, []]) + + with pytest.raises(exceptions): + pa.array([[1], [2], [None, [1]]]) + + +def test_sequence_mixed_types_fails(): + data = ['a', 1, 2.0] + with pytest.raises(pa.ArrowTypeError): + pa.array(data) + + +def test_sequence_mixed_types_with_specified_type_fails(): + data = ['-10', '-5', {'a': 1}, '0', '5', '10'] + + type = pa.string() + with pytest.raises(TypeError): + pa.array(data, type=type) + + +def test_sequence_decimal(): + data = [decimal.Decimal('1234.183'), decimal.Decimal('8094.234')] + for type in [pa.decimal128, pa.decimal256]: + arr = pa.array(data, type=type(precision=7, scale=3)) + assert arr.to_pylist() == data + + +def test_sequence_decimal_different_precisions(): + data = [ + decimal.Decimal('1234234983.183'), decimal.Decimal('80943244.234') + ] + for type in [pa.decimal128, pa.decimal256]: + arr = pa.array(data, type=type(precision=13, scale=3)) + assert arr.to_pylist() == data + + +def test_sequence_decimal_no_scale(): + data = [decimal.Decimal('1234234983'), decimal.Decimal('8094324')] + for type in [pa.decimal128, pa.decimal256]: + arr = pa.array(data, type=type(precision=10)) + assert arr.to_pylist() == data + + +def test_sequence_decimal_negative(): + data = [decimal.Decimal('-1234.234983'), decimal.Decimal('-8.094324')] + for type in [pa.decimal128, pa.decimal256]: + arr = pa.array(data, type=type(precision=10, scale=6)) + assert arr.to_pylist() == data + + +def test_sequence_decimal_no_whole_part(): + data = [decimal.Decimal('-.4234983'), decimal.Decimal('.0103943')] + for type in [pa.decimal128, pa.decimal256]: + arr = pa.array(data, type=type(precision=7, scale=7)) + assert arr.to_pylist() == data + + +def test_sequence_decimal_large_integer(): + data = [decimal.Decimal('-394029506937548693.42983'), + decimal.Decimal('32358695912932.01033')] + for type in [pa.decimal128, pa.decimal256]: + arr = pa.array(data, type=type(precision=23, scale=5)) + assert arr.to_pylist() == data + + +def test_sequence_decimal_from_integers(): + data = [0, 1, -39402950693754869342983] + expected = [decimal.Decimal(x) for x in data] + for type in [pa.decimal128, pa.decimal256]: + arr = pa.array(data, type=type(precision=28, scale=5)) + assert arr.to_pylist() == expected + + +def 
test_sequence_decimal_too_high_precision(): + # ARROW-6989 python decimal has too high precision + with pytest.raises(ValueError, match="precision out of range"): + pa.array([decimal.Decimal('1' * 80)]) + + +def test_sequence_decimal_infer(): + for data, typ in [ + # simple case + (decimal.Decimal('1.234'), pa.decimal128(4, 3)), + # trailing zeros + (decimal.Decimal('12300'), pa.decimal128(5, 0)), + (decimal.Decimal('12300.0'), pa.decimal128(6, 1)), + # scientific power notation + (decimal.Decimal('1.23E+4'), pa.decimal128(5, 0)), + (decimal.Decimal('123E+2'), pa.decimal128(5, 0)), + (decimal.Decimal('123E+4'), pa.decimal128(7, 0)), + # leading zeros + (decimal.Decimal('0.0123'), pa.decimal128(4, 4)), + (decimal.Decimal('0.01230'), pa.decimal128(5, 5)), + (decimal.Decimal('1.230E-2'), pa.decimal128(5, 5)), + ]: + assert pa.infer_type([data]) == typ + arr = pa.array([data]) + assert arr.type == typ + assert arr.to_pylist()[0] == data + + +def test_sequence_decimal_infer_mixed(): + # ARROW-12150 - ensure mixed precision gets correctly inferred to + # common type that can hold all input values + cases = [ + ([decimal.Decimal('1.234'), decimal.Decimal('3.456')], + pa.decimal128(4, 3)), + ([decimal.Decimal('1.234'), decimal.Decimal('456.7')], + pa.decimal128(6, 3)), + ([decimal.Decimal('123.4'), decimal.Decimal('4.567')], + pa.decimal128(6, 3)), + ([decimal.Decimal('123e2'), decimal.Decimal('4567e3')], + pa.decimal128(7, 0)), + ([decimal.Decimal('123e4'), decimal.Decimal('4567e2')], + pa.decimal128(7, 0)), + ([decimal.Decimal('0.123'), decimal.Decimal('0.04567')], + pa.decimal128(5, 5)), + ([decimal.Decimal('0.001'), decimal.Decimal('1.01E5')], + pa.decimal128(9, 3)), + ] + for data, typ in cases: + assert pa.infer_type(data) == typ + arr = pa.array(data) + assert arr.type == typ + assert arr.to_pylist() == data + + +def test_sequence_decimal_given_type(): + for data, typs, wrong_typs in [ + # simple case + ( + decimal.Decimal('1.234'), + [pa.decimal128(4, 3), pa.decimal128(5, 3), pa.decimal128(5, 4)], + [pa.decimal128(4, 2), pa.decimal128(4, 4)] + ), + # trailing zeros + ( + decimal.Decimal('12300'), + [pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)], + [pa.decimal128(4, 0), pa.decimal128(3, -3)] + ), + # scientific power notation + ( + decimal.Decimal('1.23E+4'), + [pa.decimal128(5, 0), pa.decimal128(6, 0), pa.decimal128(3, -2)], + [pa.decimal128(4, 0), pa.decimal128(3, -3)] + ), + ]: + for typ in typs: + arr = pa.array([data], type=typ) + assert arr.type == typ + assert arr.to_pylist()[0] == data + for typ in wrong_typs: + with pytest.raises(ValueError): + pa.array([data], type=typ) + + +def test_range_types(): + arr1 = pa.array(range(3)) + arr2 = pa.array((0, 1, 2)) + assert arr1.equals(arr2) + + +def test_empty_range(): + arr = pa.array(range(0)) + assert len(arr) == 0 + assert arr.null_count == 0 + assert arr.type == pa.null() + assert arr.to_pylist() == [] + + +def test_structarray(): + arr = pa.StructArray.from_arrays([], names=[]) + assert arr.type == pa.struct([]) + assert len(arr) == 0 + assert arr.to_pylist() == [] + + ints = pa.array([None, 2, 3], type=pa.int64()) + strs = pa.array(['a', None, 'c'], type=pa.string()) + bools = pa.array([True, False, None], type=pa.bool_()) + arr = pa.StructArray.from_arrays( + [ints, strs, bools], + ['ints', 'strs', 'bools']) + + expected = [ + {'ints': None, 'strs': 'a', 'bools': True}, + {'ints': 2, 'strs': None, 'bools': False}, + {'ints': 3, 'strs': 'c', 'bools': None}, + ] + + pylist = arr.to_pylist() + assert pylist == 
expected, (pylist, expected) + + # len(names) != len(arrays) + with pytest.raises(ValueError): + pa.StructArray.from_arrays([ints], ['ints', 'strs']) + + +def test_struct_from_dicts(): + ty = pa.struct([pa.field('a', pa.int32()), + pa.field('b', pa.string()), + pa.field('c', pa.bool_())]) + arr = pa.array([], type=ty) + assert arr.to_pylist() == [] + + data = [{'a': 5, 'b': 'foo', 'c': True}, + {'a': 6, 'b': 'bar', 'c': False}] + arr = pa.array(data, type=ty) + assert arr.to_pylist() == data + + # With omitted values + data = [{'a': 5, 'c': True}, + None, + {}, + {'a': None, 'b': 'bar'}] + arr = pa.array(data, type=ty) + expected = [{'a': 5, 'b': None, 'c': True}, + None, + {'a': None, 'b': None, 'c': None}, + {'a': None, 'b': 'bar', 'c': None}] + assert arr.to_pylist() == expected + + +def test_struct_from_dicts_bytes_keys(): + # ARROW-6878 + ty = pa.struct([pa.field('a', pa.int32()), + pa.field('b', pa.string()), + pa.field('c', pa.bool_())]) + arr = pa.array([], type=ty) + assert arr.to_pylist() == [] + + data = [{b'a': 5, b'b': 'foo'}, + {b'a': 6, b'c': False}] + arr = pa.array(data, type=ty) + assert arr.to_pylist() == [ + {'a': 5, 'b': 'foo', 'c': None}, + {'a': 6, 'b': None, 'c': False}, + ] + + +def test_struct_from_tuples(): + ty = pa.struct([pa.field('a', pa.int32()), + pa.field('b', pa.string()), + pa.field('c', pa.bool_())]) + + data = [(5, 'foo', True), + (6, 'bar', False)] + expected = [{'a': 5, 'b': 'foo', 'c': True}, + {'a': 6, 'b': 'bar', 'c': False}] + arr = pa.array(data, type=ty) + + data_as_ndarray = np.empty(len(data), dtype=object) + data_as_ndarray[:] = data + arr2 = pa.array(data_as_ndarray, type=ty) + assert arr.to_pylist() == expected + + assert arr.equals(arr2) + + # With omitted values + data = [(5, 'foo', None), + None, + (6, None, False)] + expected = [{'a': 5, 'b': 'foo', 'c': None}, + None, + {'a': 6, 'b': None, 'c': False}] + arr = pa.array(data, type=ty) + assert arr.to_pylist() == expected + + # Invalid tuple size + for tup in [(5, 'foo'), (), ('5', 'foo', True, None)]: + with pytest.raises(ValueError, match="(?i)tuple size"): + pa.array([tup], type=ty) + + +def test_struct_from_list_of_pairs(): + ty = pa.struct([ + pa.field('a', pa.int32()), + pa.field('b', pa.string()), + pa.field('c', pa.bool_()) + ]) + data = [ + [('a', 5), ('b', 'foo'), ('c', True)], + [('a', 6), ('b', 'bar'), ('c', False)], + None + ] + arr = pa.array(data, type=ty) + assert arr.to_pylist() == [ + {'a': 5, 'b': 'foo', 'c': True}, + {'a': 6, 'b': 'bar', 'c': False}, + None + ] + + # test with duplicated field names + ty = pa.struct([ + pa.field('a', pa.int32()), + pa.field('a', pa.string()), + pa.field('b', pa.bool_()) + ]) + data = [ + [('a', 5), ('a', 'foo'), ('b', True)], + [('a', 6), ('a', 'bar'), ('b', False)], + ] + arr = pa.array(data, type=ty) + with pytest.raises(ValueError): + # TODO(kszucs): ARROW-9997 + arr.to_pylist() + + # test with empty elements + ty = pa.struct([ + pa.field('a', pa.int32()), + pa.field('b', pa.string()), + pa.field('c', pa.bool_()) + ]) + data = [ + [], + [('a', 5), ('b', 'foo'), ('c', True)], + [('a', 2), ('b', 'baz')], + [('a', 1), ('b', 'bar'), ('c', False), ('d', 'julia')], + ] + expected = [ + {'a': None, 'b': None, 'c': None}, + {'a': 5, 'b': 'foo', 'c': True}, + {'a': 2, 'b': 'baz', 'c': None}, + {'a': 1, 'b': 'bar', 'c': False}, + ] + arr = pa.array(data, type=ty) + assert arr.to_pylist() == expected + + +def test_struct_from_list_of_pairs_errors(): + ty = pa.struct([ + pa.field('a', pa.int32()), + pa.field('b', pa.string()), + pa.field('c', 
pa.bool_()) + ]) + + # test that it raises if the key doesn't match the expected field name + data = [ + [], + [('a', 5), ('c', True), ('b', None)], + ] + msg = "The expected field name is `b` but `c` was given" + with pytest.raises(ValueError, match=msg): + pa.array(data, type=ty) + + # test various errors both at the first position and after because of key + # type inference + template = ( + r"Could not convert {} with type {}: was expecting tuple of " + r"(key, value) pair" + ) + cases = [ + tuple(), # empty key-value pair + tuple('a',), # missing value + tuple('unknown-key',), # not known field name + 'string', # not a tuple + ] + for key_value_pair in cases: + msg = re.escape(template.format( + repr(key_value_pair), type(key_value_pair).__name__ + )) + + with pytest.raises(TypeError, match=msg): + pa.array([ + [key_value_pair], + [('a', 5), ('b', 'foo'), ('c', None)], + ], type=ty) + + with pytest.raises(TypeError, match=msg): + pa.array([ + [('a', 5), ('b', 'foo'), ('c', None)], + [key_value_pair], + ], type=ty) + + +def test_struct_from_mixed_sequence(): + # It is forbidden to mix dicts and tuples when initializing a struct array + ty = pa.struct([pa.field('a', pa.int32()), + pa.field('b', pa.string()), + pa.field('c', pa.bool_())]) + data = [(5, 'foo', True), + {'a': 6, 'b': 'bar', 'c': False}] + with pytest.raises(TypeError): + pa.array(data, type=ty) + + +def test_struct_from_dicts_inference(): + expected_type = pa.struct([pa.field('a', pa.int64()), + pa.field('b', pa.string()), + pa.field('c', pa.bool_())]) + data = [{'a': 5, 'b': 'foo', 'c': True}, + {'a': 6, 'b': 'bar', 'c': False}] + + arr = pa.array(data) + check_struct_type(arr.type, expected_type) + assert arr.to_pylist() == data + + # With omitted values + data = [{'a': 5, 'c': True}, + None, + {}, + {'a': None, 'b': 'bar'}] + expected = [{'a': 5, 'b': None, 'c': True}, + None, + {'a': None, 'b': None, 'c': None}, + {'a': None, 'b': 'bar', 'c': None}] + + arr = pa.array(data) + data_as_ndarray = np.empty(len(data), dtype=object) + data_as_ndarray[:] = data + arr2 = pa.array(data) + + check_struct_type(arr.type, expected_type) + assert arr.to_pylist() == expected + assert arr.equals(arr2) + + # Nested + expected_type = pa.struct([ + pa.field('a', pa.struct([pa.field('aa', pa.list_(pa.int64())), + pa.field('ab', pa.bool_())])), + pa.field('b', pa.string())]) + data = [{'a': {'aa': [5, 6], 'ab': True}, 'b': 'foo'}, + {'a': {'aa': None, 'ab': False}, 'b': None}, + {'a': None, 'b': 'bar'}] + arr = pa.array(data) + + assert arr.to_pylist() == data + + # Edge cases + arr = pa.array([{}]) + assert arr.type == pa.struct([]) + assert arr.to_pylist() == [{}] + + # Mixing structs and scalars is rejected + with pytest.raises((pa.ArrowInvalid, pa.ArrowTypeError)): + pa.array([1, {'a': 2}]) + + +def test_structarray_from_arrays_coerce(): + # ARROW-1706 + ints = [None, 2, 3] + strs = ['a', None, 'c'] + bools = [True, False, None] + ints_nonnull = [1, 2, 3] + + arrays = [ints, strs, bools, ints_nonnull] + result = pa.StructArray.from_arrays(arrays, + ['ints', 'strs', 'bools', + 'int_nonnull']) + expected = pa.StructArray.from_arrays( + [pa.array(ints, type='int64'), + pa.array(strs, type='utf8'), + pa.array(bools), + pa.array(ints_nonnull, type='int64')], + ['ints', 'strs', 'bools', 'int_nonnull']) + + with pytest.raises(ValueError): + pa.StructArray.from_arrays(arrays) + + assert result.equals(expected) + + +def test_decimal_array_with_none_and_nan(): + values = [decimal.Decimal('1.234'), None, np.nan, decimal.Decimal('nan')] + + with 
pytest.raises(TypeError): + # ARROW-6227: Without from_pandas=True, NaN is considered a float + array = pa.array(values) + + array = pa.array(values, from_pandas=True) + assert array.type == pa.decimal128(4, 3) + assert array.to_pylist() == values[:2] + [None, None] + + array = pa.array(values, type=pa.decimal128(10, 4), from_pandas=True) + assert array.to_pylist() == [decimal.Decimal('1.2340'), None, None, None] + + +def test_map_from_dicts(): + data = [[{'key': b'a', 'value': 1}, {'key': b'b', 'value': 2}], + [{'key': b'c', 'value': 3}], + [{'key': b'd', 'value': 4}, {'key': b'e', 'value': 5}, + {'key': b'f', 'value': None}], + [{'key': b'g', 'value': 7}]] + expected = [[(d['key'], d['value']) for d in entry] for entry in data] + + arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32())) + + assert arr.to_pylist() == expected + + # With omitted values + data[1] = None + expected[1] = None + + arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32())) + + assert arr.to_pylist() == expected + + # Invalid dictionary + for entry in [[{'value': 5}], [{}], [{'k': 1, 'v': 2}]]: + with pytest.raises(ValueError, match="Invalid Map"): + pa.array([entry], type=pa.map_('i4', 'i4')) + + # Invalid dictionary types + for entry in [[{'key': '1', 'value': 5}], [{'key': {'value': 2}}]]: + with pytest.raises(pa.ArrowInvalid, match="tried to convert to int"): + pa.array([entry], type=pa.map_('i4', 'i4')) + + +def test_map_from_tuples(): + expected = [[(b'a', 1), (b'b', 2)], + [(b'c', 3)], + [(b'd', 4), (b'e', 5), (b'f', None)], + [(b'g', 7)]] + + arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32())) + + assert arr.to_pylist() == expected + + # With omitted values + expected[1] = None + + arr = pa.array(expected, type=pa.map_(pa.binary(), pa.int32())) + + assert arr.to_pylist() == expected + + # Invalid tuple size + for entry in [[(5,)], [()], [('5', 'foo', True)]]: + with pytest.raises(ValueError, match="(?i)tuple size"): + pa.array([entry], type=pa.map_('i4', 'i4')) + + +def test_dictionary_from_boolean(): + typ = pa.dictionary(pa.int8(), value_type=pa.bool_()) + a = pa.array([False, False, True, False, True], type=typ) + assert isinstance(a.type, pa.DictionaryType) + assert a.type.equals(typ) + + expected_indices = pa.array([0, 0, 1, 0, 1], type=pa.int8()) + expected_dictionary = pa.array([False, True], type=pa.bool_()) + assert a.indices.equals(expected_indices) + assert a.dictionary.equals(expected_dictionary) + + +@pytest.mark.parametrize('value_type', [ + pa.int8(), + pa.int16(), + pa.int32(), + pa.int64(), + pa.uint8(), + pa.uint16(), + pa.uint32(), + pa.uint64(), + pa.float32(), + pa.float64(), +]) +def test_dictionary_from_integers(value_type): + typ = pa.dictionary(pa.int8(), value_type=value_type) + a = pa.array([1, 2, 1, 1, 2, 3], type=typ) + assert isinstance(a.type, pa.DictionaryType) + assert a.type.equals(typ) + + expected_indices = pa.array([0, 1, 0, 0, 1, 2], type=pa.int8()) + expected_dictionary = pa.array([1, 2, 3], type=value_type) + assert a.indices.equals(expected_indices) + assert a.dictionary.equals(expected_dictionary) + + +@pytest.mark.parametrize('input_index_type', [ + pa.int8(), + pa.int16(), + pa.int32(), + pa.int64() +]) +def test_dictionary_index_type(input_index_type): + # dictionary array is constructed using adaptive index type builder, + # but the input index type is considered as the minimal width type to use + + typ = pa.dictionary(input_index_type, value_type=pa.int64()) + arr = pa.array(range(10), type=typ) + assert arr.type.equals(typ) + + +def 
test_dictionary_is_always_adaptive(): + # dictionary array is constructed using adaptive index type builder, + # meaning that the output index type may be wider than the given index type + # since it depends on the input data + typ = pa.dictionary(pa.int8(), value_type=pa.int64()) + + a = pa.array(range(2**7), type=typ) + expected = pa.dictionary(pa.int8(), pa.int64()) + assert a.type.equals(expected) + + a = pa.array(range(2**7 + 1), type=typ) + expected = pa.dictionary(pa.int16(), pa.int64()) + assert a.type.equals(expected) + + +def test_dictionary_from_strings(): + for value_type in [pa.binary(), pa.string()]: + typ = pa.dictionary(pa.int8(), value_type) + a = pa.array(["", "a", "bb", "a", "bb", "ccc"], type=typ) + + assert isinstance(a.type, pa.DictionaryType) + + expected_indices = pa.array([0, 1, 2, 1, 2, 3], type=pa.int8()) + expected_dictionary = pa.array(["", "a", "bb", "ccc"], type=value_type) + assert a.indices.equals(expected_indices) + assert a.dictionary.equals(expected_dictionary) + + # fixed size binary type + typ = pa.dictionary(pa.int8(), pa.binary(3)) + a = pa.array(["aaa", "aaa", "bbb", "ccc", "bbb"], type=typ) + assert isinstance(a.type, pa.DictionaryType) + + expected_indices = pa.array([0, 0, 1, 2, 1], type=pa.int8()) + expected_dictionary = pa.array(["aaa", "bbb", "ccc"], type=pa.binary(3)) + assert a.indices.equals(expected_indices) + assert a.dictionary.equals(expected_dictionary) + + +@pytest.mark.parametrize(('unit', 'expected'), [ + ('s', datetime.timedelta(seconds=-2147483000)), + ('ms', datetime.timedelta(milliseconds=-2147483000)), + ('us', datetime.timedelta(microseconds=-2147483000)), + ('ns', datetime.timedelta(microseconds=-2147483)) +]) +def test_duration_array_roundtrip_corner_cases(unit, expected): + # Corner case discovered by hypothesis: there were implicit conversions to + # unsigned values resulting wrong values with wrong signs. 
+ ty = pa.duration(unit) + arr = pa.array([-2147483000], type=ty) + restored = pa.array(arr.to_pylist(), type=ty) + assert arr.equals(restored) + + expected_list = [expected] + if unit == 'ns': + # if pandas is available then a pandas Timedelta is returned + try: + import pandas as pd + except ImportError: + pass + else: + expected_list = [pd.Timedelta(-2147483000, unit='ns')] + + assert restored.to_pylist() == expected_list + + +@pytest.mark.pandas +def test_roundtrip_nanosecond_resolution_pandas_temporal_objects(): + # corner case discovered by hypothesis: preserving the nanoseconds on + # conversion from a list of Timedelta and Timestamp objects + import pandas as pd + + ty = pa.duration('ns') + arr = pa.array([9223371273709551616], type=ty) + data = arr.to_pylist() + assert isinstance(data[0], pd.Timedelta) + restored = pa.array(data, type=ty) + assert arr.equals(restored) + assert restored.to_pylist() == [ + pd.Timedelta(9223371273709551616, unit='ns') + ] + + ty = pa.timestamp('ns') + arr = pa.array([9223371273709551616], type=ty) + data = arr.to_pylist() + assert isinstance(data[0], pd.Timestamp) + restored = pa.array(data, type=ty) + assert arr.equals(restored) + assert restored.to_pylist() == [ + pd.Timestamp(9223371273709551616, unit='ns') + ] + + ty = pa.timestamp('ns', tz='US/Eastern') + value = 1604119893000000000 + arr = pa.array([value], type=ty) + data = arr.to_pylist() + assert isinstance(data[0], pd.Timestamp) + restored = pa.array(data, type=ty) + assert arr.equals(restored) + assert restored.to_pylist() == [ + pd.Timestamp(value, unit='ns').tz_localize( + "UTC").tz_convert('US/Eastern') + ] + + +@h.given(past.all_arrays) +def test_array_to_pylist_roundtrip(arr): + seq = arr.to_pylist() + restored = pa.array(seq, type=arr.type) + assert restored.equals(arr) + + +@pytest.mark.large_memory +def test_auto_chunking_binary_like(): + # single chunk + v1 = b'x' * 100000000 + v2 = b'x' * 147483646 + + # single chunk + one_chunk_data = [v1] * 20 + [b'', None, v2] + arr = pa.array(one_chunk_data, type=pa.binary()) + assert isinstance(arr, pa.Array) + assert len(arr) == 23 + assert arr[20].as_py() == b'' + assert arr[21].as_py() is None + assert arr[22].as_py() == v2 + + # two chunks + two_chunk_data = one_chunk_data + [b'two'] + arr = pa.array(two_chunk_data, type=pa.binary()) + assert isinstance(arr, pa.ChunkedArray) + assert arr.num_chunks == 2 + assert len(arr.chunk(0)) == 23 + assert len(arr.chunk(1)) == 1 + assert arr.chunk(0)[20].as_py() == b'' + assert arr.chunk(0)[21].as_py() is None + assert arr.chunk(0)[22].as_py() == v2 + assert arr.chunk(1).to_pylist() == [b'two'] + + # three chunks + three_chunk_data = one_chunk_data * 2 + [b'three', b'three'] + arr = pa.array(three_chunk_data, type=pa.binary()) + assert isinstance(arr, pa.ChunkedArray) + assert arr.num_chunks == 3 + assert len(arr.chunk(0)) == 23 + assert len(arr.chunk(1)) == 23 + assert len(arr.chunk(2)) == 2 + for i in range(2): + assert arr.chunk(i)[20].as_py() == b'' + assert arr.chunk(i)[21].as_py() is None + assert arr.chunk(i)[22].as_py() == v2 + assert arr.chunk(2).to_pylist() == [b'three', b'three'] + + +@pytest.mark.large_memory +def test_auto_chunking_list_of_binary(): + # ARROW-6281 + vals = [['x' * 1024]] * ((2 << 20) + 1) + arr = pa.array(vals) + assert isinstance(arr, pa.ChunkedArray) + assert arr.num_chunks == 2 + assert len(arr.chunk(0)) == 2**21 - 1 + assert len(arr.chunk(1)) == 2 + assert arr.chunk(1).to_pylist() == [['x' * 1024]] * 2 + + +@pytest.mark.large_memory +def 
test_auto_chunking_list_like(): + item = np.ones((2**28,), dtype='uint8') + data = [item] * (2**3 - 1) + arr = pa.array(data, type=pa.list_(pa.uint8())) + assert isinstance(arr, pa.Array) + assert len(arr) == 7 + + item = np.ones((2**28,), dtype='uint8') + data = [item] * 2**3 + arr = pa.array(data, type=pa.list_(pa.uint8())) + assert isinstance(arr, pa.ChunkedArray) + assert arr.num_chunks == 2 + assert len(arr.chunk(0)) == 7 + assert len(arr.chunk(1)) == 1 + chunk = arr.chunk(1) + scalar = chunk[0] + assert isinstance(scalar, pa.ListScalar) + expected = pa.array(item, type=pa.uint8()) + assert scalar.values == expected + + +@pytest.mark.slow +@pytest.mark.large_memory +def test_auto_chunking_map_type(): + # takes ~20 minutes locally + ty = pa.map_(pa.int8(), pa.int8()) + item = [(1, 1)] * 2**28 + data = [item] * 2**3 + arr = pa.array(data, type=ty) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr.chunk(0)) == 7 + assert len(arr.chunk(1)) == 1 + + +@pytest.mark.large_memory +@pytest.mark.parametrize(('ty', 'char'), [ + (pa.string(), 'x'), + (pa.binary(), b'x'), +]) +def test_nested_auto_chunking(ty, char): + v1 = char * 100000000 + v2 = char * 147483646 + + struct_type = pa.struct([ + pa.field('bool', pa.bool_()), + pa.field('integer', pa.int64()), + pa.field('string-like', ty), + ]) + + data = [{'bool': True, 'integer': 1, 'string-like': v1}] * 20 + data.append({'bool': True, 'integer': 1, 'string-like': v2}) + arr = pa.array(data, type=struct_type) + assert isinstance(arr, pa.Array) + + data.append({'bool': True, 'integer': 1, 'string-like': char}) + arr = pa.array(data, type=struct_type) + assert isinstance(arr, pa.ChunkedArray) + assert arr.num_chunks == 2 + assert len(arr.chunk(0)) == 21 + assert len(arr.chunk(1)) == 1 + assert arr.chunk(1)[0].as_py() == { + 'bool': True, + 'integer': 1, + 'string-like': char + } + + +@pytest.mark.large_memory +def test_array_from_pylist_data_overflow(): + # Regression test for ARROW-12983 + # Data buffer overflow - should result in chunked array + items = [b'a' * 4096] * (2 ** 19) + arr = pa.array(items, type=pa.string()) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr) == 2**19 + assert len(arr.chunks) > 1 + + mask = np.zeros(2**19, bool) + arr = pa.array(items, mask=mask, type=pa.string()) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr) == 2**19 + assert len(arr.chunks) > 1 + + arr = pa.array(items, type=pa.binary()) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr) == 2**19 + assert len(arr.chunks) > 1 + + +@pytest.mark.slow +@pytest.mark.large_memory +def test_array_from_pylist_offset_overflow(): + # Regression test for ARROW-12983 + # Offset buffer overflow - should result in chunked array + # Note this doesn't apply to primitive arrays + items = [b'a'] * (2 ** 31) + arr = pa.array(items, type=pa.string()) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr) == 2**31 + assert len(arr.chunks) > 1 + + mask = np.zeros(2**31, bool) + arr = pa.array(items, mask=mask, type=pa.string()) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr) == 2**31 + assert len(arr.chunks) > 1 + + arr = pa.array(items, type=pa.binary()) + assert isinstance(arr, pa.ChunkedArray) + assert len(arr) == 2**31 + assert len(arr.chunks) > 1 + + +@parametrize_with_collections_types +@pytest.mark.parametrize(('data', 'scalar_data', 'value_type'), [ + ([True, False, None], [pa.scalar(True), pa.scalar(False), None], pa.bool_()), + ( + [1, 2, None], + [pa.scalar(1), pa.scalar(2), pa.scalar(None, pa.int64())], + pa.int64() + 
), + ([1, None, None], [pa.scalar(1), None, pa.scalar(None, pa.int64())], pa.int64()), + ([None, None], [pa.scalar(None), pa.scalar(None)], pa.null()), + ([1., 2., None], [pa.scalar(1.), pa.scalar(2.), None], pa.float64()), + ( + [None, datetime.date.today()], + [None, pa.scalar(datetime.date.today())], + pa.date32() + ), + ( + [None, datetime.date.today()], + [None, pa.scalar(datetime.date.today(), pa.date64())], + pa.date64() + ), + ( + [datetime.time(1, 1, 1), None], + [pa.scalar(datetime.time(1, 1, 1)), None], + pa.time64('us') + ), + ( + [datetime.timedelta(seconds=10)], + [pa.scalar(datetime.timedelta(seconds=10))], + pa.duration('us') + ), + ( + [None, datetime.datetime(2014, 1, 1)], + [None, pa.scalar(datetime.datetime(2014, 1, 1))], + pa.timestamp('us') + ), + ( + [pa.MonthDayNano([1, -1, -10100])], + [pa.scalar(pa.MonthDayNano([1, -1, -10100]))], + pa.month_day_nano_interval() + ), + (["a", "b"], [pa.scalar("a"), pa.scalar("b")], pa.string()), + ([b"a", b"b"], [pa.scalar(b"a"), pa.scalar(b"b")], pa.binary()), + ( + [b"a", b"b"], + [pa.scalar(b"a", pa.binary(1)), pa.scalar(b"b", pa.binary(1))], + pa.binary(1) + ), + ([[1, 2, 3]], [pa.scalar([1, 2, 3])], pa.list_(pa.int64())), + ([["a", "b"]], [pa.scalar(["a", "b"])], pa.list_(pa.string())), + ([[1, 2, 3]], [pa.scalar([1, 2, 3], type=pa.list_view(pa.int64()))], + pa.list_view(pa.int64())), + ([["a", "b"]], [pa.scalar(["a", "b"], type=pa.list_view(pa.string()))], + pa.list_view(pa.string())), + ( + [1, 2, None], + [pa.scalar(1, type=pa.int8()), pa.scalar(2, type=pa.int8()), None], + pa.int8() + ), + ([1, None], [pa.scalar(1.0, type=pa.int32()), None], pa.int32()), + ( + ["aaa", "bbb"], + [pa.scalar("aaa", type=pa.binary(3)), pa.scalar("bbb", type=pa.binary(3))], + pa.binary(3)), + ([b"a"], [pa.scalar("a", type=pa.large_binary())], pa.large_binary()), + (["a"], [pa.scalar("a", type=pa.large_string())], pa.large_string()), + ([b"a"], [pa.scalar("a", type=pa.binary_view())], pa.binary_view()), + (["a"], [pa.scalar("a", type=pa.string_view())], pa.string_view()), + ( + ["a"], + [pa.scalar("a", type=pa.dictionary(pa.int64(), pa.string()))], + pa.dictionary(pa.int64(), pa.string()) + ), + ( + ["a", "b"], + [pa.scalar("a", pa.dictionary(pa.int64(), pa.string())), + pa.scalar("b", pa.dictionary(pa.int64(), pa.string()))], + pa.dictionary(pa.int64(), pa.string()) + ), + ( + [1], + [pa.scalar(1, type=pa.dictionary(pa.int64(), pa.int32()))], + pa.dictionary(pa.int64(), pa.int32()) + ), + ( + [(1, 2)], + [pa.scalar([('a', 1), ('b', 2)], type=pa.struct( + [('a', pa.int8()), ('b', pa.int8())]))], + pa.struct([('a', pa.int8()), ('b', pa.int8())]) + ), + ( + [(1, 'bar')], + [pa.scalar([('a', 1), ('b', 'bar')], type=pa.struct( + [('a', pa.int8()), ('b', pa.string())]))], + pa.struct([('a', pa.int8()), ('b', pa.string())]) + ) +]) +def test_array_accepts_pyarrow_scalar(seq, data, scalar_data, value_type): + if type(seq(scalar_data)) == set: + pytest.skip("The elements in the set get reordered.") + expect = pa.array(data, type=value_type) + result = pa.array(seq(scalar_data)) + assert expect.equals(result) + + result = pa.array(seq(scalar_data), type=value_type) + assert expect.equals(result) + + +@parametrize_with_collections_types +def test_array_accepts_pyarrow_scalar_errors(seq): + sequence = seq([pa.scalar(1), pa.scalar("a"), pa.scalar(3.0)]) + with pytest.raises(pa.ArrowInvalid, + match="cannot mix scalars with different types"): + pa.array(sequence) + + sequence = seq([1, pa.scalar("a"), None]) + with pytest.raises(pa.ArrowInvalid, + 
match="pyarrow scalars cannot be mixed with other " + "Python scalar values currently"): + pa.array(sequence) + + sequence = seq([np.float16("0.1"), pa.scalar("a"), None]) + with pytest.raises(pa.ArrowInvalid, + match="pyarrow scalars cannot be mixed with other " + "Python scalar values currently"): + pa.array(sequence) + + sequence = seq([pa.scalar("a"), np.float16("0.1"), None]) + with pytest.raises(pa.ArrowInvalid, + match="pyarrow scalars cannot be mixed with other " + "Python scalar values currently"): + pa.array(sequence) + + with pytest.raises(pa.ArrowInvalid, + match="Cannot append scalar of type string " + "to builder for type int32"): + pa.array([pa.scalar("a")], type=pa.int32()) + + with pytest.raises(pa.ArrowInvalid, + match="Cannot append scalar of type int64 " + "to builder for type null"): + pa.array([pa.scalar(1)], type=pa.null()) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..83800b77f894b7b348310d032c364d5d1f68948a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os.path +from os.path import join as pjoin + +from pyarrow._pyarrow_cpp_tests import get_cpp_tests + + +def inject_cpp_tests(ns): + """ + Inject C++ tests as Python functions into namespace `ns` (a dict). + """ + for case in get_cpp_tests(): + def wrapper(case=case): + case() + wrapper.__name__ = wrapper.__qualname__ = case.name + wrapper.__module__ = ns['__name__'] + ns[case.name] = wrapper + + +inject_cpp_tests(globals()) + + +def test_pyarrow_include(): + # We need to make sure that pyarrow/include is always + # created. Either with PyArrow C++ header files or with + # Arrow C++ and PyArrow C++ header files together + + source = os.path.dirname(os.path.abspath(__file__)) + pyarrow_dir = pjoin(source, '..') + pyarrow_include = pjoin(pyarrow_dir, 'include') + pyarrow_cpp_include = pjoin(pyarrow_include, 'arrow', 'python') + + assert os.path.exists(pyarrow_include) + assert os.path.exists(pyarrow_cpp_include) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_csv.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_csv.py new file mode 100644 index 0000000000000000000000000000000000000000..bc1dd8a09a7689b78fab32485eeed1acb643fce7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_csv.py @@ -0,0 +1,2018 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import abc +import bz2 +from datetime import date, datetime +from decimal import Decimal +import gc +import gzip +import io +import itertools +import os +import select +import shutil +import signal +import string +import tempfile +import threading +import time +import unittest +import weakref + +import pytest + +import numpy as np + +import pyarrow as pa +from pyarrow.csv import ( + open_csv, read_csv, ReadOptions, ParseOptions, ConvertOptions, ISO8601, + write_csv, WriteOptions, CSVWriter, InvalidRow) +from pyarrow.tests import util + + +def generate_col_names(): + # 'a', 'b'... 'z', then 'aa', 'ab'... + letters = string.ascii_lowercase + yield from letters + for first in letters: + for second in letters: + yield first + second + + +def make_random_csv(num_cols=2, num_rows=10, linesep='\r\n', write_names=True): + arr = np.random.RandomState(42).randint(0, 1000, size=(num_cols, num_rows)) + csv = io.StringIO() + col_names = list(itertools.islice(generate_col_names(), num_cols)) + if write_names: + csv.write(",".join(col_names)) + csv.write(linesep) + for row in arr.T: + csv.write(",".join(map(str, row))) + csv.write(linesep) + csv = csv.getvalue().encode() + columns = [pa.array(a, type=pa.int64()) for a in arr] + expected = pa.Table.from_arrays(columns, col_names) + return csv, expected + + +def make_empty_csv(column_names): + csv = io.StringIO() + csv.write(",".join(column_names)) + csv.write("\n") + return csv.getvalue().encode() + + +def check_options_class(cls, **attr_values): + """ + Check setting and getting attributes of an *Options class. 
+ """ + opts = cls() + + for name, values in attr_values.items(): + assert getattr(opts, name) == values[0], \ + "incorrect default value for " + name + for v in values: + setattr(opts, name, v) + assert getattr(opts, name) == v, "failed setting value" + + with pytest.raises(AttributeError): + opts.zzz_non_existent = True + + # Check constructor named arguments + non_defaults = {name: values[1] for name, values in attr_values.items()} + opts = cls(**non_defaults) + for name, value in non_defaults.items(): + assert getattr(opts, name) == value + + +# The various options classes need to be picklable for dataset +def check_options_class_pickling(cls, pickler, **attr_values): + opts = cls(**attr_values) + new_opts = pickler.loads(pickler.dumps(opts, + protocol=pickler.HIGHEST_PROTOCOL)) + for name, value in attr_values.items(): + assert getattr(new_opts, name) == value + + +class InvalidRowHandler: + def __init__(self, result): + self.result = result + self.rows = [] + + def __call__(self, row): + self.rows.append(row) + return self.result + + def __eq__(self, other): + return (isinstance(other, InvalidRowHandler) and + other.result == self.result) + + def __ne__(self, other): + return (not isinstance(other, InvalidRowHandler) or + other.result != self.result) + + +def test_read_options(pickle_module): + cls = ReadOptions + opts = cls() + + check_options_class(cls, use_threads=[True, False], + skip_rows=[0, 3], + column_names=[[], ["ab", "cd"]], + autogenerate_column_names=[False, True], + encoding=['utf8', 'utf16'], + skip_rows_after_names=[0, 27]) + + check_options_class_pickling(cls, pickler=pickle_module, + use_threads=True, + skip_rows=3, + column_names=["ab", "cd"], + autogenerate_column_names=False, + encoding='utf16', + skip_rows_after_names=27) + + assert opts.block_size > 0 + opts.block_size = 12345 + assert opts.block_size == 12345 + + opts = cls(block_size=1234) + assert opts.block_size == 1234 + + opts.validate() + + match = "ReadOptions: block_size must be at least 1: 0" + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.block_size = 0 + opts.validate() + + match = "ReadOptions: skip_rows cannot be negative: -1" + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.skip_rows = -1 + opts.validate() + + match = "ReadOptions: skip_rows_after_names cannot be negative: -1" + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.skip_rows_after_names = -1 + opts.validate() + + match = "ReadOptions: autogenerate_column_names cannot be true when" \ + " column_names are provided" + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.autogenerate_column_names = True + opts.column_names = ('a', 'b') + opts.validate() + + +def test_parse_options(pickle_module): + cls = ParseOptions + skip_handler = InvalidRowHandler('skip') + + check_options_class(cls, delimiter=[',', 'x'], + escape_char=[False, 'y'], + quote_char=['"', 'z', False], + double_quote=[True, False], + newlines_in_values=[False, True], + ignore_empty_lines=[True, False], + invalid_row_handler=[None, skip_handler]) + + check_options_class_pickling(cls, pickler=pickle_module, + delimiter='x', + escape_char='y', + quote_char=False, + double_quote=False, + newlines_in_values=True, + ignore_empty_lines=False, + invalid_row_handler=skip_handler) + + cls().validate() + opts = cls() + opts.delimiter = "\t" + opts.validate() + + match = "ParseOptions: delimiter cannot be \\\\r or \\\\n" + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + 
opts.delimiter = "\n" + opts.validate() + + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.delimiter = "\r" + opts.validate() + + match = "ParseOptions: quote_char cannot be \\\\r or \\\\n" + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.quote_char = "\n" + opts.validate() + + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.quote_char = "\r" + opts.validate() + + match = "ParseOptions: escape_char cannot be \\\\r or \\\\n" + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.escape_char = "\n" + opts.validate() + + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.escape_char = "\r" + opts.validate() + + +def test_convert_options(pickle_module): + cls = ConvertOptions + opts = cls() + + check_options_class( + cls, check_utf8=[True, False], + strings_can_be_null=[False, True], + quoted_strings_can_be_null=[True, False], + decimal_point=['.', ','], + include_columns=[[], ['def', 'abc']], + include_missing_columns=[False, True], + auto_dict_encode=[False, True], + timestamp_parsers=[[], [ISO8601, '%y-%m']]) + + check_options_class_pickling( + cls, pickler=pickle_module, + check_utf8=False, + strings_can_be_null=True, + quoted_strings_can_be_null=False, + decimal_point=',', + include_columns=['def', 'abc'], + include_missing_columns=False, + auto_dict_encode=True, + timestamp_parsers=[ISO8601, '%y-%m']) + + with pytest.raises(ValueError): + opts.decimal_point = '..' + + assert opts.auto_dict_max_cardinality > 0 + opts.auto_dict_max_cardinality = 99999 + assert opts.auto_dict_max_cardinality == 99999 + + assert opts.column_types == {} + # Pass column_types as mapping + opts.column_types = {'b': pa.int16(), 'c': pa.float32()} + assert opts.column_types == {'b': pa.int16(), 'c': pa.float32()} + opts.column_types = {'v': 'int16', 'w': 'null'} + assert opts.column_types == {'v': pa.int16(), 'w': pa.null()} + # Pass column_types as schema + schema = pa.schema([('a', pa.int32()), ('b', pa.string())]) + opts.column_types = schema + assert opts.column_types == {'a': pa.int32(), 'b': pa.string()} + # Pass column_types as sequence + opts.column_types = [('x', pa.binary())] + assert opts.column_types == {'x': pa.binary()} + + with pytest.raises(TypeError, match='DataType expected'): + opts.column_types = {'a': None} + with pytest.raises(TypeError): + opts.column_types = 0 + + assert isinstance(opts.null_values, list) + assert '' in opts.null_values + assert 'N/A' in opts.null_values + opts.null_values = ['xxx', 'yyy'] + assert opts.null_values == ['xxx', 'yyy'] + + assert isinstance(opts.true_values, list) + opts.true_values = ['xxx', 'yyy'] + assert opts.true_values == ['xxx', 'yyy'] + + assert isinstance(opts.false_values, list) + opts.false_values = ['xxx', 'yyy'] + assert opts.false_values == ['xxx', 'yyy'] + + assert opts.timestamp_parsers == [] + opts.timestamp_parsers = [ISO8601] + assert opts.timestamp_parsers == [ISO8601] + + opts = cls(column_types={'a': pa.null()}, + null_values=['N', 'nn'], true_values=['T', 'tt'], + false_values=['F', 'ff'], auto_dict_max_cardinality=999, + timestamp_parsers=[ISO8601, '%Y-%m-%d']) + assert opts.column_types == {'a': pa.null()} + assert opts.null_values == ['N', 'nn'] + assert opts.false_values == ['F', 'ff'] + assert opts.true_values == ['T', 'tt'] + assert opts.auto_dict_max_cardinality == 999 + assert opts.timestamp_parsers == [ISO8601, '%Y-%m-%d'] + + +def test_write_options(): + cls = WriteOptions + opts = cls() + + check_options_class( + 
cls, include_header=[True, False], delimiter=[',', '\t', '|'], + quoting_style=['needed', 'none', 'all_valid']) + + assert opts.batch_size > 0 + opts.batch_size = 12345 + assert opts.batch_size == 12345 + + opts = cls(batch_size=9876) + assert opts.batch_size == 9876 + + opts.validate() + + match = "WriteOptions: batch_size must be at least 1: 0" + with pytest.raises(pa.ArrowInvalid, match=match): + opts = cls() + opts.batch_size = 0 + opts.validate() + + +class BaseTestCSV(abc.ABC): + """Common tests which are shared by streaming and non streaming readers""" + + @abc.abstractmethod + def read_bytes(self, b, **kwargs): + """ + :param b: bytes to be parsed + :param kwargs: arguments passed on to open the csv file + :return: b parsed as a single RecordBatch + """ + raise NotImplementedError + + @property + @abc.abstractmethod + def use_threads(self): + """Whether this test is multi-threaded""" + raise NotImplementedError + + @staticmethod + def check_names(table, names): + assert table.num_columns == len(names) + assert table.column_names == names + + def test_header_skip_rows(self): + rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n" + + opts = ReadOptions() + opts.skip_rows = 1 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["ef", "gh"]) + assert table.to_pydict() == { + "ef": ["ij", "mn"], + "gh": ["kl", "op"], + } + + opts.skip_rows = 3 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["mn", "op"]) + assert table.to_pydict() == { + "mn": [], + "op": [], + } + + opts.skip_rows = 4 + with pytest.raises(pa.ArrowInvalid): + # Not enough rows + table = self.read_bytes(rows, read_options=opts) + + # Can skip rows with a different number of columns + rows = b"abcd\n,,,,,\nij,kl\nmn,op\n" + opts.skip_rows = 2 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["ij", "kl"]) + assert table.to_pydict() == { + "ij": ["mn"], + "kl": ["op"], + } + + # Can skip all rows exactly when columns are given + opts.skip_rows = 4 + opts.column_names = ['ij', 'kl'] + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["ij", "kl"]) + assert table.to_pydict() == { + "ij": [], + "kl": [], + } + + def test_skip_rows_after_names(self): + rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n" + + opts = ReadOptions() + opts.skip_rows_after_names = 1 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["ab", "cd"]) + assert table.to_pydict() == { + "ab": ["ij", "mn"], + "cd": ["kl", "op"], + } + + # Can skip exact number of rows + opts.skip_rows_after_names = 3 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["ab", "cd"]) + assert table.to_pydict() == { + "ab": [], + "cd": [], + } + + # Can skip beyond all rows + opts.skip_rows_after_names = 4 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["ab", "cd"]) + assert table.to_pydict() == { + "ab": [], + "cd": [], + } + + # Can skip rows with a different number of columns + rows = b"abcd\n,,,,,\nij,kl\nmn,op\n" + opts.skip_rows_after_names = 2 + opts.column_names = ["f0", "f1"] + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["f0", "f1"]) + assert table.to_pydict() == { + "f0": ["ij", "mn"], + "f1": ["kl", "op"], + } + opts = ReadOptions() + + # Can skip rows with new lines in the value + rows = b'ab,cd\n"e\nf","g\n\nh"\n"ij","k\nl"\nmn,op' + opts.skip_rows_after_names = 2 + parse_opts = ParseOptions() + parse_opts.newlines_in_values = True + table = self.read_bytes(rows, 
read_options=opts, + parse_options=parse_opts) + self.check_names(table, ["ab", "cd"]) + assert table.to_pydict() == { + "ab": ["mn"], + "cd": ["op"], + } + + # Can skip rows when block ends in middle of quoted value + opts.skip_rows_after_names = 2 + opts.block_size = 26 + table = self.read_bytes(rows, read_options=opts, + parse_options=parse_opts) + self.check_names(table, ["ab", "cd"]) + assert table.to_pydict() == { + "ab": ["mn"], + "cd": ["op"], + } + opts = ReadOptions() + + # Can skip rows that are beyond the first block without lexer + rows, expected = make_random_csv(num_cols=5, num_rows=1000) + opts.skip_rows_after_names = 900 + opts.block_size = len(rows) / 11 + table = self.read_bytes(rows, read_options=opts) + assert table.schema == expected.schema + assert table.num_rows == 100 + table_dict = table.to_pydict() + for name, values in expected.to_pydict().items(): + assert values[900:] == table_dict[name] + + # Can skip rows that are beyond the first block with lexer + table = self.read_bytes(rows, read_options=opts, + parse_options=parse_opts) + assert table.schema == expected.schema + assert table.num_rows == 100 + table_dict = table.to_pydict() + for name, values in expected.to_pydict().items(): + assert values[900:] == table_dict[name] + + # Skip rows and skip rows after names + rows, expected = make_random_csv(num_cols=5, num_rows=200, + write_names=False) + opts = ReadOptions() + opts.skip_rows = 37 + opts.skip_rows_after_names = 41 + opts.column_names = expected.schema.names + table = self.read_bytes(rows, read_options=opts, + parse_options=parse_opts) + assert table.schema == expected.schema + assert (table.num_rows == + expected.num_rows - opts.skip_rows - + opts.skip_rows_after_names) + table_dict = table.to_pydict() + for name, values in expected.to_pydict().items(): + assert (values[opts.skip_rows + opts.skip_rows_after_names:] == + table_dict[name]) + + def test_row_number_offset_in_errors(self): + # Row numbers are only correctly counted in serial reads + def format_msg(msg_format, row, *args): + if self.use_threads: + row_info = "" + else: + row_info = "Row #{}: ".format(row) + return msg_format.format(row_info, *args) + + csv, _ = make_random_csv(4, 100, write_names=True) + + read_options = ReadOptions() + read_options.block_size = len(csv) / 3 + convert_options = ConvertOptions() + convert_options.column_types = {"a": pa.int32()} + + # Test without skip_rows and column names in the csv + csv_bad_columns = csv + b"1,2\r\n" + message_columns = format_msg("{}Expected 4 columns, got 2", 102) + with pytest.raises(pa.ArrowInvalid, match=message_columns): + self.read_bytes(csv_bad_columns, + read_options=read_options, + convert_options=convert_options) + + csv_bad_type = csv + b"a,b,c,d\r\n" + message_value = format_msg( + "In CSV column #0: {}" + "CSV conversion error to int32: invalid value 'a'", + 102, csv) + with pytest.raises(pa.ArrowInvalid, match=message_value): + self.read_bytes(csv_bad_type, + read_options=read_options, + convert_options=convert_options) + + long_row = (b"this is a long row" * 15) + b",3\r\n" + csv_bad_columns_long = csv + long_row + message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 102, + long_row[0:96].decode("utf-8")) + with pytest.raises(pa.ArrowInvalid, match=message_long): + self.read_bytes(csv_bad_columns_long, + read_options=read_options, + convert_options=convert_options) + + # Test skipping rows after the names + read_options.skip_rows_after_names = 47 + + with pytest.raises(pa.ArrowInvalid, match=message_columns): + 
self.read_bytes(csv_bad_columns, + read_options=read_options, + convert_options=convert_options) + + with pytest.raises(pa.ArrowInvalid, match=message_value): + self.read_bytes(csv_bad_type, + read_options=read_options, + convert_options=convert_options) + + with pytest.raises(pa.ArrowInvalid, match=message_long): + self.read_bytes(csv_bad_columns_long, + read_options=read_options, + convert_options=convert_options) + + read_options.skip_rows_after_names = 0 + + # Test without skip_rows and column names not in the csv + csv, _ = make_random_csv(4, 100, write_names=False) + read_options.column_names = ["a", "b", "c", "d"] + csv_bad_columns = csv + b"1,2\r\n" + message_columns = format_msg("{}Expected 4 columns, got 2", 101) + with pytest.raises(pa.ArrowInvalid, match=message_columns): + self.read_bytes(csv_bad_columns, + read_options=read_options, + convert_options=convert_options) + + csv_bad_columns_long = csv + long_row + message_long = format_msg("{}Expected 4 columns, got 2: {} ...", 101, + long_row[0:96].decode("utf-8")) + with pytest.raises(pa.ArrowInvalid, match=message_long): + self.read_bytes(csv_bad_columns_long, + read_options=read_options, + convert_options=convert_options) + + csv_bad_type = csv + b"a,b,c,d\r\n" + message_value = format_msg( + "In CSV column #0: {}" + "CSV conversion error to int32: invalid value 'a'", + 101) + message_value = message_value.format(len(csv)) + with pytest.raises(pa.ArrowInvalid, match=message_value): + self.read_bytes(csv_bad_type, + read_options=read_options, + convert_options=convert_options) + + # Test with skip_rows and column names not in the csv + read_options.skip_rows = 23 + with pytest.raises(pa.ArrowInvalid, match=message_columns): + self.read_bytes(csv_bad_columns, + read_options=read_options, + convert_options=convert_options) + + with pytest.raises(pa.ArrowInvalid, match=message_value): + self.read_bytes(csv_bad_type, + read_options=read_options, + convert_options=convert_options) + + def test_invalid_row_handler(self, pickle_module): + rows = b"a,b\nc\nd,e\nf,g,h\ni,j\n" + parse_opts = ParseOptions() + with pytest.raises( + ValueError, + match="Expected 2 columns, got 1: c"): + self.read_bytes(rows, parse_options=parse_opts) + + # Skip requested + parse_opts.invalid_row_handler = InvalidRowHandler('skip') + table = self.read_bytes(rows, parse_options=parse_opts) + assert table.to_pydict() == { + 'a': ["d", "i"], + 'b': ["e", "j"], + } + + def row_num(x): + return None if self.use_threads else x + expected_rows = [ + InvalidRow(2, 1, row_num(2), "c"), + InvalidRow(2, 3, row_num(4), "f,g,h"), + ] + assert parse_opts.invalid_row_handler.rows == expected_rows + + # Error requested + parse_opts.invalid_row_handler = InvalidRowHandler('error') + with pytest.raises( + ValueError, + match="Expected 2 columns, got 1: c"): + self.read_bytes(rows, parse_options=parse_opts) + expected_rows = [InvalidRow(2, 1, row_num(2), "c")] + assert parse_opts.invalid_row_handler.rows == expected_rows + + # Test ser/de + parse_opts.invalid_row_handler = InvalidRowHandler('skip') + parse_opts = pickle_module.loads(pickle_module.dumps(parse_opts)) + + table = self.read_bytes(rows, parse_options=parse_opts) + assert table.to_pydict() == { + 'a': ["d", "i"], + 'b': ["e", "j"], + } + + def test_chunker_out_of_sync(self): + # GH-39892: if there are newlines in values, the parser may become + # out of sync with the chunker. In this case, we try to produce an + # informative error message. 
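+ # The quoted cell "f\n" below embeds a physical newline. With
+ # newlines_in_values=True every block size parses cleanly; without it,
+ # some block sizes split a row mid-cell and the reader reports
+ # "cell values spanning multiple lines".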
+ rows = b"""a,b,c\nd,e,"f\n"\ng,h,i\n""" + expected = { + 'a': ["d", "g"], + 'b': ["e", "h"], + 'c': ["f\n", "i"], + } + for block_size in range(8, 15): + # Sanity check: parsing works with newlines_in_values=True + d = self.read_bytes( + rows, parse_options=ParseOptions(newlines_in_values=True), + read_options=ReadOptions(block_size=block_size)).to_pydict() + assert d == expected + # With these block sizes, a block would end on the physical newline + # inside the quoted cell value, leading to a mismatch between + # CSV chunker and parser. + for block_size in range(8, 11): + with pytest.raises(ValueError, + match="cell values spanning multiple lines"): + self.read_bytes( + rows, read_options=ReadOptions(block_size=block_size)) + + +class BaseCSVTableRead(BaseTestCSV): + + def read_csv(self, csv, *args, validate_full=True, **kwargs): + """ + Reads the CSV file into memory using pyarrow's read_csv + csv The CSV bytes + args Positional arguments to be forwarded to pyarrow's read_csv + validate_full Whether or not to fully validate the resulting table + kwargs Keyword arguments to be forwarded to pyarrow's read_csv + """ + assert isinstance(self.use_threads, bool) # sanity check + read_options = kwargs.setdefault('read_options', ReadOptions()) + read_options.use_threads = self.use_threads + table = read_csv(csv, *args, **kwargs) + table.validate(full=validate_full) + return table + + def read_bytes(self, b, **kwargs): + return self.read_csv(pa.py_buffer(b), **kwargs) + + def test_file_object(self): + data = b"a,b\n1,2\n" + expected_data = {'a': [1], 'b': [2]} + bio = io.BytesIO(data) + table = self.read_csv(bio) + assert table.to_pydict() == expected_data + # Text files not allowed + sio = io.StringIO(data.decode()) + with pytest.raises(TypeError): + self.read_csv(sio) + + def test_header(self): + rows = b"abc,def,gh\n" + table = self.read_bytes(rows) + assert isinstance(table, pa.Table) + self.check_names(table, ["abc", "def", "gh"]) + assert table.num_rows == 0 + + def test_bom(self): + rows = b"\xef\xbb\xbfa,b\n1,2\n" + expected_data = {'a': [1], 'b': [2]} + table = self.read_bytes(rows) + assert table.to_pydict() == expected_data + + def test_one_chunk(self): + # ARROW-7661: lack of newline at end of file should not produce + # an additional chunk. 
+ rows = [b"a,b", b"1,2", b"3,4", b"56,78"] + for line_ending in [b'\n', b'\r', b'\r\n']: + for file_ending in [b'', line_ending]: + data = line_ending.join(rows) + file_ending + table = self.read_bytes(data) + assert len(table.to_batches()) == 1 + assert table.to_pydict() == { + "a": [1, 3, 56], + "b": [2, 4, 78], + } + + def test_header_column_names(self): + rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n" + + opts = ReadOptions() + opts.column_names = ["x", "y"] + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["x", "y"]) + assert table.to_pydict() == { + "x": ["ab", "ef", "ij", "mn"], + "y": ["cd", "gh", "kl", "op"], + } + + opts.skip_rows = 3 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["x", "y"]) + assert table.to_pydict() == { + "x": ["mn"], + "y": ["op"], + } + + opts.skip_rows = 4 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["x", "y"]) + assert table.to_pydict() == { + "x": [], + "y": [], + } + + opts.skip_rows = 5 + with pytest.raises(pa.ArrowInvalid): + # Not enough rows + table = self.read_bytes(rows, read_options=opts) + + # Unexpected number of columns + opts.skip_rows = 0 + opts.column_names = ["x", "y", "z"] + with pytest.raises(pa.ArrowInvalid, + match="Expected 3 columns, got 2"): + table = self.read_bytes(rows, read_options=opts) + + # Can skip rows with a different number of columns + rows = b"abcd\n,,,,,\nij,kl\nmn,op\n" + opts.skip_rows = 2 + opts.column_names = ["x", "y"] + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["x", "y"]) + assert table.to_pydict() == { + "x": ["ij", "mn"], + "y": ["kl", "op"], + } + + def test_header_autogenerate_column_names(self): + rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n" + + opts = ReadOptions() + opts.autogenerate_column_names = True + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["f0", "f1"]) + assert table.to_pydict() == { + "f0": ["ab", "ef", "ij", "mn"], + "f1": ["cd", "gh", "kl", "op"], + } + + opts.skip_rows = 3 + table = self.read_bytes(rows, read_options=opts) + self.check_names(table, ["f0", "f1"]) + assert table.to_pydict() == { + "f0": ["mn"], + "f1": ["op"], + } + + # Not enough rows, impossible to infer number of columns + opts.skip_rows = 4 + with pytest.raises(pa.ArrowInvalid): + table = self.read_bytes(rows, read_options=opts) + + def test_include_columns(self): + rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n" + + convert_options = ConvertOptions() + convert_options.include_columns = ['ab'] + table = self.read_bytes(rows, convert_options=convert_options) + self.check_names(table, ["ab"]) + assert table.to_pydict() == { + "ab": ["ef", "ij", "mn"], + } + + # Order of include_columns is respected, regardless of CSV order + convert_options.include_columns = ['cd', 'ab'] + table = self.read_bytes(rows, convert_options=convert_options) + schema = pa.schema([('cd', pa.string()), + ('ab', pa.string())]) + assert table.schema == schema + assert table.to_pydict() == { + "cd": ["gh", "kl", "op"], + "ab": ["ef", "ij", "mn"], + } + + # Include a column not in the CSV file => raises by default + convert_options.include_columns = ['xx', 'ab', 'yy'] + with pytest.raises(KeyError, + match="Column 'xx' in include_columns " + "does not exist in CSV file"): + self.read_bytes(rows, convert_options=convert_options) + + def test_include_missing_columns(self): + rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n" + + read_options = ReadOptions() + convert_options = ConvertOptions() + convert_options.include_columns = ['xx', 
'ab', 'yy'] + convert_options.include_missing_columns = True + table = self.read_bytes(rows, read_options=read_options, + convert_options=convert_options) + schema = pa.schema([('xx', pa.null()), + ('ab', pa.string()), + ('yy', pa.null())]) + assert table.schema == schema + assert table.to_pydict() == { + "xx": [None, None, None], + "ab": ["ef", "ij", "mn"], + "yy": [None, None, None], + } + + # Combining with `column_names` + read_options.column_names = ["xx", "yy"] + convert_options.include_columns = ["yy", "cd"] + table = self.read_bytes(rows, read_options=read_options, + convert_options=convert_options) + schema = pa.schema([('yy', pa.string()), + ('cd', pa.null())]) + assert table.schema == schema + assert table.to_pydict() == { + "yy": ["cd", "gh", "kl", "op"], + "cd": [None, None, None, None], + } + + # And with `column_types` as well + convert_options.column_types = {"yy": pa.binary(), + "cd": pa.int32()} + table = self.read_bytes(rows, read_options=read_options, + convert_options=convert_options) + schema = pa.schema([('yy', pa.binary()), + ('cd', pa.int32())]) + assert table.schema == schema + assert table.to_pydict() == { + "yy": [b"cd", b"gh", b"kl", b"op"], + "cd": [None, None, None, None], + } + + def test_simple_ints(self): + # Infer integer columns + rows = b"a,b,c\n1,2,3\n4,5,6\n" + table = self.read_bytes(rows) + schema = pa.schema([('a', pa.int64()), + ('b', pa.int64()), + ('c', pa.int64())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [1, 4], + 'b': [2, 5], + 'c': [3, 6], + } + + def test_simple_varied(self): + # Infer various kinds of data + rows = b"a,b,c,d\n1,2,3,0\n4.0,-5,foo,True\n" + table = self.read_bytes(rows) + schema = pa.schema([('a', pa.float64()), + ('b', pa.int64()), + ('c', pa.string()), + ('d', pa.bool_())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [1.0, 4.0], + 'b': [2, -5], + 'c': ["3", "foo"], + 'd': [False, True], + } + + def test_simple_nulls(self): + # Infer various kinds of data, with nulls + rows = (b"a,b,c,d,e,f\n" + b"1,2,,,3,N/A\n" + b"nan,-5,foo,,nan,TRUE\n" + b"4.5,#N/A,nan,,\xff,false\n") + table = self.read_bytes(rows) + schema = pa.schema([('a', pa.float64()), + ('b', pa.int64()), + ('c', pa.string()), + ('d', pa.null()), + ('e', pa.binary()), + ('f', pa.bool_())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [1.0, None, 4.5], + 'b': [2, -5, None], + 'c': ["", "foo", "nan"], + 'd': [None, None, None], + 'e': [b"3", b"nan", b"\xff"], + 'f': [None, True, False], + } + + def test_decimal_point(self): + # Infer floats with a custom decimal point + parse_options = ParseOptions(delimiter=';') + rows = b"a;b\n1.25;2,5\nNA;-3\n-4;NA" + + table = self.read_bytes(rows, parse_options=parse_options) + schema = pa.schema([('a', pa.float64()), + ('b', pa.string())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [1.25, None, -4.0], + 'b': ["2,5", "-3", "NA"], + } + + convert_options = ConvertOptions(decimal_point=',') + table = self.read_bytes(rows, parse_options=parse_options, + convert_options=convert_options) + schema = pa.schema([('a', pa.string()), + ('b', pa.float64())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': ["1.25", "NA", "-4"], + 'b': [2.5, -3.0, None], + } + + def test_simple_timestamps(self): + # Infer a timestamp column + rows = (b"a,b,c\n" + b"1970,1970-01-01 00:00:00,1970-01-01 00:00:00.123\n" + b"1989,1989-07-14 01:00:00,1989-07-14 01:00:00.123456\n") + table = self.read_bytes(rows) + schema = 
pa.schema([('a', pa.int64()), + ('b', pa.timestamp('s')), + ('c', pa.timestamp('ns'))]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [1970, 1989], + 'b': [datetime(1970, 1, 1), datetime(1989, 7, 14, 1)], + 'c': [datetime(1970, 1, 1, 0, 0, 0, 123000), + datetime(1989, 7, 14, 1, 0, 0, 123456)], + } + + def test_timestamp_parsers(self): + # Infer timestamps with custom parsers + rows = b"a,b\n1970/01/01,1980-01-01 00\n1970/01/02,1980-01-02 00\n" + opts = ConvertOptions() + + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.string()), + ('b', pa.timestamp('s'))]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': ['1970/01/01', '1970/01/02'], + 'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)], + } + + opts.timestamp_parsers = ['%Y/%m/%d'] + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.timestamp('s')), + ('b', pa.string())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)], + 'b': ['1980-01-01 00', '1980-01-02 00'], + } + + opts.timestamp_parsers = ['%Y/%m/%d', ISO8601] + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.timestamp('s')), + ('b', pa.timestamp('s'))]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [datetime(1970, 1, 1), datetime(1970, 1, 2)], + 'b': [datetime(1980, 1, 1), datetime(1980, 1, 2)], + } + + def test_dates(self): + # Dates are inferred as date32 by default + rows = b"a,b\n1970-01-01,1970-01-02\n1971-01-01,1971-01-02\n" + table = self.read_bytes(rows) + schema = pa.schema([('a', pa.date32()), + ('b', pa.date32())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [date(1970, 1, 1), date(1971, 1, 1)], + 'b': [date(1970, 1, 2), date(1971, 1, 2)], + } + + # Can ask for date types explicitly + opts = ConvertOptions() + opts.column_types = {'a': pa.date32(), 'b': pa.date64()} + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.date32()), + ('b', pa.date64())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [date(1970, 1, 1), date(1971, 1, 1)], + 'b': [date(1970, 1, 2), date(1971, 1, 2)], + } + + # Can ask for timestamp types explicitly + opts = ConvertOptions() + opts.column_types = {'a': pa.timestamp('s'), 'b': pa.timestamp('ms')} + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.timestamp('s')), + ('b', pa.timestamp('ms'))]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [datetime(1970, 1, 1), datetime(1971, 1, 1)], + 'b': [datetime(1970, 1, 2), datetime(1971, 1, 2)], + } + + def test_times(self): + # Times are inferred as time32[s] by default + from datetime import time + + rows = b"a,b\n12:34:56,12:34:56.789\n23:59:59,23:59:59.999\n" + table = self.read_bytes(rows) + # Column 'b' has subseconds, so cannot be inferred as time32[s] + schema = pa.schema([('a', pa.time32('s')), + ('b', pa.string())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [time(12, 34, 56), time(23, 59, 59)], + 'b': ["12:34:56.789", "23:59:59.999"], + } + + # Can ask for time types explicitly + opts = ConvertOptions() + opts.column_types = {'a': pa.time64('us'), 'b': pa.time32('ms')} + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.time64('us')), + ('b', pa.time32('ms'))]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [time(12, 34, 56), 
time(23, 59, 59)], + 'b': [time(12, 34, 56, 789000), time(23, 59, 59, 999000)], + } + + def test_auto_dict_encode(self): + opts = ConvertOptions(auto_dict_encode=True) + rows = "a,b\nab,1\ncdé,2\ncdé,3\nab,4".encode() + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.string())), + ('b', pa.int64())]) + expected = { + 'a': ["ab", "cdé", "cdé", "ab"], + 'b': [1, 2, 3, 4], + } + assert table.schema == schema + assert table.to_pydict() == expected + + opts.auto_dict_max_cardinality = 2 + table = self.read_bytes(rows, convert_options=opts) + assert table.schema == schema + assert table.to_pydict() == expected + + # Cardinality above max => plain-encoded + opts.auto_dict_max_cardinality = 1 + table = self.read_bytes(rows, convert_options=opts) + assert table.schema == pa.schema([('a', pa.string()), + ('b', pa.int64())]) + assert table.to_pydict() == expected + + # With invalid UTF8, not checked + opts.auto_dict_max_cardinality = 50 + opts.check_utf8 = False + rows = b"a,b\nab,1\ncd\xff,2\nab,3" + table = self.read_bytes(rows, convert_options=opts, + validate_full=False) + assert table.schema == schema + dict_values = table['a'].chunk(0).dictionary + assert len(dict_values) == 2 + assert dict_values[0].as_py() == "ab" + assert dict_values[1].as_buffer() == b"cd\xff" + + # With invalid UTF8, checked + opts.check_utf8 = True + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.dictionary(pa.int32(), pa.binary())), + ('b', pa.int64())]) + expected = { + 'a': [b"ab", b"cd\xff", b"ab"], + 'b': [1, 2, 3], + } + assert table.schema == schema + assert table.to_pydict() == expected + + def test_custom_nulls(self): + # Infer nulls with custom values + opts = ConvertOptions(null_values=['Xxx', 'Zzz']) + rows = b"""a,b,c,d\nZzz,"Xxx",1,2\nXxx,#N/A,,Zzz\n""" + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.null()), + ('b', pa.string()), + ('c', pa.string()), + ('d', pa.int64())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [None, None], + 'b': ["Xxx", "#N/A"], + 'c': ["1", ""], + 'd': [2, None], + } + + opts = ConvertOptions(null_values=['Xxx', 'Zzz'], + strings_can_be_null=True) + table = self.read_bytes(rows, convert_options=opts) + assert table.to_pydict() == { + 'a': [None, None], + 'b': [None, "#N/A"], + 'c': ["1", ""], + 'd': [2, None], + } + opts.quoted_strings_can_be_null = False + table = self.read_bytes(rows, convert_options=opts) + assert table.to_pydict() == { + 'a': [None, None], + 'b': ["Xxx", "#N/A"], + 'c': ["1", ""], + 'd': [2, None], + } + + opts = ConvertOptions(null_values=[]) + rows = b"a,b\n#N/A,\n" + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.string()), + ('b', pa.string())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': ["#N/A"], + 'b': [""], + } + + def test_custom_bools(self): + # Infer booleans with custom values + opts = ConvertOptions(true_values=['T', 'yes'], + false_values=['F', 'no']) + rows = (b"a,b,c\n" + b"True,T,t\n" + b"False,F,f\n" + b"True,yes,yes\n" + b"False,no,no\n" + b"N/A,N/A,N/A\n") + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.string()), + ('b', pa.bool_()), + ('c', pa.string())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': ["True", "False", "True", "False", "N/A"], + 'b': [True, False, True, False, None], + 'c': ["t", "f", "yes", "no", "N/A"], + } + + def test_column_types(self): + 
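+ # Types may be given as strings ('float32', 'boolean', 'null'), as
+ # pa.DataType instances, or as a pa.Schema; entries for columns absent
+ # from the CSV (such as 'zz') simply do not appear in the result.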
# Ask for specific column types in ConvertOptions + opts = ConvertOptions(column_types={'b': 'float32', + 'c': 'string', + 'd': 'boolean', + 'e': pa.decimal128(11, 2), + 'zz': 'null'}) + rows = b"a,b,c,d,e\n1,2,3,true,1.0\n4,-5,6,false,0\n" + table = self.read_bytes(rows, convert_options=opts) + schema = pa.schema([('a', pa.int64()), + ('b', pa.float32()), + ('c', pa.string()), + ('d', pa.bool_()), + ('e', pa.decimal128(11, 2))]) + expected = { + 'a': [1, 4], + 'b': [2.0, -5.0], + 'c': ["3", "6"], + 'd': [True, False], + 'e': [Decimal("1.00"), Decimal("0.00")] + } + assert table.schema == schema + assert table.to_pydict() == expected + # Pass column_types as schema + opts = ConvertOptions( + column_types=pa.schema([('b', pa.float32()), + ('c', pa.string()), + ('d', pa.bool_()), + ('e', pa.decimal128(11, 2)), + ('zz', pa.bool_())])) + table = self.read_bytes(rows, convert_options=opts) + assert table.schema == schema + assert table.to_pydict() == expected + # One of the columns in column_types fails converting + rows = b"a,b,c,d,e\n1,XXX,3,true,5\n4,-5,6,false,7\n" + with pytest.raises(pa.ArrowInvalid) as exc: + self.read_bytes(rows, convert_options=opts) + err = str(exc.value) + assert "In CSV column #1: " in err + assert "CSV conversion error to float: invalid value 'XXX'" in err + + def test_column_types_dict(self): + # Ask for dict-encoded column types in ConvertOptions + column_types = [ + ('a', pa.dictionary(pa.int32(), pa.utf8())), + ('b', pa.dictionary(pa.int32(), pa.int64())), + ('c', pa.dictionary(pa.int32(), pa.decimal128(11, 2))), + ('d', pa.dictionary(pa.int32(), pa.large_utf8()))] + + opts = ConvertOptions(column_types=dict(column_types)) + rows = (b"a,b,c,d\n" + b"abc,123456,1.0,zz\n" + b"defg,123456,0.5,xx\n" + b"abc,N/A,1.0,xx\n") + table = self.read_bytes(rows, convert_options=opts) + + schema = pa.schema(column_types) + expected = { + 'a': ["abc", "defg", "abc"], + 'b': [123456, 123456, None], + 'c': [Decimal("1.00"), Decimal("0.50"), Decimal("1.00")], + 'd': ["zz", "xx", "xx"], + } + assert table.schema == schema + assert table.to_pydict() == expected + + # Unsupported index type + column_types[0] = ('a', pa.dictionary(pa.int8(), pa.utf8())) + + opts = ConvertOptions(column_types=dict(column_types)) + with pytest.raises(NotImplementedError): + table = self.read_bytes(rows, convert_options=opts) + + def test_column_types_with_column_names(self): + # When both `column_names` and `column_types` are given, names + # in `column_types` should refer to names in `column_names` + rows = b"a,b\nc,d\ne,f\n" + read_options = ReadOptions(column_names=['x', 'y']) + convert_options = ConvertOptions(column_types={'x': pa.binary()}) + table = self.read_bytes(rows, read_options=read_options, + convert_options=convert_options) + schema = pa.schema([('x', pa.binary()), + ('y', pa.string())]) + assert table.schema == schema + assert table.to_pydict() == { + 'x': [b'a', b'c', b'e'], + 'y': ['b', 'd', 'f'], + } + + def test_no_ending_newline(self): + # No \n after last line + rows = b"a,b,c\n1,2,3\n4,5,6" + table = self.read_bytes(rows) + assert table.to_pydict() == { + 'a': [1, 4], + 'b': [2, 5], + 'c': [3, 6], + } + + def test_trivial(self): + # A bit pointless, but at least it shouldn't crash + rows = b",\n\n" + table = self.read_bytes(rows) + assert table.to_pydict() == {'': []} + + def test_empty_lines(self): + rows = b"a,b\n\r1,2\r\n\r\n3,4\r\n" + table = self.read_bytes(rows) + assert table.to_pydict() == { + 'a': [1, 3], + 'b': [2, 4], + } + parse_options = 
ParseOptions(ignore_empty_lines=False) + table = self.read_bytes(rows, parse_options=parse_options) + assert table.to_pydict() == { + 'a': [None, 1, None, 3], + 'b': [None, 2, None, 4], + } + read_options = ReadOptions(skip_rows=2) + table = self.read_bytes(rows, parse_options=parse_options, + read_options=read_options) + assert table.to_pydict() == { + '1': [None, 3], + '2': [None, 4], + } + + def test_invalid_csv(self): + # Various CSV errors + rows = b"a,b,c\n1,2\n4,5,6\n" + with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 2"): + self.read_bytes(rows) + rows = b"a,b,c\n1,2,3\n4" + with pytest.raises(pa.ArrowInvalid, match="Expected 3 columns, got 1"): + self.read_bytes(rows) + for rows in [b"", b"\n", b"\r\n", b"\r", b"\n\n"]: + with pytest.raises(pa.ArrowInvalid, match="Empty CSV file"): + self.read_bytes(rows) + + def test_options_delimiter(self): + rows = b"a;b,c\nde,fg;eh\n" + table = self.read_bytes(rows) + assert table.to_pydict() == { + 'a;b': ['de'], + 'c': ['fg;eh'], + } + opts = ParseOptions(delimiter=';') + table = self.read_bytes(rows, parse_options=opts) + assert table.to_pydict() == { + 'a': ['de,fg'], + 'b,c': ['eh'], + } + + def test_small_random_csv(self): + csv, expected = make_random_csv(num_cols=2, num_rows=10) + table = self.read_bytes(csv) + assert table.schema == expected.schema + assert table.equals(expected) + assert table.to_pydict() == expected.to_pydict() + + def test_stress_block_sizes(self): + # Test a number of small block sizes to stress block stitching + csv_base, expected = make_random_csv(num_cols=2, num_rows=500) + block_sizes = [11, 12, 13, 17, 37, 111] + csvs = [csv_base, csv_base.rstrip(b'\r\n')] + for csv in csvs: + for block_size in block_sizes: + read_options = ReadOptions(block_size=block_size) + table = self.read_bytes(csv, read_options=read_options) + assert table.schema == expected.schema + if not table.equals(expected): + # Better error output + assert table.to_pydict() == expected.to_pydict() + + def test_stress_convert_options_blowup(self): + # ARROW-6481: A convert_options with a very large number of columns + # should not blow memory and CPU time. + try: + clock = time.thread_time + except AttributeError: + clock = time.time + num_columns = 10000 + col_names = ["K{}".format(i) for i in range(num_columns)] + csv = make_empty_csv(col_names) + t1 = clock() + convert_options = ConvertOptions( + column_types={k: pa.string() for k in col_names[::2]}) + table = self.read_bytes(csv, convert_options=convert_options) + dt = clock() - t1 + # Check that processing time didn't blow up. + # This is a conservative check (it takes less than 300 ms + # in debug mode on my local machine). + assert dt <= 10.0 + # Check result + assert table.num_columns == num_columns + assert table.num_rows == 0 + assert table.column_names == col_names + + def test_cancellation(self): + if (threading.current_thread().ident != + threading.main_thread().ident): + pytest.skip("test only works from main Python thread") + # Skips test if not available + raise_signal = util.get_raise_signal() + signum = signal.SIGINT + + def signal_from_thread(): + # Give our workload a chance to start up + time.sleep(0.2) + raise_signal(signum) + + # We start with a small CSV reading workload and increase its size + # until it's large enough to get an interruption during it, even in + # release mode on fast machines. 
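+ # The workload is tripled on each retry until the read runs long enough
+ # for the signal to land inside the Arrow C++ read itself rather than in
+ # the surrounding Python code.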
+ last_duration = 0.0 + workload_size = 100_000 + attempts = 0 + + while last_duration < 5.0 and attempts < 10: + print("workload size:", workload_size) + large_csv = b"a,b,c\n" + b"1,2,3\n" * workload_size + exc_info = None + + try: + # We use a signal fd to reliably ensure that the signal + # has been delivered to Python, regardless of how exactly + # it was caught. + with util.signal_wakeup_fd() as sigfd: + try: + t = threading.Thread(target=signal_from_thread) + t.start() + t1 = time.time() + try: + self.read_bytes(large_csv) + except KeyboardInterrupt as e: + exc_info = e + last_duration = time.time() - t1 + finally: + # Wait for signal to arrive if it didn't already, + # to avoid getting a KeyboardInterrupt after the + # `except` block below. + select.select([sigfd], [], [sigfd], 10.0) + + except KeyboardInterrupt: + # KeyboardInterrupt didn't interrupt `read_bytes` above. + pass + + if exc_info is not None: + # We managed to get `self.read_bytes` interrupted, see if it + # was actually interrupted inside Arrow C++ or in the Python + # scaffolding. + if exc_info.__context__ is not None: + # Interrupted inside Arrow C++, we're satisfied now + break + + # Increase workload size to get a better chance + workload_size = workload_size * 3 + + if exc_info is None: + pytest.fail("Failed to get an interruption during CSV reading") + + # Interruption should have arrived timely + assert last_duration <= 1.0 + e = exc_info.__context__ + assert isinstance(e, pa.ArrowCancelled) + assert e.signum == signum + + def test_cancellation_disabled(self): + # ARROW-12622: reader would segfault when the cancelling signal + # handler was not enabled (e.g. if disabled, or if not on the + # main thread) + t = threading.Thread( + target=lambda: self.read_bytes(b"f64\n0.1")) + t.start() + t.join() + + +class TestSerialCSVTableRead(BaseCSVTableRead): + @property + def use_threads(self): + return False + + +class TestThreadedCSVTableRead(BaseCSVTableRead): + @property + def use_threads(self): + return True + + +class BaseStreamingCSVRead(BaseTestCSV): + + def open_csv(self, csv, *args, **kwargs): + """ + Reads the CSV file into memory using pyarrow's open_csv + csv The CSV bytes + args Positional arguments to be forwarded to pyarrow's open_csv + kwargs Keyword arguments to be forwarded to pyarrow's open_csv + """ + read_options = kwargs.setdefault('read_options', ReadOptions()) + read_options.use_threads = self.use_threads + return open_csv(csv, *args, **kwargs) + + def open_bytes(self, b, **kwargs): + return self.open_csv(pa.py_buffer(b), **kwargs) + + def check_reader(self, reader, expected_schema, expected_data): + assert reader.schema == expected_schema + batches = list(reader) + assert len(batches) == len(expected_data) + for batch, expected_batch in zip(batches, expected_data): + batch.validate(full=True) + assert batch.schema == expected_schema + assert batch.to_pydict() == expected_batch + + def read_bytes(self, b, **kwargs): + return self.open_bytes(b, **kwargs).read_all() + + def test_file_object(self): + data = b"a,b\n1,2\n3,4\n" + expected_data = {'a': [1, 3], 'b': [2, 4]} + bio = io.BytesIO(data) + reader = self.open_csv(bio) + expected_schema = pa.schema([('a', pa.int64()), + ('b', pa.int64())]) + self.check_reader(reader, expected_schema, [expected_data]) + + def test_header(self): + rows = b"abc,def,gh\n" + reader = self.open_bytes(rows) + expected_schema = pa.schema([('abc', pa.null()), + ('def', pa.null()), + ('gh', pa.null())]) + self.check_reader(reader, expected_schema, []) + + def 
test_inference(self): + # Inference is done on first block + rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n" + expected_schema = pa.schema([('a', pa.string()), + ('b', pa.binary())]) + + read_options = ReadOptions() + read_options.block_size = len(rows) + reader = self.open_bytes(rows, read_options=read_options) + self.check_reader(reader, expected_schema, + [{'a': ['123', 'abc', 'gh'], + 'b': [b'456', b'de\xff', b'ij']}]) + + read_options.block_size = len(rows) - 1 + reader = self.open_bytes(rows, read_options=read_options) + self.check_reader(reader, expected_schema, + [{'a': ['123', 'abc'], + 'b': [b'456', b'de\xff']}, + {'a': ['gh'], + 'b': [b'ij']}]) + + def test_inference_failure(self): + # Inference on first block, then conversion failure on second block + rows = b"a,b\n123,456\nabc,de\xff\ngh,ij\n" + read_options = ReadOptions() + read_options.block_size = len(rows) - 7 + reader = self.open_bytes(rows, read_options=read_options) + expected_schema = pa.schema([('a', pa.int64()), + ('b', pa.int64())]) + assert reader.schema == expected_schema + assert reader.read_next_batch().to_pydict() == { + 'a': [123], 'b': [456] + } + # Second block + with pytest.raises(ValueError, + match="CSV conversion error to int64"): + reader.read_next_batch() + # EOF + with pytest.raises(StopIteration): + reader.read_next_batch() + + def test_invalid_csv(self): + # CSV errors on first block + rows = b"a,b\n1,2,3\n4,5\n6,7\n" + read_options = ReadOptions() + read_options.block_size = 10 + with pytest.raises(pa.ArrowInvalid, + match="Expected 2 columns, got 3"): + reader = self.open_bytes( + rows, read_options=read_options) + + # CSV errors on second block + rows = b"a,b\n1,2\n3,4,5\n6,7\n" + read_options.block_size = 8 + reader = self.open_bytes(rows, read_options=read_options) + assert reader.read_next_batch().to_pydict() == {'a': [1], 'b': [2]} + with pytest.raises(pa.ArrowInvalid, + match="Expected 2 columns, got 3"): + reader.read_next_batch() + # Cannot continue after a parse error + with pytest.raises(StopIteration): + reader.read_next_batch() + + def test_options_delimiter(self): + rows = b"a;b,c\nde,fg;eh\n" + reader = self.open_bytes(rows) + expected_schema = pa.schema([('a;b', pa.string()), + ('c', pa.string())]) + self.check_reader(reader, expected_schema, + [{'a;b': ['de'], + 'c': ['fg;eh']}]) + + opts = ParseOptions(delimiter=';') + reader = self.open_bytes(rows, parse_options=opts) + expected_schema = pa.schema([('a', pa.string()), + ('b,c', pa.string())]) + self.check_reader(reader, expected_schema, + [{'a': ['de,fg'], + 'b,c': ['eh']}]) + + def test_no_ending_newline(self): + # No \n after last line + rows = b"a,b,c\n1,2,3\n4,5,6" + reader = self.open_bytes(rows) + expected_schema = pa.schema([('a', pa.int64()), + ('b', pa.int64()), + ('c', pa.int64())]) + self.check_reader(reader, expected_schema, + [{'a': [1, 4], + 'b': [2, 5], + 'c': [3, 6]}]) + + def test_empty_file(self): + with pytest.raises(ValueError, match="Empty CSV file"): + self.open_bytes(b"") + + def test_column_options(self): + # With column_names + rows = b"1,2,3\n4,5,6" + read_options = ReadOptions() + read_options.column_names = ['d', 'e', 'f'] + reader = self.open_bytes(rows, read_options=read_options) + expected_schema = pa.schema([('d', pa.int64()), + ('e', pa.int64()), + ('f', pa.int64())]) + self.check_reader(reader, expected_schema, + [{'d': [1, 4], + 'e': [2, 5], + 'f': [3, 6]}]) + + # With include_columns + convert_options = ConvertOptions() + convert_options.include_columns = ['f', 'e'] + reader = 
self.open_bytes(rows, read_options=read_options, + convert_options=convert_options) + expected_schema = pa.schema([('f', pa.int64()), + ('e', pa.int64())]) + self.check_reader(reader, expected_schema, + [{'e': [2, 5], + 'f': [3, 6]}]) + + # With column_types + convert_options.column_types = {'e': pa.string()} + reader = self.open_bytes(rows, read_options=read_options, + convert_options=convert_options) + expected_schema = pa.schema([('f', pa.int64()), + ('e', pa.string())]) + self.check_reader(reader, expected_schema, + [{'e': ["2", "5"], + 'f': [3, 6]}]) + + # Missing columns in include_columns + convert_options.include_columns = ['g', 'f', 'e'] + with pytest.raises( + KeyError, + match="Column 'g' in include_columns does not exist"): + reader = self.open_bytes(rows, read_options=read_options, + convert_options=convert_options) + + convert_options.include_missing_columns = True + reader = self.open_bytes(rows, read_options=read_options, + convert_options=convert_options) + expected_schema = pa.schema([('g', pa.null()), + ('f', pa.int64()), + ('e', pa.string())]) + self.check_reader(reader, expected_schema, + [{'g': [None, None], + 'e': ["2", "5"], + 'f': [3, 6]}]) + + convert_options.column_types = {'e': pa.string(), 'g': pa.float64()} + reader = self.open_bytes(rows, read_options=read_options, + convert_options=convert_options) + expected_schema = pa.schema([('g', pa.float64()), + ('f', pa.int64()), + ('e', pa.string())]) + self.check_reader(reader, expected_schema, + [{'g': [None, None], + 'e': ["2", "5"], + 'f': [3, 6]}]) + + def test_encoding(self): + # latin-1 (invalid utf-8) + rows = b"a,b\nun,\xe9l\xe9phant" + read_options = ReadOptions() + reader = self.open_bytes(rows, read_options=read_options) + expected_schema = pa.schema([('a', pa.string()), + ('b', pa.binary())]) + self.check_reader(reader, expected_schema, + [{'a': ["un"], + 'b': [b"\xe9l\xe9phant"]}]) + + read_options.encoding = 'latin1' + reader = self.open_bytes(rows, read_options=read_options) + expected_schema = pa.schema([('a', pa.string()), + ('b', pa.string())]) + self.check_reader(reader, expected_schema, + [{'a': ["un"], + 'b': ["éléphant"]}]) + + # utf-16 + rows = (b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,' + b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00') + read_options.encoding = 'utf16' + reader = self.open_bytes(rows, read_options=read_options) + expected_schema = pa.schema([('a', pa.string()), + ('b', pa.string())]) + self.check_reader(reader, expected_schema, + [{'a': ["un"], + 'b': ["éléphant"]}]) + + def test_small_random_csv(self): + csv, expected = make_random_csv(num_cols=2, num_rows=10) + reader = self.open_bytes(csv) + table = reader.read_all() + assert table.schema == expected.schema + assert table.equals(expected) + assert table.to_pydict() == expected.to_pydict() + + def test_stress_block_sizes(self): + # Test a number of small block sizes to stress block stitching + csv_base, expected = make_random_csv(num_cols=2, num_rows=500) + block_sizes = [19, 21, 23, 26, 37, 111] + csvs = [csv_base, csv_base.rstrip(b'\r\n')] + for csv in csvs: + for block_size in block_sizes: + # Need at least two lines for type inference + assert csv[:block_size].count(b'\n') >= 2 + read_options = ReadOptions(block_size=block_size) + reader = self.open_bytes( + csv, read_options=read_options) + table = reader.read_all() + assert table.schema == expected.schema + if not table.equals(expected): + # Better error output + assert table.to_pydict() == expected.to_pydict() + + def test_batch_lifetime(self): + gc.collect() 
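+ # Record the allocation baseline: each streamed batch should be released
+ # once the next one is read, and allocated bytes must return to this
+ # baseline after the reader is exhausted.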
+ old_allocated = pa.total_allocated_bytes() + + # Memory occupation should not grow with CSV file size + def check_one_batch(reader, expected): + batch = reader.read_next_batch() + assert batch.to_pydict() == expected + + rows = b"10,11\n12,13\n14,15\n16,17\n" + read_options = ReadOptions() + read_options.column_names = ['a', 'b'] + read_options.block_size = 6 + reader = self.open_bytes(rows, read_options=read_options) + check_one_batch(reader, {'a': [10], 'b': [11]}) + allocated_after_first_batch = pa.total_allocated_bytes() + check_one_batch(reader, {'a': [12], 'b': [13]}) + assert pa.total_allocated_bytes() <= allocated_after_first_batch + check_one_batch(reader, {'a': [14], 'b': [15]}) + assert pa.total_allocated_bytes() <= allocated_after_first_batch + check_one_batch(reader, {'a': [16], 'b': [17]}) + assert pa.total_allocated_bytes() <= allocated_after_first_batch + with pytest.raises(StopIteration): + reader.read_next_batch() + assert pa.total_allocated_bytes() == old_allocated + reader = None + assert pa.total_allocated_bytes() == old_allocated + + def test_header_skip_rows(self): + super().test_header_skip_rows() + + rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n" + + # Skipping all rows immediately results in end of iteration + opts = ReadOptions() + opts.skip_rows = 4 + opts.column_names = ['ab', 'cd'] + reader = self.open_bytes(rows, read_options=opts) + with pytest.raises(StopIteration): + assert reader.read_next_batch() + + def test_skip_rows_after_names(self): + super().test_skip_rows_after_names() + + rows = b"ab,cd\nef,gh\nij,kl\nmn,op\n" + + # Skipping all rows immediately results in end of iteration + opts = ReadOptions() + opts.skip_rows_after_names = 3 + reader = self.open_bytes(rows, read_options=opts) + with pytest.raises(StopIteration): + assert reader.read_next_batch() + + # Skipping beyond all rows immediately results in end of iteration + opts.skip_rows_after_names = 99999 + reader = self.open_bytes(rows, read_options=opts) + with pytest.raises(StopIteration): + assert reader.read_next_batch() + + +class TestSerialStreamingCSVRead(BaseStreamingCSVRead): + @property + def use_threads(self): + return False + + +class TestThreadedStreamingCSVRead(BaseStreamingCSVRead): + @property + def use_threads(self): + return True + + +class BaseTestCompressedCSVRead: + + def setUp(self): + self.tmpdir = tempfile.mkdtemp(prefix='arrow-csv-test-') + + def tearDown(self): + shutil.rmtree(self.tmpdir) + + def read_csv(self, csv_path): + try: + return read_csv(csv_path) + except pa.ArrowNotImplementedError as e: + pytest.skip(str(e)) + + def test_random_csv(self): + csv, expected = make_random_csv(num_cols=2, num_rows=100) + csv_path = os.path.join(self.tmpdir, self.csv_filename) + self.write_file(csv_path, csv) + table = self.read_csv(csv_path) + table.validate(full=True) + assert table.schema == expected.schema + assert table.equals(expected) + assert table.to_pydict() == expected.to_pydict() + + +class TestGZipCSVRead(BaseTestCompressedCSVRead, unittest.TestCase): + csv_filename = "compressed.csv.gz" + + def write_file(self, path, contents): + with gzip.open(path, 'wb', 3) as f: + f.write(contents) + + def test_concatenated(self): + # ARROW-5974 + csv_path = os.path.join(self.tmpdir, self.csv_filename) + with gzip.open(csv_path, 'wb', 3) as f: + f.write(b"ab,cd\nef,gh\n") + with gzip.open(csv_path, 'ab', 3) as f: + f.write(b"ij,kl\nmn,op\n") + table = self.read_csv(csv_path) + assert table.to_pydict() == { + 'ab': ['ef', 'ij', 'mn'], + 'cd': ['gh', 'kl', 'op'], + } + + +class 
TestBZ2CSVRead(BaseTestCompressedCSVRead, unittest.TestCase): + csv_filename = "compressed.csv.bz2" + + def write_file(self, path, contents): + with bz2.BZ2File(path, 'w') as f: + f.write(contents) + + +def test_read_csv_does_not_close_passed_file_handles(): + # ARROW-4823 + buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6") + read_csv(buf) + assert not buf.closed + + +def test_write_read_round_trip(): + t = pa.Table.from_arrays([[1, 2, 3], ["a", "b", "c"]], ["c1", "c2"]) + record_batch = t.to_batches(max_chunksize=4)[0] + for data in [t, record_batch]: + # Test with header + buf = io.BytesIO() + write_csv(data, buf, WriteOptions(include_header=True)) + buf.seek(0) + assert t == read_csv(buf) + + # Test without header + buf = io.BytesIO() + write_csv(data, buf, WriteOptions(include_header=False)) + buf.seek(0) + + read_options = ReadOptions(column_names=t.column_names) + assert t == read_csv(buf, read_options=read_options) + + # Test with writer + for read_options, parse_options, write_options in [ + (None, None, WriteOptions(include_header=True)), + (ReadOptions(column_names=t.column_names), None, + WriteOptions(include_header=False)), + (None, ParseOptions(delimiter='|'), + WriteOptions(include_header=True, delimiter='|')), + (ReadOptions(column_names=t.column_names), + ParseOptions(delimiter='\t'), + WriteOptions(include_header=False, delimiter='\t')), + ]: + buf = io.BytesIO() + with CSVWriter(buf, t.schema, write_options=write_options) as writer: + writer.write_table(t) + buf.seek(0) + assert t == read_csv(buf, read_options=read_options, + parse_options=parse_options) + buf = io.BytesIO() + with CSVWriter(buf, t.schema, write_options=write_options) as writer: + for batch in t.to_batches(max_chunksize=1): + writer.write_batch(batch) + buf.seek(0) + assert t == read_csv(buf, read_options=read_options, + parse_options=parse_options) + + +def test_write_quoting_style(): + t = pa.Table.from_arrays([[1, 2, None], ["a", None, "c"]], ["c1", "c2"]) + buf = io.BytesIO() + for write_options, res in [ + (WriteOptions(quoting_style='none'), b'"c1","c2"\n1,a\n2,\n,c\n'), + (WriteOptions(), b'"c1","c2"\n1,"a"\n2,\n,"c"\n'), + (WriteOptions(quoting_style='all_valid'), + b'"c1","c2"\n"1","a"\n"2",\n,"c"\n'), + ]: + with CSVWriter(buf, t.schema, write_options=write_options) as writer: + writer.write_table(t) + assert buf.getvalue() == res + buf.seek(0) + + # Test writing special characters with different quoting styles + t = pa.Table.from_arrays([[",", "\""]], ["c1"]) + buf = io.BytesIO() + for write_options, res in [ + (WriteOptions(quoting_style='needed'), b'"c1"\n","\n""""\n'), + (WriteOptions(quoting_style='none'), pa.lib.ArrowInvalid), + ]: + with CSVWriter(buf, t.schema, write_options=write_options) as writer: + try: + writer.write_table(t) + except Exception as e: + # This will trigger when we try to write a comma (,) + # without quotes, which is invalid + assert isinstance(e, res) + break + assert buf.getvalue() == res + buf.seek(0) + + +def test_read_csv_reference_cycle(): + # ARROW-13187 + def inner(): + buf = io.BytesIO(b"a,b,c\n1,2,3\n4,5,6") + table = read_csv(buf) + return weakref.ref(table) + + with util.disabled_gc(): + wr = inner() + assert wr() is None + + +@pytest.mark.parametrize("type_factory", ( + lambda: pa.decimal128(20, 1), + lambda: pa.decimal128(38, 15), + lambda: pa.decimal256(20, 1), + lambda: pa.decimal256(76, 10), +)) +def test_write_csv_decimal(tmpdir, type_factory): + type = type_factory() + table = pa.table({"col": pa.array([1, 2]).cast(type)}) + + write_csv(table, tmpdir / 
"out.csv") + out = read_csv(tmpdir / "out.csv") + + assert out.column('col').cast(type) == table.column('col') + + +def test_read_csv_gil_deadlock(): + # GH-38676 + # This test depends on several preconditions: + # - the CSV input is a Python file object + # - reading the CSV file produces an error + data = b"a,b,c" + + class MyBytesIO(io.BytesIO): + def read(self, *args): + time.sleep(0.001) + return super().read(*args) + + def readinto(self, *args): + time.sleep(0.001) + return super().readinto(*args) + + for i in range(20): + with pytest.raises(pa.ArrowInvalid): + read_csv(MyBytesIO(data)) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..43cd16a3cf666ddc36be003adb3125259d632342 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda.py @@ -0,0 +1,794 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +UNTESTED: +read_message +""" + +import sys +import sysconfig + +import pytest + +import pyarrow as pa +import numpy as np + + +cuda = pytest.importorskip("pyarrow.cuda") + +platform = sysconfig.get_platform() +# TODO: enable ppc64 when Arrow C++ supports IPC in ppc64 systems: +has_ipc_support = platform == 'linux-x86_64' # or 'ppc64' in platform + +cuda_ipc = pytest.mark.skipif( + not has_ipc_support, + reason='CUDA IPC not supported in platform `%s`' % (platform)) + +global_context = None # for flake8 +global_context1 = None # for flake8 + + +def setup_module(module): + module.global_context = cuda.Context(0) + module.global_context1 = cuda.Context(cuda.Context.get_num_devices() - 1) + + +def teardown_module(module): + del module.global_context + + +def test_Context(): + assert cuda.Context.get_num_devices() > 0 + assert global_context.device_number == 0 + assert global_context1.device_number == cuda.Context.get_num_devices() - 1 + + with pytest.raises(ValueError, + match=("device_number argument must " + "be non-negative less than")): + cuda.Context(cuda.Context.get_num_devices()) + + +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_manage_allocate_free_host(size): + buf = cuda.new_host_buffer(size) + arr = np.frombuffer(buf, dtype=np.uint8) + arr[size//4:3*size//4] = 1 + arr_cp = arr.copy() + arr2 = np.frombuffer(buf, dtype=np.uint8) + np.testing.assert_equal(arr2, arr_cp) + assert buf.size == size + + +def test_context_allocate_del(): + bytes_allocated = global_context.bytes_allocated + cudabuf = global_context.new_buffer(128) + assert global_context.bytes_allocated == bytes_allocated + 128 + del cudabuf + assert global_context.bytes_allocated == bytes_allocated + + +def make_random_buffer(size, target='host'): + """Return a host or device buffer with random 
data. + """ + if target == 'host': + assert size >= 0 + buf = pa.allocate_buffer(size) + assert buf.size == size + arr = np.frombuffer(buf, dtype=np.uint8) + assert arr.size == size + arr[:] = np.random.randint(low=1, high=255, size=size, dtype=np.uint8) + assert arr.sum() > 0 or size == 0 + arr_ = np.frombuffer(buf, dtype=np.uint8) + np.testing.assert_equal(arr, arr_) + return arr, buf + elif target == 'device': + arr, buf = make_random_buffer(size, target='host') + dbuf = global_context.new_buffer(size) + assert dbuf.size == size + dbuf.copy_from_host(buf, position=0, nbytes=size) + return arr, dbuf + raise ValueError('invalid target value') + + +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_context_device_buffer(size): + # Creating device buffer from host buffer; + arr, buf = make_random_buffer(size) + cudabuf = global_context.buffer_from_data(buf) + assert cudabuf.size == size + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + # CudaBuffer does not support buffer protocol + with pytest.raises(BufferError): + memoryview(cudabuf) + + # Creating device buffer from array: + cudabuf = global_context.buffer_from_data(arr) + assert cudabuf.size == size + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + # Creating device buffer from bytes: + cudabuf = global_context.buffer_from_data(arr.tobytes()) + assert cudabuf.size == size + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + # Creating a device buffer from another device buffer, view: + cudabuf2 = cudabuf.slice(0, cudabuf.size) + assert cudabuf2.size == size + arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + if size > 1: + cudabuf2.copy_from_host(arr[size//2:]) + arr3 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(np.concatenate((arr[size//2:], arr[size//2:])), + arr3) + cudabuf2.copy_from_host(arr[:size//2]) # restoring arr + + # Creating a device buffer from another device buffer, copy: + cudabuf2 = global_context.buffer_from_data(cudabuf) + assert cudabuf2.size == size + arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + cudabuf2.copy_from_host(arr[size//2:]) + arr3 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr3) + + # Slice of a device buffer + cudabuf2 = cudabuf.slice(0, cudabuf.size+10) + assert cudabuf2.size == size + arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + cudabuf2 = cudabuf.slice(size//4, size+10) + assert cudabuf2.size == size - size//4 + arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[size//4:], arr2) + + # Creating a device buffer from a slice of host buffer + soffset = size//4 + ssize = 2*size//4 + cudabuf = global_context.buffer_from_data(buf, offset=soffset, + size=ssize) + assert cudabuf.size == ssize + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[soffset:soffset + ssize], arr2) + + cudabuf = global_context.buffer_from_data(buf.slice(offset=soffset, + length=ssize)) + assert cudabuf.size == ssize + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[soffset:soffset + ssize], arr2) + + # Creating a device buffer from a slice of an array + cudabuf = global_context.buffer_from_data(arr, 
offset=soffset, size=ssize) + assert cudabuf.size == ssize + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[soffset:soffset + ssize], arr2) + + cudabuf = global_context.buffer_from_data(arr[soffset:soffset+ssize]) + assert cudabuf.size == ssize + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[soffset:soffset + ssize], arr2) + + # Creating a device buffer from a slice of bytes + cudabuf = global_context.buffer_from_data(arr.tobytes(), + offset=soffset, + size=ssize) + assert cudabuf.size == ssize + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[soffset:soffset + ssize], arr2) + + # Creating a device buffer from size + cudabuf = global_context.new_buffer(size) + assert cudabuf.size == size + + # Creating device buffer from a slice of another device buffer: + cudabuf = global_context.buffer_from_data(arr) + cudabuf2 = cudabuf.slice(soffset, ssize) + assert cudabuf2.size == ssize + arr2 = np.frombuffer(cudabuf2.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[soffset:soffset+ssize], arr2) + + # Creating device buffer from HostBuffer + + buf = cuda.new_host_buffer(size) + arr_ = np.frombuffer(buf, dtype=np.uint8) + arr_[:] = arr + cudabuf = global_context.buffer_from_data(buf) + assert cudabuf.size == size + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + # Creating device buffer from HostBuffer slice + + cudabuf = global_context.buffer_from_data(buf, offset=soffset, size=ssize) + assert cudabuf.size == ssize + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[soffset:soffset+ssize], arr2) + + cudabuf = global_context.buffer_from_data( + buf.slice(offset=soffset, length=ssize)) + assert cudabuf.size == ssize + arr2 = np.frombuffer(cudabuf.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr[soffset:soffset+ssize], arr2) + + +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_context_from_object(size): + ctx = global_context + arr, cbuf = make_random_buffer(size, target='device') + dtype = arr.dtype + + # Creating device buffer from a CUDA host buffer + hbuf = cuda.new_host_buffer(size * arr.dtype.itemsize) + np.frombuffer(hbuf, dtype=dtype)[:] = arr + cbuf2 = ctx.buffer_from_object(hbuf) + assert cbuf2.size == cbuf.size + arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr, arr2) + + # Creating device buffer from a device buffer + cbuf2 = ctx.buffer_from_object(cbuf2) + assert cbuf2.size == cbuf.size + arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr, arr2) + + # Trying to create a device buffer from a Buffer + with pytest.raises(pa.ArrowTypeError, + match=('buffer is not backed by a CudaBuffer')): + ctx.buffer_from_object(pa.py_buffer(b"123")) + + # Trying to create a device buffer from numpy.array + with pytest.raises(pa.ArrowTypeError, + match=("cannot create device buffer view from " + ".* \'numpy.ndarray\'")): + ctx.buffer_from_object(np.array([1, 2, 3])) + + +def test_foreign_buffer(): + ctx = global_context + dtype = np.dtype(np.uint8) + size = 10 + hbuf = cuda.new_host_buffer(size * dtype.itemsize) + + # test host buffer memory reference counting + rc = sys.getrefcount(hbuf) + fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf) + assert sys.getrefcount(hbuf) == rc + 1 + del fbuf + assert sys.getrefcount(hbuf) == rc + + # test postponed deallocation of host 
buffer memory + fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size, hbuf) + del hbuf + fbuf.copy_to_host() + + # test deallocating the host buffer memory making it inaccessible + hbuf = cuda.new_host_buffer(size * dtype.itemsize) + fbuf = ctx.foreign_buffer(hbuf.address, hbuf.size) + del hbuf + with pytest.raises(pa.ArrowIOError, + match=('Cuda error ')): + fbuf.copy_to_host() + + +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_CudaBuffer(size): + arr, buf = make_random_buffer(size) + assert arr.tobytes() == buf.to_pybytes() + cbuf = global_context.buffer_from_data(buf) + assert cbuf.size == size + assert not cbuf.is_cpu + assert arr.tobytes() == cbuf.to_pybytes() + if size > 0: + assert cbuf.address > 0 + + for i in range(size): + assert cbuf[i] == arr[i] + + for s in [ + slice(None), + slice(size//4, size//2), + ]: + assert cbuf[s].to_pybytes() == arr[s].tobytes() + + sbuf = cbuf.slice(size//4, size//2) + assert sbuf.parent == cbuf + + with pytest.raises(TypeError, + match="Do not call CudaBuffer's constructor directly"): + cuda.CudaBuffer() + + +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_HostBuffer(size): + arr, buf = make_random_buffer(size) + assert arr.tobytes() == buf.to_pybytes() + hbuf = cuda.new_host_buffer(size) + np.frombuffer(hbuf, dtype=np.uint8)[:] = arr + assert hbuf.size == size + assert hbuf.is_cpu + assert arr.tobytes() == hbuf.to_pybytes() + for i in range(size): + assert hbuf[i] == arr[i] + for s in [ + slice(None), + slice(size//4, size//2), + ]: + assert hbuf[s].to_pybytes() == arr[s].tobytes() + + sbuf = hbuf.slice(size//4, size//2) + assert sbuf.parent == hbuf + + del hbuf + + with pytest.raises(TypeError, + match="Do not call HostBuffer's constructor directly"): + cuda.HostBuffer() + + +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_copy_from_to_host(size): + # Create a buffer in host containing range(size) + dt = np.dtype('uint16') + nbytes = size * dt.itemsize + buf = pa.allocate_buffer(nbytes, resizable=True) # in host + assert isinstance(buf, pa.Buffer) + assert not isinstance(buf, cuda.CudaBuffer) + arr = np.frombuffer(buf, dtype=dt) + assert arr.size == size + arr[:] = range(size) + arr_ = np.frombuffer(buf, dtype=dt) + np.testing.assert_equal(arr, arr_) + + # Create a device buffer of the same size and copy from host + device_buffer = global_context.new_buffer(nbytes) + assert isinstance(device_buffer, cuda.CudaBuffer) + assert isinstance(device_buffer, pa.Buffer) + assert device_buffer.size == nbytes + assert not device_buffer.is_cpu + device_buffer.copy_from_host(buf, position=0, nbytes=nbytes) + + # Copy back to host and compare contents + buf2 = device_buffer.copy_to_host(position=0, nbytes=nbytes) + arr2 = np.frombuffer(buf2, dtype=dt) + np.testing.assert_equal(arr, arr2) + + +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_copy_to_host(size): + arr, dbuf = make_random_buffer(size, target='device') + + buf = dbuf.copy_to_host() + assert buf.is_cpu + np.testing.assert_equal(arr, np.frombuffer(buf, dtype=np.uint8)) + + buf = dbuf.copy_to_host(position=size//4) + assert buf.is_cpu + np.testing.assert_equal(arr[size//4:], np.frombuffer(buf, dtype=np.uint8)) + + buf = dbuf.copy_to_host(position=size//4, nbytes=size//8) + assert buf.is_cpu + np.testing.assert_equal(arr[size//4:size//4+size//8], + np.frombuffer(buf, dtype=np.uint8)) + + buf = dbuf.copy_to_host(position=size//4, nbytes=0) + assert buf.is_cpu + assert buf.size == 0 + + for (position, nbytes) in [ + (size+2, -1), (-2, -1), (size+1, 0), (-3, 0), + ]: + 
with pytest.raises(ValueError, + match='position argument is out-of-range'): + dbuf.copy_to_host(position=position, nbytes=nbytes) + + for (position, nbytes) in [ + (0, size+1), (size//2, (size+1)//2+1), (size, 1) + ]: + with pytest.raises(ValueError, + match=('requested more to copy than' + ' available from device buffer')): + dbuf.copy_to_host(position=position, nbytes=nbytes) + + buf = pa.allocate_buffer(size//4) + dbuf.copy_to_host(buf=buf) + np.testing.assert_equal(arr[:size//4], np.frombuffer(buf, dtype=np.uint8)) + + if size < 12: + return + + dbuf.copy_to_host(buf=buf, position=12) + np.testing.assert_equal(arr[12:12+size//4], + np.frombuffer(buf, dtype=np.uint8)) + + dbuf.copy_to_host(buf=buf, nbytes=12) + np.testing.assert_equal(arr[:12], np.frombuffer(buf, dtype=np.uint8)[:12]) + + dbuf.copy_to_host(buf=buf, nbytes=12, position=6) + np.testing.assert_equal(arr[6:6+12], + np.frombuffer(buf, dtype=np.uint8)[:12]) + + for (position, nbytes) in [ + (0, size+10), (10, size-5), + (0, size//2), (size//4, size//4+1) + ]: + with pytest.raises(ValueError, + match=('requested copy does not ' + 'fit into host buffer')): + dbuf.copy_to_host(buf=buf, position=position, nbytes=nbytes) + + +@pytest.mark.parametrize("dest_ctx", ['same', 'another']) +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_copy_from_device(dest_ctx, size): + arr, buf = make_random_buffer(size=size, target='device') + lst = arr.tolist() + if dest_ctx == 'another': + dest_ctx = global_context1 + if buf.context.device_number == dest_ctx.device_number: + pytest.skip("not a multi-GPU system") + else: + dest_ctx = buf.context + dbuf = dest_ctx.new_buffer(size) + + def put(*args, **kwargs): + dbuf.copy_from_device(buf, *args, **kwargs) + rbuf = dbuf.copy_to_host() + return np.frombuffer(rbuf, dtype=np.uint8).tolist() + assert put() == lst + if size > 4: + assert put(position=size//4) == lst[:size//4]+lst[:-size//4] + assert put() == lst + assert put(position=1, nbytes=size//2) == \ + lst[:1] + lst[:size//2] + lst[-(size-size//2-1):] + + for (position, nbytes) in [ + (size+2, -1), (-2, -1), (size+1, 0), (-3, 0), + ]: + with pytest.raises(ValueError, + match='position argument is out-of-range'): + put(position=position, nbytes=nbytes) + + for (position, nbytes) in [ + (0, size+1), + ]: + with pytest.raises(ValueError, + match=('requested more to copy than' + ' available from device buffer')): + put(position=position, nbytes=nbytes) + + if size < 4: + return + + for (position, nbytes) in [ + (size//2, (size+1)//2+1) + ]: + with pytest.raises(ValueError, + match=('requested more to copy than' + ' available in device buffer')): + put(position=position, nbytes=nbytes) + + +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_copy_from_host(size): + arr, buf = make_random_buffer(size=size, target='host') + lst = arr.tolist() + dbuf = global_context.new_buffer(size) + + def put(*args, **kwargs): + dbuf.copy_from_host(buf, *args, **kwargs) + rbuf = dbuf.copy_to_host() + return np.frombuffer(rbuf, dtype=np.uint8).tolist() + assert put() == lst + if size > 4: + assert put(position=size//4) == lst[:size//4]+lst[:-size//4] + assert put() == lst + assert put(position=1, nbytes=size//2) == \ + lst[:1] + lst[:size//2] + lst[-(size-size//2-1):] + + for (position, nbytes) in [ + (size+2, -1), (-2, -1), (size+1, 0), (-3, 0), + ]: + with pytest.raises(ValueError, + match='position argument is out-of-range'): + put(position=position, nbytes=nbytes) + + for (position, nbytes) in [ + (0, size+1), + ]: + with pytest.raises(ValueError, + 
match=('requested more to copy than' + ' available from host buffer')): + put(position=position, nbytes=nbytes) + + if size < 4: + return + + for (position, nbytes) in [ + (size//2, (size+1)//2+1) + ]: + with pytest.raises(ValueError, + match=('requested more to copy than' + ' available in device buffer')): + put(position=position, nbytes=nbytes) + + +def test_BufferWriter(): + def allocate(size): + cbuf = global_context.new_buffer(size) + writer = cuda.BufferWriter(cbuf) + return cbuf, writer + + def test_writes(total_size, chunksize, buffer_size=0): + cbuf, writer = allocate(total_size) + arr, buf = make_random_buffer(size=total_size, target='host') + + if buffer_size > 0: + writer.buffer_size = buffer_size + + position = writer.tell() + assert position == 0 + writer.write(buf.slice(length=chunksize)) + assert writer.tell() == chunksize + writer.seek(0) + position = writer.tell() + assert position == 0 + + while position < total_size: + bytes_to_write = min(chunksize, total_size - position) + writer.write(buf.slice(offset=position, length=bytes_to_write)) + position += bytes_to_write + + writer.flush() + assert cbuf.size == total_size + cbuf.context.synchronize() + buf2 = cbuf.copy_to_host() + cbuf.context.synchronize() + assert buf2.size == total_size + arr2 = np.frombuffer(buf2, dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + total_size, chunk_size = 1 << 16, 1000 + test_writes(total_size, chunk_size) + test_writes(total_size, chunk_size, total_size // 16) + + cbuf, writer = allocate(100) + writer.write(np.arange(100, dtype=np.uint8)) + writer.writeat(50, np.arange(25, dtype=np.uint8)) + writer.write(np.arange(25, dtype=np.uint8)) + writer.flush() + + arr = np.frombuffer(cbuf.copy_to_host(), np.uint8) + np.testing.assert_equal(arr[:50], np.arange(50, dtype=np.uint8)) + np.testing.assert_equal(arr[50:75], np.arange(25, dtype=np.uint8)) + np.testing.assert_equal(arr[75:], np.arange(25, dtype=np.uint8)) + + +def test_BufferWriter_edge_cases(): + # edge cases, see cuda-test.cc for more information: + size = 1000 + cbuf = global_context.new_buffer(size) + writer = cuda.BufferWriter(cbuf) + arr, buf = make_random_buffer(size=size, target='host') + + assert writer.buffer_size == 0 + writer.buffer_size = 100 + assert writer.buffer_size == 100 + + writer.write(buf.slice(length=0)) + assert writer.tell() == 0 + + writer.write(buf.slice(length=10)) + writer.buffer_size = 200 + assert writer.buffer_size == 200 + assert writer.num_bytes_buffered == 0 + + writer.write(buf.slice(offset=10, length=300)) + assert writer.num_bytes_buffered == 0 + + writer.write(buf.slice(offset=310, length=200)) + assert writer.num_bytes_buffered == 0 + + writer.write(buf.slice(offset=510, length=390)) + writer.write(buf.slice(offset=900, length=100)) + + writer.flush() + + buf2 = cbuf.copy_to_host() + assert buf2.size == size + arr2 = np.frombuffer(buf2, dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + +def test_BufferReader(): + size = 1000 + arr, cbuf = make_random_buffer(size=size, target='device') + + reader = cuda.BufferReader(cbuf) + reader.seek(950) + assert reader.tell() == 950 + + data = reader.read(100) + assert len(data) == 50 + assert reader.tell() == 1000 + + reader.seek(925) + arr2 = np.zeros(100, dtype=np.uint8) + n = reader.readinto(arr2) + assert n == 75 + assert reader.tell() == 1000 + np.testing.assert_equal(arr[925:], arr2[:75]) + + reader.seek(0) + assert reader.tell() == 0 + buf2 = reader.read_buffer() + arr2 = np.frombuffer(buf2.copy_to_host(), dtype=np.uint8) + 
np.testing.assert_equal(arr, arr2) + + +def test_BufferReader_zero_size(): + arr, cbuf = make_random_buffer(size=0, target='device') + reader = cuda.BufferReader(cbuf) + reader.seek(0) + data = reader.read() + assert len(data) == 0 + assert reader.tell() == 0 + buf2 = reader.read_buffer() + arr2 = np.frombuffer(buf2.copy_to_host(), dtype=np.uint8) + np.testing.assert_equal(arr, arr2) + + +def make_recordbatch(length): + schema = pa.schema([pa.field('f0', pa.int16()), + pa.field('f1', pa.int16())]) + a0 = pa.array(np.random.randint(0, 255, size=length, dtype=np.int16)) + a1 = pa.array(np.random.randint(0, 255, size=length, dtype=np.int16)) + batch = pa.record_batch([a0, a1], schema=schema) + return batch + + +def test_batch_serialize(): + batch = make_recordbatch(10) + hbuf = batch.serialize() + cbuf = cuda.serialize_record_batch(batch, global_context) + + # Test that read_record_batch works properly + cbatch = cuda.read_record_batch(cbuf, batch.schema) + assert isinstance(cbatch, pa.RecordBatch) + assert batch.schema == cbatch.schema + assert batch.num_columns == cbatch.num_columns + assert batch.num_rows == cbatch.num_rows + + # Deserialize CUDA-serialized batch on host + buf = cbuf.copy_to_host() + assert hbuf.equals(buf) + batch2 = pa.ipc.read_record_batch(buf, batch.schema) + assert hbuf.equals(batch2.serialize()) + + assert batch.num_columns == batch2.num_columns + assert batch.num_rows == batch2.num_rows + assert batch.column(0).equals(batch2.column(0)) + assert batch.equals(batch2) + + +def make_table(): + a0 = pa.array([0, 1, 42, None], type=pa.int16()) + a1 = pa.array([[0, 1], [2], [], None], type=pa.list_(pa.int32())) + a2 = pa.array([("ab", True), ("cde", False), (None, None), None], + type=pa.struct([("strs", pa.utf8()), + ("bools", pa.bool_())])) + # Dictionaries are validated on the IPC read path, but that can produce + # issues for GPU-located dictionaries. Check that they work fine. + a3 = pa.DictionaryArray.from_arrays( + indices=[0, 1, 1, None], + dictionary=pa.array(['foo', 'bar'])) + a4 = pa.DictionaryArray.from_arrays( + indices=[2, 1, 2, None], + dictionary=a1) + a5 = pa.DictionaryArray.from_arrays( + indices=[2, 1, 0, None], + dictionary=a2) + + arrays = [a0, a1, a2, a3, a4, a5] + schema = pa.schema([('f{}'.format(i), arr.type) + for i, arr in enumerate(arrays)]) + batch = pa.record_batch(arrays, schema=schema) + table = pa.Table.from_batches([batch]) + return table + + +def make_table_cuda(): + htable = make_table() + # Serialize the host table to bytes + sink = pa.BufferOutputStream() + with pa.ipc.new_stream(sink, htable.schema) as out: + out.write_table(htable) + hbuf = pa.py_buffer(sink.getvalue().to_pybytes()) + + # Copy the host bytes to a device buffer + dbuf = global_context.new_buffer(len(hbuf)) + dbuf.copy_from_host(hbuf, nbytes=len(hbuf)) + # Deserialize the device buffer into a Table + dtable = pa.ipc.open_stream(cuda.BufferReader(dbuf)).read_all() + return hbuf, htable, dbuf, dtable + + +def test_table_deserialize(): + # ARROW-9659: make sure that we can deserialize a GPU-located table + # without crashing when initializing or validating the underlying arrays. 
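+    # (Hedged editorial note, not upstream code) make_table_cuda() returns the host IPC buffer and host table together with the device buffer and the table deserialized from it, so the host and device views can be compared directly below.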
+ hbuf, htable, dbuf, dtable = make_table_cuda() + # Assert basic fields the same between host and device tables + assert htable.schema == dtable.schema + assert htable.num_rows == dtable.num_rows + assert htable.num_columns == dtable.num_columns + # Assert byte-level equality + assert hbuf.equals(dbuf.copy_to_host()) + # Copy DtoH and assert the tables are still equivalent + assert htable.equals(pa.ipc.open_stream( + dbuf.copy_to_host() + ).read_all()) + + +def test_create_table_with_device_buffers(): + # ARROW-11872: make sure that we can create an Arrow Table from + # GPU-located Arrays without crashing. + hbuf, htable, dbuf, dtable = make_table_cuda() + # Construct a new Table from the device Table + dtable2 = pa.Table.from_arrays(dtable.columns, dtable.column_names) + # Assert basic fields the same between host and device tables + assert htable.schema == dtable2.schema + assert htable.num_rows == dtable2.num_rows + assert htable.num_columns == dtable2.num_columns + # Assert byte-level equality + assert hbuf.equals(dbuf.copy_to_host()) + # Copy DtoH and assert the tables are still equivalent + assert htable.equals(pa.ipc.open_stream( + dbuf.copy_to_host() + ).read_all()) + + +def other_process_for_test_IPC(handle_buffer, expected_arr): + other_context = pa.cuda.Context(0) + ipc_handle = pa.cuda.IpcMemHandle.from_buffer(handle_buffer) + ipc_buf = other_context.open_ipc_buffer(ipc_handle) + ipc_buf.context.synchronize() + buf = ipc_buf.copy_to_host() + assert buf.size == expected_arr.size, repr((buf.size, expected_arr.size)) + arr = np.frombuffer(buf, dtype=expected_arr.dtype) + np.testing.assert_equal(arr, expected_arr) + + +@cuda_ipc +@pytest.mark.parametrize("size", [0, 1, 1000]) +def test_IPC(size): + import multiprocessing + ctx = multiprocessing.get_context('spawn') + arr, cbuf = make_random_buffer(size=size, target='device') + ipc_handle = cbuf.export_for_ipc() + handle_buffer = ipc_handle.serialize() + p = ctx.Process(target=other_process_for_test_IPC, + args=(handle_buffer, arr)) + p.start() + p.join() + assert p.exitcode == 0 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py new file mode 100644 index 0000000000000000000000000000000000000000..ff1722d278d5e58fabde5b47910ad2d2b38070e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cuda_numba_interop.py @@ -0,0 +1,235 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import pytest +import pyarrow as pa +import numpy as np + +dtypes = ['uint8', 'int16', 'float32'] +cuda = pytest.importorskip("pyarrow.cuda") +nb_cuda = pytest.importorskip("numba.cuda") + +from numba.cuda.cudadrv.devicearray import DeviceNDArray # noqa: E402 + + +context_choices = None +context_choice_ids = ['pyarrow.cuda', 'numba.cuda'] + + +def setup_module(module): + np.random.seed(1234) + ctx1 = cuda.Context() + nb_ctx1 = ctx1.to_numba() + nb_ctx2 = nb_cuda.current_context() + ctx2 = cuda.Context.from_numba(nb_ctx2) + module.context_choices = [(ctx1, nb_ctx1), (ctx2, nb_ctx2)] + + +def teardown_module(module): + del module.context_choices + + +@pytest.mark.parametrize("c", range(len(context_choice_ids)), + ids=context_choice_ids) +def test_context(c): + ctx, nb_ctx = context_choices[c] + assert ctx.handle == nb_ctx.handle.value + assert ctx.handle == ctx.to_numba().handle.value + ctx2 = cuda.Context.from_numba(nb_ctx) + assert ctx.handle == ctx2.handle + size = 10 + buf = ctx.new_buffer(size) + assert ctx.handle == buf.context.handle + + +def make_random_buffer(size, target='host', dtype='uint8', ctx=None): + """Return a host or device buffer with random data. + """ + dtype = np.dtype(dtype) + if target == 'host': + assert size >= 0 + buf = pa.allocate_buffer(size*dtype.itemsize) + arr = np.frombuffer(buf, dtype=dtype) + arr[:] = np.random.randint(low=0, high=255, size=size, + dtype=np.uint8) + return arr, buf + elif target == 'device': + arr, buf = make_random_buffer(size, target='host', dtype=dtype) + dbuf = ctx.new_buffer(size * dtype.itemsize) + dbuf.copy_from_host(buf, position=0, nbytes=buf.size) + return arr, dbuf + raise ValueError('invalid target value') + + +@pytest.mark.parametrize("c", range(len(context_choice_ids)), + ids=context_choice_ids) +@pytest.mark.parametrize("dtype", dtypes, ids=dtypes) +@pytest.mark.parametrize("size", [0, 1, 8, 1000]) +def test_from_object(c, dtype, size): + ctx, nb_ctx = context_choices[c] + arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx) + + # Creating device buffer from numba DeviceNDArray: + darr = nb_cuda.to_device(arr) + cbuf2 = ctx.buffer_from_object(darr) + assert cbuf2.size == cbuf.size + arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr, arr2) + + # Creating device buffer from a slice of numba DeviceNDArray: + if size >= 8: + # 1-D arrays + for s in [slice(size//4, None, None), + slice(size//4, -(size//4), None)]: + cbuf2 = ctx.buffer_from_object(darr[s]) + arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr[s], arr2) + + # cannot test negative strides due to numba bug, see its issue 3705 + if 0: + rdarr = darr[::-1] + cbuf2 = ctx.buffer_from_object(rdarr) + assert cbuf2.size == cbuf.size + arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr, arr2) + + with pytest.raises(ValueError, + match=('array data is non-contiguous')): + ctx.buffer_from_object(darr[::2]) + + # a rectangular 2-D array + s1 = size//4 + s2 = size//s1 + assert s1 * s2 == size + cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2)) + assert cbuf2.size == cbuf.size + arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr, arr2) + + with pytest.raises(ValueError, + match=('array data is non-contiguous')): + ctx.buffer_from_object(darr.reshape(s1, s2)[:, ::2]) + + # a 3-D array + s1 = 4 + s2 = size//8 + s3 = size//(s1*s2) + assert s1 * s2 * s3 == size + cbuf2 = ctx.buffer_from_object(darr.reshape(s1, s2, 
s3)) + assert cbuf2.size == cbuf.size + arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr, arr2) + + with pytest.raises(ValueError, + match=('array data is non-contiguous')): + ctx.buffer_from_object(darr.reshape(s1, s2, s3)[::2]) + + # Creating device buffer from am object implementing cuda array + # interface: + class MyObj: + def __init__(self, darr): + self.darr = darr + + @property + def __cuda_array_interface__(self): + return self.darr.__cuda_array_interface__ + + cbuf2 = ctx.buffer_from_object(MyObj(darr)) + assert cbuf2.size == cbuf.size + arr2 = np.frombuffer(cbuf2.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr, arr2) + + +@pytest.mark.parametrize("c", range(len(context_choice_ids)), + ids=context_choice_ids) +@pytest.mark.parametrize("dtype", dtypes, ids=dtypes) +def test_numba_memalloc(c, dtype): + ctx, nb_ctx = context_choices[c] + dtype = np.dtype(dtype) + # Allocate memory using numba context + # Warning: this will not be reflected in pyarrow context manager + # (e.g bytes_allocated does not change) + size = 10 + mem = nb_ctx.memalloc(size * dtype.itemsize) + darr = DeviceNDArray((size,), (dtype.itemsize,), dtype, gpu_data=mem) + darr[:5] = 99 + darr[5:] = 88 + np.testing.assert_equal(darr.copy_to_host()[:5], 99) + np.testing.assert_equal(darr.copy_to_host()[5:], 88) + + # wrap numba allocated memory with CudaBuffer + cbuf = cuda.CudaBuffer.from_numba(mem) + arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype) + np.testing.assert_equal(arr2, darr.copy_to_host()) + + +@pytest.mark.parametrize("c", range(len(context_choice_ids)), + ids=context_choice_ids) +@pytest.mark.parametrize("dtype", dtypes, ids=dtypes) +def test_pyarrow_memalloc(c, dtype): + ctx, nb_ctx = context_choices[c] + size = 10 + arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx) + + # wrap CudaBuffer with numba device array + mem = cbuf.to_numba() + darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem) + np.testing.assert_equal(darr.copy_to_host(), arr) + + +@pytest.mark.parametrize("c", range(len(context_choice_ids)), + ids=context_choice_ids) +@pytest.mark.parametrize("dtype", dtypes, ids=dtypes) +def test_numba_context(c, dtype): + ctx, nb_ctx = context_choices[c] + size = 10 + with nb_cuda.gpus[0]: + arr, cbuf = make_random_buffer(size, target='device', + dtype=dtype, ctx=ctx) + assert cbuf.context.handle == nb_ctx.handle.value + mem = cbuf.to_numba() + darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem) + np.testing.assert_equal(darr.copy_to_host(), arr) + darr[0] = 99 + cbuf.context.synchronize() + arr2 = np.frombuffer(cbuf.copy_to_host(), dtype=dtype) + assert arr2[0] == 99 + + +@pytest.mark.parametrize("c", range(len(context_choice_ids)), + ids=context_choice_ids) +@pytest.mark.parametrize("dtype", dtypes, ids=dtypes) +def test_pyarrow_jit(c, dtype): + ctx, nb_ctx = context_choices[c] + + @nb_cuda.jit + def increment_by_one(an_array): + pos = nb_cuda.grid(1) + if pos < an_array.size: + an_array[pos] += 1 + + # applying numba.cuda kernel to memory hold by CudaBuffer + size = 10 + arr, cbuf = make_random_buffer(size, target='device', dtype=dtype, ctx=ctx) + threadsperblock = 32 + blockspergrid = (arr.size + (threadsperblock - 1)) // threadsperblock + mem = cbuf.to_numba() + darr = DeviceNDArray(arr.shape, arr.strides, arr.dtype, gpu_data=mem) + increment_by_one[blockspergrid, threadsperblock](darr) + cbuf.context.synchronize() + arr1 = np.frombuffer(cbuf.copy_to_host(), dtype=arr.dtype) + 
np.testing.assert_equal(arr1, arr + 1) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_cython.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cython.py new file mode 100644 index 0000000000000000000000000000000000000000..0eeae5d65f7d5a4e620be8709db0441fd3625fe3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_cython.py @@ -0,0 +1,200 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import shutil +import subprocess +import sys + +import pytest + +import pyarrow as pa +import pyarrow.tests.util as test_util + +here = os.path.dirname(os.path.abspath(__file__)) +test_ld_path = os.environ.get('PYARROW_TEST_LD_PATH', '') +if os.name == 'posix': + compiler_opts = ['-std=c++17'] +elif os.name == 'nt': + compiler_opts = ['-D_ENABLE_EXTENDED_ALIGNED_STORAGE', '/std:c++17'] +else: + compiler_opts = [] + +setup_template = """if 1: + from setuptools import setup + from Cython.Build import cythonize + + import numpy as np + + import pyarrow as pa + + ext_modules = cythonize({pyx_file!r}) + compiler_opts = {compiler_opts!r} + custom_ld_path = {test_ld_path!r} + + for ext in ext_modules: + # XXX required for numpy/numpyconfig.h, + # included from arrow/python/api.h + ext.include_dirs.append(np.get_include()) + ext.include_dirs.append(pa.get_include()) + ext.libraries.extend(pa.get_libraries()) + ext.library_dirs.extend(pa.get_library_dirs()) + if custom_ld_path: + ext.library_dirs.append(custom_ld_path) + ext.extra_compile_args.extend(compiler_opts) + print("Extension module:", + ext, ext.include_dirs, ext.libraries, ext.library_dirs) + + setup( + ext_modules=ext_modules, + ) +""" + + +def check_cython_example_module(mod): + arr = pa.array([1, 2, 3]) + assert mod.get_array_length(arr) == 3 + with pytest.raises(TypeError, match="not an array"): + mod.get_array_length(None) + + scal = pa.scalar(123) + cast_scal = mod.cast_scalar(scal, pa.utf8()) + assert cast_scal == pa.scalar("123") + with pytest.raises(NotImplementedError, + match="Unsupported cast from int64 to list using function " + "cast_list"): + mod.cast_scalar(scal, pa.list_(pa.int64())) + + +@pytest.mark.cython +def test_cython_api(tmpdir): + """ + Basic test for the Cython API. 
+ """ + # Fail early if cython is not found + import cython # noqa + + with tmpdir.as_cwd(): + # Set up temporary workspace + pyx_file = 'pyarrow_cython_example.pyx' + shutil.copyfile(os.path.join(here, pyx_file), + os.path.join(str(tmpdir), pyx_file)) + # Create setup.py file + setup_code = setup_template.format(pyx_file=pyx_file, + compiler_opts=compiler_opts, + test_ld_path=test_ld_path) + with open('setup.py', 'w') as f: + f.write(setup_code) + + # ARROW-2263: Make environment with this pyarrow/ package first on the + # PYTHONPATH, for local dev environments + subprocess_env = test_util.get_modified_env_with_pythonpath() + + # Compile extension module + subprocess.check_call([sys.executable, 'setup.py', + 'build_ext', '--inplace'], + env=subprocess_env) + + # Check basic functionality + orig_path = sys.path[:] + sys.path.insert(0, str(tmpdir)) + try: + mod = __import__('pyarrow_cython_example') + check_cython_example_module(mod) + finally: + sys.path = orig_path + + # Check the extension module is loadable from a subprocess without + # pyarrow imported first. + code = """if 1: + import sys + import os + + try: + # Add dll directory was added on python 3.8 + # and is required in order to find extra DLLs + # only for win32 + for dir in {library_dirs}: + os.add_dll_directory(dir) + except AttributeError: + pass + + mod = __import__({mod_name!r}) + arr = mod.make_null_array(5) + assert mod.get_array_length(arr) == 5 + assert arr.null_count == 5 + """.format(mod_name='pyarrow_cython_example', + library_dirs=pa.get_library_dirs()) + + path_var = None + if sys.platform == 'win32': + if not hasattr(os, 'add_dll_directory'): + # Python 3.8 onwards don't check extension module DLLs on path + # we have to use os.add_dll_directory instead. + delim, path_var = ';', 'PATH' + elif sys.platform == 'darwin': + delim, path_var = ':', 'DYLD_LIBRARY_PATH' + else: + delim, path_var = ':', 'LD_LIBRARY_PATH' + + if path_var: + paths = sys.path + paths += pa.get_library_dirs() + paths += [subprocess_env.get(path_var, '')] + paths = [path for path in paths if path] + subprocess_env[path_var] = delim.join(paths) + subprocess.check_call([sys.executable, '-c', code], + stdout=subprocess.PIPE, + env=subprocess_env) + + +@pytest.mark.cython +def test_visit_strings(tmpdir): + with tmpdir.as_cwd(): + # Set up temporary workspace + pyx_file = 'bound_function_visit_strings.pyx' + shutil.copyfile(os.path.join(here, pyx_file), + os.path.join(str(tmpdir), pyx_file)) + # Create setup.py file + setup_code = setup_template.format(pyx_file=pyx_file, + compiler_opts=compiler_opts, + test_ld_path=test_ld_path) + with open('setup.py', 'w') as f: + f.write(setup_code) + + subprocess_env = test_util.get_modified_env_with_pythonpath() + + # Compile extension module + subprocess.check_call([sys.executable, 'setup.py', + 'build_ext', '--inplace'], + env=subprocess_env) + + sys.path.insert(0, str(tmpdir)) + mod = __import__('bound_function_visit_strings') + + strings = ['a', 'b', 'c'] + visited = [] + mod._visit_strings(strings, visited.append) + + assert visited == strings + + with pytest.raises(ValueError, match="wtf"): + def raise_on_b(s): + if s == 'b': + raise ValueError('wtf') + + mod._visit_strings(strings, raise_on_b) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..0b79218fb00187c964478b6b2bb10ddb54f73f40 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset.py @@ -0,0 +1,5651 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import contextlib +import datetime +import os +import pathlib +import posixpath +import sys +import tempfile +import textwrap +import threading +import time +from shutil import copytree +from urllib.parse import quote + +import numpy as np +import pytest + +import pyarrow as pa +import pyarrow.compute as pc +import pyarrow.csv +import pyarrow.feather +import pyarrow.fs as fs +import pyarrow.json +from pyarrow.tests.util import (FSProtocolClass, ProxyHandler, + _configure_s3_limited_user, _filesystem_uri, + change_cwd) + +try: + import pandas as pd +except ImportError: + pd = None + +try: + import pyarrow.dataset as ds +except ImportError: + ds = None + +try: + import pyarrow.parquet as pq +except ImportError: + pq = None + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not dataset' +pytestmark = pytest.mark.dataset + + +def _generate_data(n): + import datetime + import itertools + + day = datetime.datetime(2000, 1, 1) + interval = datetime.timedelta(days=5) + colors = itertools.cycle(['green', 'blue', 'yellow', 'red', 'orange']) + + data = [] + for i in range(n): + data.append((day, i, float(i), next(colors))) + day += interval + + return pd.DataFrame(data, columns=['date', 'index', 'value', 'color']) + + +def _table_from_pandas(df): + schema = pa.schema([ + pa.field('date', pa.date32()), + pa.field('index', pa.int64()), + pa.field('value', pa.float64()), + pa.field('color', pa.string()), + ]) + table = pa.Table.from_pandas(df, schema=schema, preserve_index=False) + return table.replace_schema_metadata() + + +def assert_dataset_fragment_convenience_methods(dataset): + # FileFragment convenience methods + for fragment in dataset.get_fragments(): + with fragment.open() as nf: + assert isinstance(nf, pa.NativeFile) + assert not nf.closed + assert nf.seekable() + assert nf.readable() + assert not nf.writable() + + +@pytest.fixture +def mockfs(): + mockfs = fs._MockFileSystem() + + directories = [ + 'subdir/1/xxx', + 'subdir/2/yyy', + ] + + for i, directory in enumerate(directories): + path = '{}/file{}.parquet'.format(directory, i) + mockfs.create_dir(directory) + with mockfs.open_output_stream(path) as out: + data = [ + list(range(5)), + list(map(float, range(5))), + list(map(str, range(5))), + [i] * 5, + [{'a': j % 3, 'b': str(j % 3)} for j in range(5)], + ] + schema = pa.schema([ + ('i64', pa.int64()), + ('f64', pa.float64()), + ('str', pa.string()), + ('const', pa.int64()), + ('struct', pa.struct({'a': pa.int64(), 'b': pa.string()})), + ]) + batch = pa.record_batch(data, schema=schema) + table = pa.Table.from_batches([batch]) + + pq.write_table(table, out) + + return mockfs + + +@pytest.fixture 
+def open_logging_fs(monkeypatch): + from pyarrow.fs import LocalFileSystem, PyFileSystem + + from .test_fs import ProxyHandler + + localfs = LocalFileSystem() + + def normalized(paths): + return {localfs.normalize_path(str(p)) for p in paths} + + opened = set() + + def open_input_file(self, path): + path = localfs.normalize_path(str(path)) + opened.add(path) + return self._fs.open_input_file(path) + + # patch proxyhandler to log calls to open_input_file + monkeypatch.setattr(ProxyHandler, "open_input_file", open_input_file) + fs = PyFileSystem(ProxyHandler(localfs)) + + @contextlib.contextmanager + def assert_opens(expected_opened): + opened.clear() + try: + yield + finally: + assert normalized(opened) == normalized(expected_opened) + + return fs, assert_opens + + +@pytest.fixture(scope='module') +def multisourcefs(request): + request.config.pyarrow.requires('pandas') + request.config.pyarrow.requires('parquet') + + df = _generate_data(1000) + mockfs = fs._MockFileSystem() + + # simply split the dataframe into four chunks to construct a data source + # from each chunk into its own directory + n = len(df) + df_a, df_b, df_c, df_d = [df.iloc[i:i+n//4] for i in range(0, n, n//4)] + + # create a directory containing a flat sequence of parquet files without + # any partitioning involved + mockfs.create_dir('plain') + n = len(df_a) + for i, chunk in enumerate([df_a.iloc[i:i+n//10] for i in range(0, n, n//10)]): + path = 'plain/chunk-{}.parquet'.format(i) + with mockfs.open_output_stream(path) as out: + pq.write_table(_table_from_pandas(chunk), out) + + # create one with schema partitioning by weekday and color + mockfs.create_dir('schema') + for part, chunk in df_b.groupby([df_b.date.dt.dayofweek, df_b.color]): + folder = 'schema/{}/{}'.format(*part) + path = '{}/chunk.parquet'.format(folder) + mockfs.create_dir(folder) + with mockfs.open_output_stream(path) as out: + pq.write_table(_table_from_pandas(chunk), out) + + # create one with hive partitioning by year and month + mockfs.create_dir('hive') + for part, chunk in df_c.groupby([df_c.date.dt.year, df_c.date.dt.month]): + folder = 'hive/year={}/month={}'.format(*part) + path = '{}/chunk.parquet'.format(folder) + mockfs.create_dir(folder) + with mockfs.open_output_stream(path) as out: + pq.write_table(_table_from_pandas(chunk), out) + + # create one with hive partitioning by color + mockfs.create_dir('hive_color') + for part, chunk in df_d.groupby("color"): + folder = 'hive_color/color={}'.format(part) + path = '{}/chunk.parquet'.format(folder) + mockfs.create_dir(folder) + with mockfs.open_output_stream(path) as out: + pq.write_table(_table_from_pandas(chunk), out) + + return mockfs + + +@pytest.fixture +def dataset(mockfs): + format = ds.ParquetFileFormat() + selector = fs.FileSelector('subdir', recursive=True) + options = ds.FileSystemFactoryOptions('subdir') + options.partitioning = ds.DirectoryPartitioning( + pa.schema([ + pa.field('group', pa.int32()), + pa.field('key', pa.string()) + ]) + ) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + return factory.finish() + + +@pytest.fixture(params=[ + (True), + (False) +], ids=['threaded', 'serial']) +def dataset_reader(request): + ''' + Fixture which allows dataset scanning operations to be + run with/without threads + ''' + use_threads = request.param + + class reader: + + def __init__(self): + self.use_threads = use_threads + + def _patch_kwargs(self, kwargs): + if 'use_threads' in kwargs: + raise Exception( + ('Invalid use of dataset_reader, do not specify' + ' 
use_threads')) + kwargs['use_threads'] = use_threads + + def to_table(self, dataset, **kwargs): + self._patch_kwargs(kwargs) + return dataset.to_table(**kwargs) + + def to_batches(self, dataset, **kwargs): + self._patch_kwargs(kwargs) + return dataset.to_batches(**kwargs) + + def scanner(self, dataset, **kwargs): + self._patch_kwargs(kwargs) + return dataset.scanner(**kwargs) + + def head(self, dataset, num_rows, **kwargs): + self._patch_kwargs(kwargs) + return dataset.head(num_rows, **kwargs) + + def take(self, dataset, indices, **kwargs): + self._patch_kwargs(kwargs) + return dataset.take(indices, **kwargs) + + def count_rows(self, dataset, **kwargs): + self._patch_kwargs(kwargs) + return dataset.count_rows(**kwargs) + + return reader() + + +@pytest.mark.parquet +def test_filesystem_dataset(mockfs): + schema = pa.schema([ + pa.field('const', pa.int64()) + ]) + file_format = ds.ParquetFileFormat() + paths = ['subdir/1/xxx/file0.parquet', 'subdir/2/yyy/file1.parquet'] + partitions = [ds.field('part') == x for x in range(1, 3)] + fragments = [file_format.make_fragment(path, mockfs, part) + for path, part in zip(paths, partitions)] + root_partition = ds.field('level') == ds.scalar(1337) + + dataset_from_fragments = ds.FileSystemDataset( + fragments, schema=schema, format=file_format, + filesystem=mockfs, root_partition=root_partition, + ) + dataset_from_paths = ds.FileSystemDataset.from_paths( + paths, schema=schema, format=file_format, filesystem=mockfs, + partitions=partitions, root_partition=root_partition, + ) + + for dataset in [dataset_from_fragments, dataset_from_paths]: + assert isinstance(dataset, ds.FileSystemDataset) + assert isinstance(dataset.format, ds.ParquetFileFormat) + assert dataset.partition_expression.equals(root_partition) + assert set(dataset.files) == set(paths) + + fragments = list(dataset.get_fragments()) + for fragment, partition, path in zip(fragments, partitions, paths): + assert fragment.partition_expression.equals(partition) + assert fragment.path == path + assert isinstance(fragment.format, ds.ParquetFileFormat) + assert isinstance(fragment, ds.ParquetFileFragment) + assert fragment.row_groups == [0] + assert fragment.num_row_groups == 1 + + row_group_fragments = list(fragment.split_by_row_group()) + assert fragment.num_row_groups == len(row_group_fragments) == 1 + assert isinstance(row_group_fragments[0], ds.ParquetFileFragment) + assert row_group_fragments[0].path == path + assert row_group_fragments[0].row_groups == [0] + assert row_group_fragments[0].num_row_groups == 1 + + fragments = list(dataset.get_fragments(filter=ds.field("const") == 0)) + assert len(fragments) == 2 + + # the root_partition keyword has a default + dataset = ds.FileSystemDataset( + fragments, schema=schema, format=file_format, filesystem=mockfs + ) + assert dataset.partition_expression.equals(ds.scalar(True)) + + # from_paths partitions have defaults + dataset = ds.FileSystemDataset.from_paths( + paths, schema=schema, format=file_format, filesystem=mockfs + ) + assert dataset.partition_expression.equals(ds.scalar(True)) + for fragment in dataset.get_fragments(): + assert fragment.partition_expression.equals(ds.scalar(True)) + + # validation of required arguments + with pytest.raises(TypeError, match="incorrect type"): + ds.FileSystemDataset(fragments, file_format, schema) + # validation of root_partition + with pytest.raises(TypeError, match="incorrect type"): + ds.FileSystemDataset(fragments, schema=schema, + format=file_format, root_partition=1) + # missing required argument in 
from_paths + with pytest.raises(TypeError, match="incorrect type"): + ds.FileSystemDataset.from_paths(fragments, format=file_format) + + +def test_filesystem_dataset_no_filesystem_interaction(dataset_reader): + # ARROW-8283 + schema = pa.schema([ + pa.field('f1', pa.int64()) + ]) + file_format = ds.IpcFileFormat() + paths = ['nonexistingfile.arrow'] + + # creating the dataset itself doesn't raise + dataset = ds.FileSystemDataset.from_paths( + paths, schema=schema, format=file_format, + filesystem=fs.LocalFileSystem(), + ) + + # getting fragments also doesn't raise + dataset.get_fragments() + + # scanning does raise + with pytest.raises(FileNotFoundError): + dataset_reader.to_table(dataset) + + +@pytest.mark.parquet +def test_dataset(dataset, dataset_reader): + assert isinstance(dataset, ds.Dataset) + assert isinstance(dataset.schema, pa.Schema) + + # TODO(kszucs): test non-boolean Exprs for filter do raise + expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64()) + expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64()) + + for batch in dataset_reader.to_batches(dataset): + assert isinstance(batch, pa.RecordBatch) + assert batch.column(0).equals(expected_i64) + assert batch.column(1).equals(expected_f64) + + for batch in dataset_reader.scanner(dataset).scan_batches(): + assert isinstance(batch, ds.TaggedRecordBatch) + assert isinstance(batch.fragment, ds.Fragment) + + table = dataset_reader.to_table(dataset) + assert isinstance(table, pa.Table) + assert len(table) == 10 + + condition = ds.field('i64') == 1 + result = dataset.to_table(use_threads=True, filter=condition) + # Don't rely on the scanning order + result = result.sort_by('group').to_pydict() + + assert result['i64'] == [1, 1] + assert result['f64'] == [1., 1.] + assert sorted(result['group']) == [1, 2] + assert sorted(result['key']) == ['xxx', 'yyy'] + + # Filtering on a nested field ref + condition = ds.field(('struct', 'b')) == '1' + result = dataset.to_table(use_threads=True, filter=condition) + result = result.sort_by('group').to_pydict() + + assert result['i64'] == [1, 4, 1, 4] + assert result['f64'] == [1.0, 4.0, 1.0, 4.0] + assert result['group'] == [1, 1, 2, 2] + assert result['key'] == ['xxx', 'xxx', 'yyy', 'yyy'] + + # Projecting on a nested field ref expression + projection = { + 'i64': ds.field('i64'), + 'f64': ds.field('f64'), + 'new': ds.field(('struct', 'b')) == '1', + } + result = dataset.to_table(use_threads=True, columns=projection) + result = result.sort_by('i64').to_pydict() + + assert list(result) == ['i64', 'f64', 'new'] + assert result['i64'] == [0, 0, 1, 1, 2, 2, 3, 3, 4, 4] + assert result['f64'] == [0.0, 0.0, 1.0, 1.0, + 2.0, 2.0, 3.0, 3.0, 4.0, 4.0] + assert result['new'] == [False, False, True, True, False, False, + False, False, True, True] + assert_dataset_fragment_convenience_methods(dataset) + + +@pytest.mark.parquet +def test_scanner_options(dataset): + scanner = dataset.to_batches(fragment_readahead=16, batch_readahead=8) + batch = next(scanner) + assert batch.num_columns == 7 + + +@pytest.mark.parquet +def test_scanner(dataset, dataset_reader): + scanner = dataset_reader.scanner( + dataset, memory_pool=pa.default_memory_pool()) + assert isinstance(scanner, ds.Scanner) + + with pytest.raises(pa.ArrowInvalid): + dataset_reader.scanner(dataset, columns=['unknown']) + + scanner = dataset_reader.scanner(dataset, columns=['i64'], + memory_pool=pa.default_memory_pool()) + assert scanner.dataset_schema == dataset.schema + assert scanner.projected_schema == pa.schema([("i64", pa.int64())]) + + 
assert isinstance(scanner, ds.Scanner) + table = scanner.to_table() + for batch in scanner.to_batches(): + assert batch.schema == scanner.projected_schema + assert batch.num_columns == 1 + assert table == scanner.to_reader().read_all() + + assert table.schema == scanner.projected_schema + for i in range(table.num_rows): + indices = pa.array([i]) + assert table.take(indices) == scanner.take(indices) + with pytest.raises(pa.ArrowIndexError): + scanner.take(pa.array([table.num_rows])) + + assert table.num_rows == scanner.count_rows() + + scanner = dataset_reader.scanner(dataset, columns=['__filename', + '__fragment_index', + '__batch_index', + '__last_in_fragment'], + memory_pool=pa.default_memory_pool()) + table = scanner.to_table() + expected_names = ['__filename', '__fragment_index', + '__batch_index', '__last_in_fragment'] + assert table.column_names == expected_names + + sorted_table = table.sort_by('__fragment_index') + assert sorted_table['__filename'].to_pylist() == ( + ['subdir/1/xxx/file0.parquet'] * 5 + + ['subdir/2/yyy/file1.parquet'] * 5) + assert sorted_table['__fragment_index'].to_pylist() == ([0] * 5 + [1] * 5) + assert sorted_table['__batch_index'].to_pylist() == [0] * 10 + assert sorted_table['__last_in_fragment'].to_pylist() == [True] * 10 + + +@pytest.mark.parquet +def test_scanner_memory_pool(dataset): + # honor default pool - https://issues.apache.org/jira/browse/ARROW-18164 + old_pool = pa.default_memory_pool() + # TODO(ARROW-18293) we should be able to use the proxy memory pool for + # for testing, but this crashes + # pool = pa.proxy_memory_pool(old_pool) + pool = pa.system_memory_pool() + pa.set_memory_pool(pool) + + try: + allocated_before = pool.bytes_allocated() + scanner = ds.Scanner.from_dataset(dataset) + _ = scanner.to_table() + assert pool.bytes_allocated() > allocated_before + finally: + pa.set_memory_pool(old_pool) + + +@pytest.mark.parquet +def test_head(dataset, dataset_reader): + result = dataset_reader.head(dataset, 0) + assert result == pa.Table.from_batches([], schema=dataset.schema) + + result = dataset_reader.head(dataset, 1, columns=['i64']).to_pydict() + assert result == {'i64': [0]} + + result = dataset_reader.head(dataset, 2, columns=['i64'], + filter=ds.field('i64') > 1).to_pydict() + assert result == {'i64': [2, 3]} + + result = dataset_reader.head(dataset, 1024, columns=['i64']).to_pydict() + assert result == {'i64': list(range(5)) * 2} + + fragment = next(dataset.get_fragments()) + result = fragment.head(1, columns=['i64']).to_pydict() + assert result == {'i64': [0]} + + result = fragment.head(1024, columns=['i64']).to_pydict() + assert result == {'i64': list(range(5))} + + +@pytest.mark.parquet +def test_take(dataset, dataset_reader): + fragment = next(dataset.get_fragments()) + for indices in [[1, 3], pa.array([1, 3])]: + expected = dataset_reader.to_table(fragment).take(indices) + assert dataset_reader.take(fragment, indices) == expected + with pytest.raises(IndexError): + dataset_reader.take(fragment, pa.array([5])) + + for indices in [[1, 7], pa.array([1, 7])]: + assert dataset_reader.take( + dataset, indices) == dataset_reader.to_table(dataset).take(indices) + with pytest.raises(IndexError): + dataset_reader.take(dataset, pa.array([10])) + + +@pytest.mark.parquet +def test_count_rows(dataset, dataset_reader): + fragment = next(dataset.get_fragments()) + assert dataset_reader.count_rows(fragment) == 5 + assert dataset_reader.count_rows( + fragment, filter=ds.field("i64") == 4) == 1 + + assert dataset_reader.count_rows(dataset) == 10 + # 
Filter on partition key + assert dataset_reader.count_rows( + dataset, filter=ds.field("group") == 1) == 5 + # Filter on data + assert dataset_reader.count_rows(dataset, filter=ds.field("i64") >= 3) == 4 + assert dataset_reader.count_rows(dataset, filter=ds.field("i64") < 0) == 0 + + +def test_abstract_classes(): + classes = [ + ds.FileFormat, + ds.Scanner, + ds.Partitioning, + ] + for klass in classes: + with pytest.raises(TypeError): + klass() + + +def test_partitioning(): + schema = pa.schema([ + pa.field('i64', pa.int64()), + pa.field('f64', pa.float64()) + ]) + for klass in [ds.DirectoryPartitioning, ds.HivePartitioning, + ds.FilenamePartitioning]: + partitioning = klass(schema) + assert isinstance(partitioning, ds.Partitioning) + assert partitioning == klass(schema) + assert partitioning != "other object" + + schema = pa.schema([ + pa.field('group', pa.int64()), + pa.field('key', pa.float64()) + ]) + partitioning = ds.DirectoryPartitioning(schema) + assert len(partitioning.dictionaries) == 2 + assert all(x is None for x in partitioning.dictionaries) + expr = partitioning.parse('/3/3.14/') + assert isinstance(expr, ds.Expression) + + expected = (ds.field('group') == 3) & (ds.field('key') == 3.14) + assert expr.equals(expected) + + with pytest.raises(pa.ArrowInvalid): + partitioning.parse('/prefix/3/aaa') + + expr = partitioning.parse('/3/') + expected = ds.field('group') == 3 + assert expr.equals(expected) + + assert partitioning != ds.DirectoryPartitioning(schema, segment_encoding="none") + + schema = pa.schema([ + pa.field('alpha', pa.int64()), + pa.field('beta', pa.int64()) + ]) + partitioning = ds.HivePartitioning(schema, null_fallback='xyz') + assert len(partitioning.dictionaries) == 2 + assert all(x is None for x in partitioning.dictionaries) + expr = partitioning.parse('/alpha=0/beta=3/') + expected = ( + (ds.field('alpha') == ds.scalar(0)) & + (ds.field('beta') == ds.scalar(3)) + ) + assert expr.equals(expected) + + expr = partitioning.parse('/alpha=xyz/beta=3/') + expected = ( + (ds.field('alpha').is_null() & (ds.field('beta') == ds.scalar(3))) + ) + assert expr.equals(expected) + + for shouldfail in ['/alpha=one/beta=2/', '/alpha=one/', '/beta=two/']: + with pytest.raises(pa.ArrowInvalid): + partitioning.parse(shouldfail) + + assert partitioning != ds.HivePartitioning(schema, null_fallback='other') + + schema = pa.schema([ + pa.field('group', pa.int64()), + pa.field('key', pa.float64()) + ]) + partitioning = ds.FilenamePartitioning(schema) + assert len(partitioning.dictionaries) == 2 + assert all(x is None for x in partitioning.dictionaries) + expr = partitioning.parse('3_3.14_') + assert isinstance(expr, ds.Expression) + + expected = (ds.field('group') == 3) & (ds.field('key') == 3.14) + assert expr.equals(expected) + + with pytest.raises(pa.ArrowInvalid): + partitioning.parse('prefix_3_aaa_') + + assert partitioning != ds.FilenamePartitioning(schema, segment_encoding="none") + + schema = pa.schema([ + pa.field('group', pa.int64()), + pa.field('key', pa.dictionary(pa.int8(), pa.string())) + ]) + partitioning = ds.DirectoryPartitioning( + schema, dictionaries={"key": pa.array(["first", "second", "third"])} + ) + assert partitioning.dictionaries[0] is None + assert partitioning.dictionaries[1].to_pylist() == [ + "first", "second", "third"] + assert partitioning != ds.DirectoryPartitioning(schema, dictionaries=None) + + partitioning = ds.FilenamePartitioning( + pa.schema([ + pa.field('group', pa.int64()), + pa.field('key', pa.dictionary(pa.int8(), pa.string())) + ]), + 
dictionaries={ + "key": pa.array(["first", "second", "third"]), + }) + assert partitioning.dictionaries[0] is None + assert partitioning.dictionaries[1].to_pylist() == [ + "first", "second", "third"] + + # test partitioning roundtrip + table = pa.table([ + pa.array(range(20)), pa.array(np.random.randn(20)), + pa.array(np.repeat(['a', 'b'], 10))], + names=["f1", "f2", "part"] + ) + partitioning_schema = pa.schema([("part", pa.string())]) + for klass in [ds.DirectoryPartitioning, ds.HivePartitioning, + ds.FilenamePartitioning]: + with tempfile.TemporaryDirectory() as tempdir: + partitioning = klass(partitioning_schema) + ds.write_dataset(table, tempdir, + format='ipc', partitioning=partitioning) + load_back = ds.dataset(tempdir, format='ipc', + partitioning=partitioning) + load_back_table = load_back.to_table() + assert load_back_table.equals(table) + + # test invalid partitioning input + with tempfile.TemporaryDirectory() as tempdir: + partitioning = ds.DirectoryPartitioning(partitioning_schema) + ds.write_dataset(table, tempdir, + format='ipc', partitioning=partitioning) + load_back = None + with pytest.raises(ValueError, + match="Expected Partitioning or PartitioningFactory"): + load_back = ds.dataset(tempdir, format='ipc', partitioning=int(0)) + assert load_back is None + + +def test_partitioning_pickling(pickle_module): + schema = pa.schema([ + pa.field('i64', pa.int64()), + pa.field('f64', pa.float64()) + ]) + parts = [ + ds.DirectoryPartitioning(schema), + ds.HivePartitioning(schema), + ds.FilenamePartitioning(schema), + ds.DirectoryPartitioning(schema, segment_encoding="none"), + ds.FilenamePartitioning(schema, segment_encoding="none"), + ds.HivePartitioning(schema, segment_encoding="none", null_fallback="xyz"), + ] + + for part in parts: + assert pickle_module.loads(pickle_module.dumps(part)) == part + + +def test_expression_arithmetic_operators(): + dataset = ds.dataset(pa.table({'a': [1, 2, 3], 'b': [2, 2, 2]})) + a = ds.field("a") + b = ds.field("b") + result = dataset.to_table(columns={ + "a+1": a + 1, + "b-a": b - a, + "a*2": a * 2, + "a/b": a.cast("float64") / b, + }) + expected = pa.table({ + "a+1": [2, 3, 4], "b-a": [1, 0, -1], + "a*2": [2, 4, 6], "a/b": [0.5, 1.0, 1.5], + }) + assert result.equals(expected) + + +def test_partition_keys(): + a, b, c = [ds.field(f) == f for f in 'abc'] + assert ds.get_partition_keys(a) == {'a': 'a'} + assert ds.get_partition_keys(a) == ds._get_partition_keys(a) + assert ds.get_partition_keys(a & b & c) == {f: f for f in 'abc'} + + nope = ds.field('d') >= 3 + assert ds.get_partition_keys(nope) == {} + assert ds.get_partition_keys(a & nope) == {'a': 'a'} + + null = ds.field('a').is_null() + assert ds.get_partition_keys(null) == {'a': None} + + +@pytest.mark.parquet +def test_parquet_read_options(): + opts1 = ds.ParquetReadOptions() + opts2 = ds.ParquetReadOptions(dictionary_columns=['a', 'b']) + opts3 = ds.ParquetReadOptions(coerce_int96_timestamp_unit="ms") + + assert opts1.dictionary_columns == set() + + assert opts2.dictionary_columns == {'a', 'b'} + + assert opts1.coerce_int96_timestamp_unit == "ns" + assert opts3.coerce_int96_timestamp_unit == "ms" + + assert opts1 == opts1 + assert opts1 != opts2 + assert opts1 != opts3 + + +@pytest.mark.parquet +def test_parquet_file_format_read_options(): + pff1 = ds.ParquetFileFormat() + pff2 = ds.ParquetFileFormat(dictionary_columns={'a'}) + pff3 = ds.ParquetFileFormat(coerce_int96_timestamp_unit="s") + + assert pff1.read_options == ds.ParquetReadOptions() + assert pff2.read_options == 
ds.ParquetReadOptions(dictionary_columns=['a']) + assert pff3.read_options == ds.ParquetReadOptions( + coerce_int96_timestamp_unit="s") + + +@pytest.mark.parquet +def test_parquet_scan_options(): + opts1 = ds.ParquetFragmentScanOptions() + opts2 = ds.ParquetFragmentScanOptions(buffer_size=4096) + opts3 = ds.ParquetFragmentScanOptions( + buffer_size=2**13, use_buffered_stream=True) + opts4 = ds.ParquetFragmentScanOptions(buffer_size=2**13, pre_buffer=False) + opts5 = ds.ParquetFragmentScanOptions( + thrift_string_size_limit=123456, + thrift_container_size_limit=987654,) + opts6 = ds.ParquetFragmentScanOptions( + page_checksum_verification=True) + cache_opts = pa.CacheOptions( + hole_size_limit=2**10, range_size_limit=8*2**10, lazy=True) + opts7 = ds.ParquetFragmentScanOptions(pre_buffer=True, cache_options=cache_opts) + + assert opts1.use_buffered_stream is False + assert opts1.buffer_size == 2**13 + assert opts1.pre_buffer is True + assert opts1.thrift_string_size_limit == 100_000_000 # default in C++ + assert opts1.thrift_container_size_limit == 1_000_000 # default in C++ + assert opts1.page_checksum_verification is False + + assert opts2.use_buffered_stream is False + assert opts2.buffer_size == 2**12 + assert opts2.pre_buffer is True + + assert opts3.use_buffered_stream is True + assert opts3.buffer_size == 2**13 + assert opts3.pre_buffer is True + + assert opts4.use_buffered_stream is False + assert opts4.buffer_size == 2**13 + assert opts4.pre_buffer is False + + assert opts5.thrift_string_size_limit == 123456 + assert opts5.thrift_container_size_limit == 987654 + + assert opts6.page_checksum_verification is True + + assert opts7.pre_buffer is True + assert opts7.cache_options == cache_opts + assert opts7.cache_options != opts1.cache_options + + assert opts1 == opts1 + assert opts1 != opts2 + assert opts2 != opts3 + assert opts3 != opts4 + assert opts5 != opts1 + assert opts6 != opts1 + assert opts7 != opts1 + + +def test_file_format_pickling(pickle_module): + formats = [ + ds.IpcFileFormat(), + ds.CsvFileFormat(), + ds.CsvFileFormat(pa.csv.ParseOptions(delimiter='\t', + ignore_empty_lines=True)), + ds.CsvFileFormat(read_options=pa.csv.ReadOptions( + skip_rows=3, column_names=['foo'])), + ds.CsvFileFormat(read_options=pa.csv.ReadOptions( + skip_rows=3, block_size=2**20)), + ds.JsonFileFormat(), + ds.JsonFileFormat( + parse_options=pa.json.ParseOptions(newlines_in_values=True, + unexpected_field_behavior="ignore")), + ds.JsonFileFormat(read_options=pa.json.ReadOptions( + use_threads=False, block_size=14)), + ] + try: + formats.append(ds.OrcFileFormat()) + except ImportError: + pass + + if pq is not None: + formats.extend([ + ds.ParquetFileFormat(), + ds.ParquetFileFormat(dictionary_columns={'a'}), + ds.ParquetFileFormat(use_buffered_stream=True), + ds.ParquetFileFormat( + use_buffered_stream=True, + buffer_size=4096, + thrift_string_size_limit=123, + thrift_container_size_limit=456, + ), + ]) + + for file_format in formats: + assert pickle_module.loads(pickle_module.dumps(file_format)) == file_format + + +def test_fragment_scan_options_pickling(pickle_module): + options = [ + ds.CsvFragmentScanOptions(), + ds.CsvFragmentScanOptions( + convert_options=pa.csv.ConvertOptions(strings_can_be_null=True)), + ds.CsvFragmentScanOptions( + read_options=pa.csv.ReadOptions(block_size=2**16)), + ds.JsonFragmentScanOptions(), + ds.JsonFragmentScanOptions( + pa.json.ParseOptions(newlines_in_values=False, + unexpected_field_behavior="error")), + ds.JsonFragmentScanOptions( + 
read_options=pa.json.ReadOptions(use_threads=True, block_size=512)), + ] + + if pq is not None: + options.extend([ + ds.ParquetFragmentScanOptions(buffer_size=4096), + ds.ParquetFragmentScanOptions(pre_buffer=True), + ]) + + for option in options: + assert pickle_module.loads(pickle_module.dumps(option)) == option + + +@pytest.mark.parametrize('paths_or_selector', [ + fs.FileSelector('subdir', recursive=True), + [ + 'subdir/1/xxx/file0.parquet', + 'subdir/2/yyy/file1.parquet', + ] +]) +@pytest.mark.parametrize('pre_buffer', [False, True]) +@pytest.mark.parquet +def test_filesystem_factory(mockfs, paths_or_selector, pre_buffer): + format = ds.ParquetFileFormat( + read_options=ds.ParquetReadOptions(dictionary_columns={"str"}), + pre_buffer=pre_buffer + ) + + options = ds.FileSystemFactoryOptions('subdir') + options.partitioning = ds.DirectoryPartitioning( + pa.schema([ + pa.field('group', pa.int32()), + pa.field('key', pa.string()) + ]) + ) + assert options.partition_base_dir == 'subdir' + assert options.selector_ignore_prefixes == ['.', '_'] + assert options.exclude_invalid_files is False + + factory = ds.FileSystemDatasetFactory( + mockfs, paths_or_selector, format, options + ) + inspected_schema = factory.inspect() + + assert factory.inspect().equals(pa.schema([ + pa.field('i64', pa.int64()), + pa.field('f64', pa.float64()), + pa.field('str', pa.dictionary(pa.int32(), pa.string())), + pa.field('const', pa.int64()), + pa.field('struct', pa.struct({'a': pa.int64(), + 'b': pa.string()})), + pa.field('group', pa.int32()), + pa.field('key', pa.string()), + ]), check_metadata=False) + + assert isinstance(factory.inspect_schemas(), list) + assert isinstance(factory.finish(inspected_schema), + ds.FileSystemDataset) + assert factory.root_partition.equals(ds.scalar(True)) + + dataset = factory.finish() + assert isinstance(dataset, ds.FileSystemDataset) + + scanner = dataset.scanner() + expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64()) + expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64()) + expected_str = pa.DictionaryArray.from_arrays( + pa.array([0, 1, 2, 3, 4], type=pa.int32()), + pa.array("0 1 2 3 4".split(), type=pa.string()) + ) + expected_struct = pa.array([{'a': i % 3, 'b': str(i % 3)} + for i in range(5)]) + iterator = scanner.scan_batches() + for (batch, fragment), group, key in zip(iterator, [1, 2], ['xxx', 'yyy']): + expected_group = pa.array([group] * 5, type=pa.int32()) + expected_key = pa.array([key] * 5, type=pa.string()) + expected_const = pa.array([group - 1] * 5, type=pa.int64()) + # Can't compare or really introspect expressions from Python + assert fragment.partition_expression is not None + assert batch.num_columns == 7 + assert batch[0].equals(expected_i64) + assert batch[1].equals(expected_f64) + assert batch[2].equals(expected_str) + assert batch[3].equals(expected_const) + assert batch[4].equals(expected_struct) + assert batch[5].equals(expected_group) + assert batch[6].equals(expected_key) + + table = dataset.to_table() + assert isinstance(table, pa.Table) + assert len(table) == 10 + assert table.num_columns == 7 + + +@pytest.mark.parquet +def test_make_fragment(multisourcefs): + parquet_format = ds.ParquetFileFormat() + dataset = ds.dataset('/plain', filesystem=multisourcefs, + format=parquet_format) + + for path in dataset.files: + fragment = parquet_format.make_fragment(path, multisourcefs) + assert fragment.row_groups == [0] + + row_group_fragment = parquet_format.make_fragment(path, multisourcefs, + row_groups=[0]) + for f in [fragment, 
row_group_fragment]: + assert isinstance(f, ds.ParquetFileFragment) + assert f.path == path + assert isinstance(f.filesystem, type(multisourcefs)) + assert row_group_fragment.row_groups == [0] + + +@pytest.mark.parquet +@pytest.mark.s3 +def test_make_fragment_with_size(s3_example_simple): + """ + Test passing file_size to make_fragment. Not all FS implementations make use + of the file size (by implementing an OpenInputFile that takes a FileInfo), but + s3 does, which is why it's used here. + """ + table, path, fs, uri, host, port, access_key, secret_key = s3_example_simple + + file_format = ds.ParquetFileFormat() + paths = [path] + + fragments = [file_format.make_fragment(path, fs) + for path in paths] + dataset = ds.FileSystemDataset( + fragments, format=file_format, schema=table.schema, filesystem=fs + ) + + tbl = dataset.to_table() + assert tbl.equals(table) + + # true sizes -> works + sizes_true = [dataset.filesystem.get_file_info(x).size for x in dataset.files] + fragments_with_size = [file_format.make_fragment(path, fs, file_size=size) + for path, size in zip(paths, sizes_true)] + dataset_with_size = ds.FileSystemDataset( + fragments_with_size, format=file_format, schema=table.schema, filesystem=fs + ) + tbl = dataset.to_table() + assert tbl.equals(table) + + # too small sizes -> error + sizes_toosmall = [1 for path in paths] + fragments_with_size = [file_format.make_fragment(path, fs, file_size=size) + for path, size in zip(paths, sizes_toosmall)] + + dataset_with_size = ds.FileSystemDataset( + fragments_with_size, format=file_format, schema=table.schema, filesystem=fs + ) + + with pytest.raises(pyarrow.lib.ArrowInvalid, match='Parquet file size is 1 bytes'): + table = dataset_with_size.to_table() + + # too large sizes -> error + sizes_toolarge = [1000000 for path in paths] + fragments_with_size = [file_format.make_fragment(path, fs, file_size=size) + for path, size in zip(paths, sizes_toolarge)] + + dataset_with_size = ds.FileSystemDataset( + fragments_with_size, format=file_format, schema=table.schema, filesystem=fs + ) + + # invalid range + with pytest.raises(OSError, match='HTTP status 416'): + table = dataset_with_size.to_table() + + +def test_make_csv_fragment_from_buffer(dataset_reader, pickle_module): + content = textwrap.dedent(""" + alpha,num,animal + a,12,dog + b,11,cat + c,10,rabbit + """) + buffer = pa.py_buffer(content.encode('utf-8')) + + csv_format = ds.CsvFileFormat() + fragment = csv_format.make_fragment(buffer) + + # When buffer, fragment open returns a BufferReader, not NativeFile + assert isinstance(fragment.open(), pa.BufferReader) + + expected = pa.table([['a', 'b', 'c'], + [12, 11, 10], + ['dog', 'cat', 'rabbit']], + names=['alpha', 'num', 'animal']) + assert dataset_reader.to_table(fragment).equals(expected) + + pickled = pickle_module.loads(pickle_module.dumps(fragment)) + assert dataset_reader.to_table(pickled).equals(fragment.to_table()) + + +def test_make_json_fragment_from_buffer(dataset_reader, pickle_module): + content = '{"alpha" : "a", "num": 12, "animal" : "dog"}\n' + \ + '{"alpha" : "b", "num": 11, "animal" : "cat"}\n' + \ + '{"alpha" : "c", "num": 10, "animal" : "rabbit"}\n' + buffer = pa.py_buffer(content.encode('utf-8')) + + json_format = ds.JsonFileFormat() + fragment = json_format.make_fragment(buffer) + + # When buffer, fragment open returns a BufferReader, not NativeFile + assert isinstance(fragment.open(), pa.BufferReader) + + expected = pa.table([['a', 'b', 'c'], + [12, 11, 10], + ['dog', 'cat', 'rabbit']], + names=['alpha', 'num', 
'animal']) + assert dataset_reader.to_table(fragment).equals(expected) + + pickled = pickle_module.loads(pickle_module.dumps(fragment)) + assert dataset_reader.to_table(pickled).equals(fragment.to_table()) + + +@pytest.mark.parquet +def test_make_parquet_fragment_from_buffer(dataset_reader, pickle_module): + arrays = [ + pa.array(['a', 'b', 'c']), + pa.array([12, 11, 10]), + pa.array(['dog', 'cat', 'rabbit']) + ] + dictionary_arrays = [ + arrays[0].dictionary_encode(), + arrays[1], + arrays[2].dictionary_encode() + ] + dictionary_format = ds.ParquetFileFormat( + read_options=ds.ParquetReadOptions( + dictionary_columns=['alpha', 'animal'] + ), + use_buffered_stream=True, + buffer_size=4096, + ) + + cases = [ + (arrays, ds.ParquetFileFormat()), + (dictionary_arrays, dictionary_format) + ] + for arrays, format_ in cases: + table = pa.table(arrays, names=['alpha', 'num', 'animal']) + + out = pa.BufferOutputStream() + pq.write_table(table, out) + buffer = out.getvalue() + + fragment = format_.make_fragment(buffer) + assert dataset_reader.to_table(fragment).equals(table) + + pickled = pickle_module.loads(pickle_module.dumps(fragment)) + assert dataset_reader.to_table(pickled).equals(table) + + +@pytest.mark.parquet +def _create_dataset_for_fragments(tempdir, chunk_size=None, filesystem=None): + table = pa.table( + [range(8), [1] * 8, ['a'] * 4 + ['b'] * 4], + names=['f1', 'f2', 'part'] + ) + + path = str(tempdir / "test_parquet_dataset") + + pq.write_to_dataset(table, path, + partition_cols=["part"], chunk_size=chunk_size) + dataset = ds.dataset( + path, format="parquet", partitioning="hive", filesystem=filesystem + ) + + return table, dataset + + +@pytest.mark.parquet +def test_fragments(tempdir, dataset_reader): + table, dataset = _create_dataset_for_fragments(tempdir) + + # list fragments + fragments = list(dataset.get_fragments()) + assert len(fragments) == 2 + f = fragments[0] + + physical_names = ['f1', 'f2'] + # file's schema does not include partition column + assert f.physical_schema.names == physical_names + assert f.format.inspect(f.path, f.filesystem) == f.physical_schema + assert f.partition_expression.equals(ds.field('part') == 'a') + + # By default, the partition column is not part of the schema. + result = dataset_reader.to_table(f) + assert result.column_names == physical_names + assert result.equals(table.remove_column(2).slice(0, 4)) + + # scanning fragment includes partition columns when given the proper + # schema. 
+ result = dataset_reader.to_table(f, schema=dataset.schema) + assert result.column_names == ['f1', 'f2', 'part'] + assert result.equals(table.slice(0, 4)) + assert f.physical_schema == result.schema.remove(2) + + # scanning fragments follow filter predicate + result = dataset_reader.to_table( + f, schema=dataset.schema, filter=ds.field('f1') < 2) + assert result.column_names == ['f1', 'f2', 'part'] + + +@pytest.mark.pandas +@pytest.mark.parquet +def test_fragments_implicit_cast(tempdir): + # ARROW-8693 + table = pa.table([range(8), [1] * 4 + [2] * 4], names=['col', 'part']) + path = str(tempdir / "test_parquet_dataset") + pq.write_to_dataset(table, path, partition_cols=["part"]) + + part = ds.partitioning(pa.schema([('part', 'int8')]), flavor="hive") + dataset = ds.dataset(path, format="parquet", partitioning=part) + fragments = dataset.get_fragments(filter=ds.field("part") >= 2) + assert len(list(fragments)) == 1 + + +@pytest.mark.parquet +def test_fragments_reconstruct(tempdir, dataset_reader, pickle_module): + table, dataset = _create_dataset_for_fragments(tempdir) + + def assert_yields_projected(fragment, row_slice, + columns=None, filter=None): + actual = fragment.to_table( + schema=table.schema, columns=columns, filter=filter) + column_names = columns if columns else table.column_names + assert actual.column_names == column_names + + expected = table.slice(*row_slice).select(column_names) + assert actual.equals(expected) + + fragment = list(dataset.get_fragments())[0] + parquet_format = fragment.format + + # test pickle roundtrip + pickled_fragment = pickle_module.loads(pickle_module.dumps(fragment)) + assert dataset_reader.to_table( + pickled_fragment) == dataset_reader.to_table(fragment) + + # manually re-construct a fragment, with explicit schema + new_fragment = parquet_format.make_fragment( + fragment.path, fragment.filesystem, + partition_expression=fragment.partition_expression) + assert dataset_reader.to_table(new_fragment).equals( + dataset_reader.to_table(fragment)) + assert_yields_projected(new_fragment, (0, 4)) + + # filter / column projection, inspected schema + new_fragment = parquet_format.make_fragment( + fragment.path, fragment.filesystem, + partition_expression=fragment.partition_expression) + assert_yields_projected(new_fragment, (0, 2), filter=ds.field('f1') < 2) + + # filter requiring cast / column projection, inspected schema + new_fragment = parquet_format.make_fragment( + fragment.path, fragment.filesystem, + partition_expression=fragment.partition_expression) + assert_yields_projected(new_fragment, (0, 2), + columns=['f1'], filter=ds.field('f1') < 2.0) + + # filter on the partition column + new_fragment = parquet_format.make_fragment( + fragment.path, fragment.filesystem, + partition_expression=fragment.partition_expression) + assert_yields_projected(new_fragment, (0, 4), + filter=ds.field('part') == 'a') + + # Fragments don't contain the partition's columns if not provided to the + # `to_table(schema=...)` method. 
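+ # Without schema=..., the filter below is bound against the fragment's
+ # physical schema, which has no "part" field, so scanning raises.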
+ pattern = (r'No match for FieldRef.Name\(part\) in ' + + fragment.physical_schema.to_string(False, False, False)) + with pytest.raises(ValueError, match=pattern): + new_fragment = parquet_format.make_fragment( + fragment.path, fragment.filesystem, + partition_expression=fragment.partition_expression) + dataset_reader.to_table(new_fragment, filter=ds.field('part') == 'a') + + +@pytest.mark.parquet +def test_fragments_parquet_row_groups(tempdir, dataset_reader): + table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2) + + fragment = list(dataset.get_fragments())[0] + + # list and scan row group fragments + row_group_fragments = list(fragment.split_by_row_group()) + assert len(row_group_fragments) == fragment.num_row_groups == 2 + result = dataset_reader.to_table( + row_group_fragments[0], schema=dataset.schema) + assert result.column_names == ['f1', 'f2', 'part'] + assert len(result) == 2 + assert result.equals(table.slice(0, 2)) + + assert row_group_fragments[0].row_groups is not None + assert row_group_fragments[0].num_row_groups == 1 + assert row_group_fragments[0].row_groups[0].statistics == { + 'f1': {'min': 0, 'max': 1}, + 'f2': {'min': 1, 'max': 1}, + } + + fragment = list(dataset.get_fragments(filter=ds.field('f1') < 1))[0] + row_group_fragments = list(fragment.split_by_row_group(ds.field('f1') < 1)) + assert len(row_group_fragments) == 1 + result = dataset_reader.to_table( + row_group_fragments[0], filter=ds.field('f1') < 1) + assert len(result) == 1 + + +@pytest.mark.parquet +def test_fragments_parquet_num_row_groups(tempdir): + table = pa.table({'a': range(8)}) + pq.write_table(table, tempdir / "test.parquet", row_group_size=2) + dataset = ds.dataset(tempdir / "test.parquet", format="parquet") + original_fragment = list(dataset.get_fragments())[0] + + # create fragment with subset of row groups + fragment = original_fragment.format.make_fragment( + original_fragment.path, original_fragment.filesystem, + row_groups=[1, 3]) + assert fragment.num_row_groups == 2 + # ensure that parsing metadata preserves correct number of row groups + fragment.ensure_complete_metadata() + assert fragment.num_row_groups == 2 + assert len(fragment.row_groups) == 2 + + +@pytest.mark.pandas +@pytest.mark.parquet +def test_fragments_parquet_row_groups_dictionary(tempdir, dataset_reader): + df = pd.DataFrame(dict(col1=['a', 'b'], col2=[1, 2])) + df['col1'] = df['col1'].astype("category") + + pq.write_table(pa.table(df), tempdir / "test_filter_dictionary.parquet") + + import pyarrow.dataset as ds + dataset = ds.dataset(tempdir / 'test_filter_dictionary.parquet') + result = dataset_reader.to_table(dataset, filter=ds.field("col1") == "a") + + assert (df.iloc[0] == result.to_pandas()).all().all() + + +@pytest.mark.parquet +def test_fragments_parquet_ensure_metadata(tempdir, open_logging_fs, pickle_module): + fs, assert_opens = open_logging_fs + _, dataset = _create_dataset_for_fragments( + tempdir, chunk_size=2, filesystem=fs + ) + fragment = list(dataset.get_fragments())[0] + + # with default discovery, no metadata loaded + with assert_opens([fragment.path]): + fragment.ensure_complete_metadata() + assert fragment.row_groups == [0, 1] + + # second time -> use cached / no file IO + with assert_opens([]): + fragment.ensure_complete_metadata() + + assert isinstance(fragment.metadata, pq.FileMetaData) + + # recreate fragment with row group ids + new_fragment = fragment.format.make_fragment( + fragment.path, fragment.filesystem, row_groups=[0, 1] + ) + assert new_fragment.row_groups == 
fragment.row_groups + + # collect metadata + new_fragment.ensure_complete_metadata() + row_group = new_fragment.row_groups[0] + assert row_group.id == 0 + assert row_group.num_rows == 2 + assert row_group.statistics is not None + + # pickling preserves row group ids + pickled_fragment = pickle_module.loads(pickle_module.dumps(new_fragment)) + with assert_opens([fragment.path]): + assert pickled_fragment.row_groups == [0, 1] + row_group = pickled_fragment.row_groups[0] + assert row_group.id == 0 + assert row_group.statistics is not None + + +@pytest.mark.parquet +def test_fragments_parquet_pickle_no_metadata(tempdir, open_logging_fs, pickle_module): + # https://issues.apache.org/jira/browse/ARROW-15796 + fs, assert_opens = open_logging_fs + _, dataset = _create_dataset_for_fragments(tempdir, filesystem=fs) + fragment = list(dataset.get_fragments())[1] + + # second fragment hasn't yet loaded the metadata, + # and pickling it also should not read the metadata + with assert_opens([]): + pickled_fragment = pickle_module.loads(pickle_module.dumps(fragment)) + + # then accessing the row group info reads the metadata + with assert_opens([pickled_fragment.path]): + row_groups = pickled_fragment.row_groups + assert row_groups == [0] + + +def _create_dataset_all_types(tempdir, chunk_size=None): + table = pa.table( + [ + pa.array([True, None, False], pa.bool_()), + pa.array([1, 10, 42], pa.int8()), + pa.array([1, 10, 42], pa.uint8()), + pa.array([1, 10, 42], pa.int16()), + pa.array([1, 10, 42], pa.uint16()), + pa.array([1, 10, 42], pa.int32()), + pa.array([1, 10, 42], pa.uint32()), + pa.array([1, 10, 42], pa.int64()), + pa.array([1, 10, 42], pa.uint64()), + pa.array([1.0, 10.0, 42.0], pa.float32()), + pa.array([1.0, 10.0, 42.0], pa.float64()), + pa.array(['a', None, 'z'], pa.utf8()), + pa.array(['a', None, 'z'], pa.binary()), + pa.array([1, 10, 42], pa.timestamp('s')), + pa.array([1, 10, 42], pa.timestamp('ms')), + pa.array([1, 10, 42], pa.timestamp('us')), + pa.array([1, 10, 42], pa.date32()), + pa.array([1, 10, 4200000000], pa.date64()), + pa.array([1, 10, 42], pa.time32('s')), + pa.array([1, 10, 42], pa.time64('us')), + ], + names=[ + 'boolean', + 'int8', + 'uint8', + 'int16', + 'uint16', + 'int32', + 'uint32', + 'int64', + 'uint64', + 'float', + 'double', + 'utf8', + 'binary', + 'ts[s]', + 'ts[ms]', + 'ts[us]', + 'date32', + 'date64', + 'time32', + 'time64', + ] + ) + + path = str(tempdir / "test_parquet_dataset_all_types") + + # write_to_dataset currently requires pandas + pq.write_to_dataset(table, path, chunk_size=chunk_size) + + return table, ds.dataset(path, format="parquet", partitioning="hive") + + +@pytest.mark.pandas +@pytest.mark.parquet +def test_parquet_fragment_statistics(tempdir): + table, dataset = _create_dataset_all_types(tempdir) + + fragment = list(dataset.get_fragments())[0] + + import datetime + def dt_s(x): return datetime.datetime(1970, 1, 1, 0, 0, x) + def dt_ms(x): return datetime.datetime(1970, 1, 1, 0, 0, 0, x*1000) + def dt_us(x): return datetime.datetime(1970, 1, 1, 0, 0, 0, x) + date = datetime.date + time = datetime.time + + # list and scan row group fragments + row_group_fragments = list(fragment.split_by_row_group()) + assert row_group_fragments[0].row_groups is not None + row_group = row_group_fragments[0].row_groups[0] + assert row_group.num_rows == 3 + assert row_group.total_byte_size > 1000 + assert row_group.statistics == { + 'boolean': {'min': False, 'max': True}, + 'int8': {'min': 1, 'max': 42}, + 'uint8': {'min': 1, 'max': 42}, + 'int16': {'min': 1, 'max': 
42}, + 'uint16': {'min': 1, 'max': 42}, + 'int32': {'min': 1, 'max': 42}, + 'uint32': {'min': 1, 'max': 42}, + 'int64': {'min': 1, 'max': 42}, + 'uint64': {'min': 1, 'max': 42}, + 'float': {'min': 1.0, 'max': 42.0}, + 'double': {'min': 1.0, 'max': 42.0}, + 'utf8': {'min': 'a', 'max': 'z'}, + 'binary': {'min': b'a', 'max': b'z'}, + 'ts[s]': {'min': dt_s(1), 'max': dt_s(42)}, + 'ts[ms]': {'min': dt_ms(1), 'max': dt_ms(42)}, + 'ts[us]': {'min': dt_us(1), 'max': dt_us(42)}, + 'date32': {'min': date(1970, 1, 2), 'max': date(1970, 2, 12)}, + 'date64': {'min': date(1970, 1, 1), 'max': date(1970, 2, 18)}, + 'time32': {'min': time(0, 0, 1), 'max': time(0, 0, 42)}, + 'time64': {'min': time(0, 0, 0, 1), 'max': time(0, 0, 0, 42)}, + } + + +@pytest.mark.parquet +def test_parquet_fragment_statistics_nulls(tempdir): + table = pa.table({'a': [0, 1, None, None], 'b': ['a', 'b', None, None]}) + pq.write_table(table, tempdir / "test.parquet", row_group_size=2) + + dataset = ds.dataset(tempdir / "test.parquet", format="parquet") + fragments = list(dataset.get_fragments())[0].split_by_row_group() + # second row group has all nulls -> no statistics + assert fragments[1].row_groups[0].statistics == {} + + +@pytest.mark.pandas +@pytest.mark.parquet +def test_parquet_empty_row_group_statistics(tempdir): + df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0] + df.to_parquet(tempdir / "test.parquet", engine="pyarrow") + + dataset = ds.dataset(tempdir / "test.parquet", format="parquet") + fragments = list(dataset.get_fragments())[0].split_by_row_group() + # Only row group is empty + assert fragments[0].row_groups[0].statistics == {} + + +@pytest.mark.parquet +def test_fragments_parquet_row_groups_predicate(tempdir): + table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2) + + fragment = list(dataset.get_fragments())[0] + assert fragment.partition_expression.equals(ds.field('part') == 'a') + + # predicate may reference a partition field not present in the + # physical_schema if an explicit schema is provided to split_by_row_group + + # filter matches partition_expression: all row groups + row_group_fragments = list( + fragment.split_by_row_group(filter=ds.field('part') == 'a', + schema=dataset.schema)) + assert len(row_group_fragments) == 2 + + # filter contradicts partition_expression: no row groups + row_group_fragments = list( + fragment.split_by_row_group(filter=ds.field('part') == 'b', + schema=dataset.schema)) + assert len(row_group_fragments) == 0 + + +@pytest.mark.parquet +def test_fragments_parquet_row_groups_reconstruct(tempdir, dataset_reader, + pickle_module): + table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2) + + fragment = list(dataset.get_fragments())[0] + parquet_format = fragment.format + row_group_fragments = list(fragment.split_by_row_group()) + + # test pickle roundtrip + pickled_fragment = pickle_module.loads(pickle_module.dumps(fragment)) + assert dataset_reader.to_table( + pickled_fragment) == dataset_reader.to_table(fragment) + + # manually re-construct row group fragments + new_fragment = parquet_format.make_fragment( + fragment.path, fragment.filesystem, + partition_expression=fragment.partition_expression, + row_groups=[0]) + result = dataset_reader.to_table(new_fragment) + assert result.equals(dataset_reader.to_table(row_group_fragments[0])) + + # manually re-construct a row group fragment with filter/column projection + new_fragment = parquet_format.make_fragment( + fragment.path, fragment.filesystem, + 
partition_expression=fragment.partition_expression, + row_groups={1}) + result = dataset_reader.to_table( + new_fragment, schema=table.schema, columns=['f1', 'part'], + filter=ds.field('f1') < 3, ) + assert result.column_names == ['f1', 'part'] + assert len(result) == 1 + + # out of bounds row group index + new_fragment = parquet_format.make_fragment( + fragment.path, fragment.filesystem, + partition_expression=fragment.partition_expression, + row_groups={2}) + with pytest.raises(IndexError, match="references row group 2"): + dataset_reader.to_table(new_fragment) + + +@pytest.mark.parquet +def test_fragments_parquet_subset_ids(tempdir, open_logging_fs, + dataset_reader): + fs, assert_opens = open_logging_fs + table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1, + filesystem=fs) + fragment = list(dataset.get_fragments())[0] + + # select with row group ids + subfrag = fragment.subset(row_group_ids=[0, 3]) + with assert_opens([]): + assert subfrag.num_row_groups == 2 + assert subfrag.row_groups == [0, 3] + assert subfrag.row_groups[0].statistics is not None + + # check correct scan result of subset + result = dataset_reader.to_table(subfrag) + assert result.to_pydict() == {"f1": [0, 3], "f2": [1, 1]} + + # empty list of ids + subfrag = fragment.subset(row_group_ids=[]) + assert subfrag.num_row_groups == 0 + assert subfrag.row_groups == [] + result = dataset_reader.to_table(subfrag, schema=dataset.schema) + assert result.num_rows == 0 + assert result.equals(table[:0]) + + +@pytest.mark.parquet +def test_fragments_parquet_subset_filter(tempdir, open_logging_fs, + dataset_reader): + fs, assert_opens = open_logging_fs + table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1, + filesystem=fs) + fragment = list(dataset.get_fragments())[0] + + # select with filter + subfrag = fragment.subset(ds.field("f1") >= 1) + with assert_opens([]): + assert subfrag.num_row_groups == 3 + assert len(subfrag.row_groups) == 3 + assert subfrag.row_groups[0].statistics is not None + + # check correct scan result of subset + result = dataset_reader.to_table(subfrag) + assert result.to_pydict() == {"f1": [1, 2, 3], "f2": [1, 1, 1]} + + # filter that results in empty selection + subfrag = fragment.subset(ds.field("f1") > 5) + assert subfrag.num_row_groups == 0 + assert subfrag.row_groups == [] + result = dataset_reader.to_table(subfrag, schema=dataset.schema) + assert result.num_rows == 0 + assert result.equals(table[:0]) + + # passing schema to ensure filter on partition expression works + subfrag = fragment.subset(ds.field("part") == "a", schema=dataset.schema) + assert subfrag.num_row_groups == 4 + + +@pytest.mark.parquet +def test_fragments_parquet_subset_invalid(tempdir): + _, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1) + fragment = list(dataset.get_fragments())[0] + + # passing none or both of filter / row_group_ids + with pytest.raises(ValueError): + fragment.subset(ds.field("f1") >= 1, row_group_ids=[1, 2]) + + with pytest.raises(ValueError): + fragment.subset() + + +@pytest.mark.parquet +def test_fragments_parquet_subset_with_nested_fields(tempdir): + # ensure row group filtering with nested field works + f1 = pa.array([0, 1, 2, 3]) + f21 = pa.array([0.1, 0.2, 0.3, 0.4]) + f22 = pa.array([1, 2, 3, 4]) + f2 = pa.StructArray.from_arrays([f21, f22], names=["f21", "f22"]) + struct_col = pa.StructArray.from_arrays([f1, f2], names=["f1", "f2"]) + table = pa.table({"col": struct_col}) + pq.write_table(table, tempdir / "data_struct.parquet", row_group_size=2) + + 
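+ # ds.field() accepts a nested reference given as several names (or a
+ # tuple), which the subset() calls below rely on; for example, a filter
+ # on the struct column written above can be spelled as:
+ #
+ #   expr = ds.field("col", "f2", "f21") > 0.2  # refers to col.f2.f21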
dataset = ds.dataset(tempdir / "data_struct.parquet", format="parquet") + fragment = list(dataset.get_fragments())[0] + assert fragment.num_row_groups == 2 + + subfrag = fragment.subset(ds.field("col", "f1") > 2) + assert subfrag.num_row_groups == 1 + subfrag = fragment.subset(ds.field("col", "f1") > 5) + assert subfrag.num_row_groups == 0 + + subfrag = fragment.subset(ds.field("col", "f2", "f21") > 0) + assert subfrag.num_row_groups == 2 + subfrag = fragment.subset(ds.field("col", "f2", "f22") <= 2) + assert subfrag.num_row_groups == 1 + + # nonexisting field ref + with pytest.raises(pa.ArrowInvalid, match="No match for FieldRef.Nested"): + fragment.subset(ds.field("col", "f3") > 0) + + # comparison with struct field is not implemented + with pytest.raises( + NotImplementedError, match="Function 'greater' has no kernel matching" + ): + fragment.subset(ds.field("col", "f2") > 0) + + +@pytest.mark.pandas +@pytest.mark.parquet +def test_fragments_repr(tempdir, dataset): + # partitioned parquet dataset + fragment = list(dataset.get_fragments())[0] + assert ( + # Ordering of partition items is non-deterministic + repr(fragment) == + "" or + repr(fragment) == + "" + ) + + # single-file parquet dataset (no partition information in repr) + table, path = _create_single_file(tempdir) + dataset = ds.dataset(path, format="parquet") + fragment = list(dataset.get_fragments())[0] + assert ( + repr(fragment) == + "".format( + dataset.filesystem.normalize_path(str(path))) + ) + + # non-parquet format + path = tempdir / "data.feather" + pa.feather.write_feather(table, path) + dataset = ds.dataset(path, format="feather") + fragment = list(dataset.get_fragments())[0] + assert ( + repr(fragment) == + "".format( + dataset.filesystem.normalize_path(str(path))) + ) + + +@pytest.mark.parquet +@pytest.mark.parametrize( + "pickled", [lambda x, m: x, lambda x, m: m.loads(m.dumps(x))]) +def test_partitioning_factory(mockfs, pickled, pickle_module): + paths_or_selector = fs.FileSelector('subdir', recursive=True) + format = ds.ParquetFileFormat() + + options = ds.FileSystemFactoryOptions('subdir') + partitioning_factory = ds.DirectoryPartitioning.discover(['group', 'key']) + partitioning_factory = pickled(partitioning_factory, pickle_module) + assert isinstance(partitioning_factory, ds.PartitioningFactory) + options.partitioning_factory = partitioning_factory + + factory = ds.FileSystemDatasetFactory( + mockfs, paths_or_selector, format, options + ) + inspected_schema = factory.inspect() + # i64/f64 from data, group/key from "/1/xxx" and "/2/yyy" paths + expected_schema = pa.schema([ + ("i64", pa.int64()), + ("f64", pa.float64()), + ("str", pa.string()), + ("const", pa.int64()), + ("struct", pa.struct({'a': pa.int64(), 'b': pa.string()})), + ("group", pa.int32()), + ("key", pa.string()), + ]) + assert inspected_schema.equals(expected_schema) + + hive_partitioning_factory = ds.HivePartitioning.discover() + assert isinstance(hive_partitioning_factory, ds.PartitioningFactory) + + +@pytest.mark.parquet +@pytest.mark.parametrize('infer_dictionary', [False, True]) +@pytest.mark.parametrize( + "pickled", [lambda x, m: x, lambda x, m: m.loads(m.dumps(x))]) +def test_partitioning_factory_dictionary(mockfs, infer_dictionary, pickled, + pickle_module): + paths_or_selector = fs.FileSelector('subdir', recursive=True) + format = ds.ParquetFileFormat() + options = ds.FileSystemFactoryOptions('subdir') + + partitioning_factory = ds.DirectoryPartitioning.discover( + ['group', 'key'], infer_dictionary=infer_dictionary) + 
options.partitioning_factory = pickled(partitioning_factory, pickle_module) + + factory = ds.FileSystemDatasetFactory( + mockfs, paths_or_selector, format, options) + + inferred_schema = factory.inspect() + if infer_dictionary: + expected_type = pa.dictionary(pa.int32(), pa.string()) + assert inferred_schema.field('key').type == expected_type + + table = factory.finish().to_table().combine_chunks() + actual = table.column('key').chunk(0) + expected = pa.array(['xxx'] * 5 + ['yyy'] * 5).dictionary_encode() + assert actual.equals(expected) + + # ARROW-9345 ensure filtering on the partition field works + table = factory.finish().to_table(filter=ds.field('key') == 'xxx') + actual = table.column('key').chunk(0) + expected = expected.slice(0, 5) + assert actual.equals(expected) + else: + assert inferred_schema.field('key').type == pa.string() + + +@pytest.mark.parametrize( + "pickled", [lambda x, m: x, lambda x, m: m.loads(m.dumps(x))]) +def test_partitioning_factory_segment_encoding(pickled, pickle_module): + mockfs = fs._MockFileSystem() + format = ds.IpcFileFormat() + schema = pa.schema([("i64", pa.int64())]) + table = pa.table([pa.array(range(10))], schema=schema) + partition_schema = pa.schema( + [("date", pa.timestamp("s")), ("string", pa.string())]) + string_partition_schema = pa.schema( + [("date", pa.string()), ("string", pa.string())]) + full_schema = pa.schema(list(schema) + list(partition_schema)) + for directory in [ + "directory/2021-05-04 00%3A00%3A00/%24", + "hive/date=2021-05-04 00%3A00%3A00/string=%24", + ]: + mockfs.create_dir(directory) + with mockfs.open_output_stream(directory + "/0.feather") as sink: + with pa.ipc.new_file(sink, schema) as writer: + writer.write_table(table) + writer.close() + + # Directory + selector = fs.FileSelector("directory", recursive=True) + options = ds.FileSystemFactoryOptions("directory") + partitioning_factory = ds.DirectoryPartitioning.discover( + schema=partition_schema) + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + inferred_schema = factory.inspect() + assert inferred_schema == full_schema + actual = factory.finish().to_table(columns={ + "date_int": ds.field("date").cast(pa.int64()), + }) + assert actual[0][0].as_py() == 1620086400 + + partitioning_factory = ds.DirectoryPartitioning.discover( + ["date", "string"], segment_encoding="none") + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + fragments = list(factory.finish().get_fragments()) + assert fragments[0].partition_expression.equals( + (ds.field("date") == "2021-05-04 00%3A00%3A00") & + (ds.field("string") == "%24")) + + partitioning = ds.DirectoryPartitioning( + string_partition_schema, segment_encoding="none") + options.partitioning = pickled(partitioning, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + fragments = list(factory.finish().get_fragments()) + assert fragments[0].partition_expression.equals( + (ds.field("date") == "2021-05-04 00%3A00%3A00") & + (ds.field("string") == "%24")) + + partitioning_factory = ds.DirectoryPartitioning.discover( + schema=partition_schema, segment_encoding="none") + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + with pytest.raises(pa.ArrowInvalid, + match="Could not cast segments for partition 
field"): + inferred_schema = factory.inspect() + + # Hive + selector = fs.FileSelector("hive", recursive=True) + options = ds.FileSystemFactoryOptions("hive") + partitioning_factory = ds.HivePartitioning.discover( + schema=partition_schema) + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + inferred_schema = factory.inspect() + assert inferred_schema == full_schema + actual = factory.finish().to_table(columns={ + "date_int": ds.field("date").cast(pa.int64()), + }) + assert actual[0][0].as_py() == 1620086400 + + partitioning_factory = ds.HivePartitioning.discover( + segment_encoding="none") + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + fragments = list(factory.finish().get_fragments()) + assert fragments[0].partition_expression.equals( + (ds.field("date") == "2021-05-04 00%3A00%3A00") & + (ds.field("string") == "%24")) + + options.partitioning = ds.HivePartitioning( + string_partition_schema, segment_encoding="none") + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + fragments = list(factory.finish().get_fragments()) + assert fragments[0].partition_expression.equals( + (ds.field("date") == "2021-05-04 00%3A00%3A00") & + (ds.field("string") == "%24")) + + partitioning_factory = ds.HivePartitioning.discover( + schema=partition_schema, segment_encoding="none") + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + with pytest.raises(pa.ArrowInvalid, + match="Could not cast segments for partition field"): + inferred_schema = factory.inspect() + + +@pytest.mark.parametrize( + "pickled", [lambda x, m: x, lambda x, m: m.loads(m.dumps(x))]) +def test_partitioning_factory_hive_segment_encoding_key_encoded(pickled, pickle_module): + mockfs = fs._MockFileSystem() + format = ds.IpcFileFormat() + schema = pa.schema([("i64", pa.int64())]) + table = pa.table([pa.array(range(10))], schema=schema) + partition_schema = pa.schema( + [("test'; date", pa.timestamp("s")), ("test';[ string'", pa.string())]) + string_partition_schema = pa.schema( + [("test'; date", pa.string()), ("test';[ string'", pa.string())]) + full_schema = pa.schema(list(schema) + list(partition_schema)) + + partition_schema_en = pa.schema( + [("test%27%3B%20date", pa.timestamp("s")), + ("test%27%3B%5B%20string%27", pa.string())]) + string_partition_schema_en = pa.schema( + [("test%27%3B%20date", pa.string()), + ("test%27%3B%5B%20string%27", pa.string())]) + + directory = ("hive/test%27%3B%20date=2021-05-04 00%3A00%3A00/" + "test%27%3B%5B%20string%27=%24") + mockfs.create_dir(directory) + with mockfs.open_output_stream(directory + "/0.feather") as sink: + with pa.ipc.new_file(sink, schema) as writer: + writer.write_table(table) + writer.close() + + # Hive + selector = fs.FileSelector("hive", recursive=True) + options = ds.FileSystemFactoryOptions("hive") + partitioning_factory = ds.HivePartitioning.discover( + schema=partition_schema) + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + inferred_schema = factory.inspect() + assert inferred_schema == full_schema + actual = factory.finish().to_table(columns={ + "date_int": ds.field("test'; date").cast(pa.int64()), + }) + assert actual[0][0].as_py() == 
1620086400 + + partitioning_factory = ds.HivePartitioning.discover( + segment_encoding="uri") + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + fragments = list(factory.finish().get_fragments()) + assert fragments[0].partition_expression.equals( + (ds.field("test'; date") == "2021-05-04 00:00:00") & + (ds.field("test';[ string'") == "$")) + + partitioning = ds.HivePartitioning( + string_partition_schema, segment_encoding="uri") + options.partitioning = pickled(partitioning, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + fragments = list(factory.finish().get_fragments()) + assert fragments[0].partition_expression.equals( + (ds.field("test'; date") == "2021-05-04 00:00:00") & + (ds.field("test';[ string'") == "$")) + + partitioning_factory = ds.HivePartitioning.discover( + segment_encoding="none") + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + fragments = list(factory.finish().get_fragments()) + assert fragments[0].partition_expression.equals( + (ds.field("test%27%3B%20date") == "2021-05-04 00%3A00%3A00") & + (ds.field("test%27%3B%5B%20string%27") == "%24")) + + partitioning = ds.HivePartitioning( + string_partition_schema_en, segment_encoding="none") + options.partitioning = pickled(partitioning, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + fragments = list(factory.finish().get_fragments()) + assert fragments[0].partition_expression.equals( + (ds.field("test%27%3B%20date") == "2021-05-04 00%3A00%3A00") & + (ds.field("test%27%3B%5B%20string%27") == "%24")) + + partitioning_factory = ds.HivePartitioning.discover( + schema=partition_schema_en, segment_encoding="none") + options.partitioning_factory = pickled(partitioning_factory, pickle_module) + factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options) + with pytest.raises(pa.ArrowInvalid, + match="Could not cast segments for partition field"): + inferred_schema = factory.inspect() + + +def test_dictionary_partitioning_outer_nulls_raises(tempdir): + table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']}) + part = ds.partitioning( + pa.schema([pa.field('a', pa.string()), pa.field('b', pa.string())])) + with pytest.raises(pa.ArrowInvalid): + ds.write_dataset(table, tempdir, format='ipc', partitioning=part) + + +def test_positional_keywords_raises(tempdir): + table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']}) + with pytest.raises(TypeError): + ds.write_dataset(table, tempdir, "basename-{i}.arrow") + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_read_partition_keys_only(tempdir): + BATCH_SIZE = 2 ** 15 + # This is a regression test for ARROW-15318 which saw issues + # reading only the partition keys from files with batches larger + # than the default batch size (e.g. 
so we need to return two chunks) + table = pa.table({ + 'key': pa.repeat(0, BATCH_SIZE + 1), + 'value': np.arange(BATCH_SIZE + 1)}) + pq.write_to_dataset( + table[:BATCH_SIZE], + tempdir / 'one', partition_cols=['key']) + pq.write_to_dataset( + table[:BATCH_SIZE + 1], + tempdir / 'two', partition_cols=['key']) + + table = pq.read_table(tempdir / 'one', columns=['key']) + assert table['key'].num_chunks == 1 + + table = pq.read_table(tempdir / 'two', columns=['key', 'value']) + assert table['key'].num_chunks == 2 + + table = pq.read_table(tempdir / 'two', columns=['key']) + assert table['key'].num_chunks == 2 + + +def _has_subdirs(basedir): + elements = os.listdir(basedir) + return any([os.path.isdir(os.path.join(basedir, el)) for el in elements]) + + +def _do_list_all_dirs(basedir, path_so_far, result): + for f in os.listdir(basedir): + true_nested = os.path.join(basedir, f) + if os.path.isdir(true_nested): + norm_nested = posixpath.join(path_so_far, f) + if _has_subdirs(true_nested): + _do_list_all_dirs(true_nested, norm_nested, result) + else: + result.append(norm_nested) + + +def _list_all_dirs(basedir): + result = [] + _do_list_all_dirs(basedir, '', result) + return result + + +def _check_dataset_directories(tempdir, expected_directories): + actual_directories = set(_list_all_dirs(tempdir)) + assert actual_directories == set(expected_directories) + + +def test_dictionary_partitioning_inner_nulls(tempdir): + table = pa.table({'a': ['x', 'y', 'z'], 'b': ['x', 'y', None]}) + part = ds.partitioning( + pa.schema([pa.field('a', pa.string()), pa.field('b', pa.string())])) + ds.write_dataset(table, tempdir, format='ipc', partitioning=part) + _check_dataset_directories(tempdir, ['x/x', 'y/y', 'z']) + + +def test_hive_partitioning_nulls(tempdir): + table = pa.table({'a': ['x', None, 'z'], 'b': ['x', 'y', None]}) + part = ds.HivePartitioning(pa.schema( + [pa.field('a', pa.string()), pa.field('b', pa.string())]), None, 'xyz') + ds.write_dataset(table, tempdir, format='ipc', partitioning=part) + _check_dataset_directories(tempdir, ['a=x/b=x', 'a=xyz/b=y', 'a=z/b=xyz']) + + +def test_partitioning_function(): + schema = pa.schema([("year", pa.int16()), ("month", pa.int8())]) + names = ["year", "month"] + + # default DirectoryPartitioning + part = ds.partitioning(schema) + assert isinstance(part, ds.DirectoryPartitioning) + part = ds.partitioning(schema, dictionaries="infer") + assert isinstance(part, ds.PartitioningFactory) + part = ds.partitioning(field_names=names) + assert isinstance(part, ds.PartitioningFactory) + # needs schema or list of names + with pytest.raises(ValueError): + ds.partitioning() + with pytest.raises(ValueError, match="Expected list"): + ds.partitioning(field_names=schema) + with pytest.raises(ValueError, match="Cannot specify both"): + ds.partitioning(schema, field_names=schema) + + # Hive partitioning + part = ds.partitioning(schema, flavor="hive") + assert isinstance(part, ds.HivePartitioning) + part = ds.partitioning(schema, dictionaries="infer", flavor="hive") + assert isinstance(part, ds.PartitioningFactory) + part = ds.partitioning(flavor="hive") + assert isinstance(part, ds.PartitioningFactory) + # cannot pass list of names + with pytest.raises(ValueError): + ds.partitioning(names, flavor="hive") + with pytest.raises(ValueError, match="Cannot specify 'field_names'"): + ds.partitioning(field_names=names, flavor="hive") + + # unsupported flavor + with pytest.raises(ValueError): + ds.partitioning(schema, flavor="unsupported") + + +@pytest.mark.parquet +def 
test_directory_partitioning_dictionary_key(mockfs): + # ARROW-8088 specifying partition key as dictionary type + schema = pa.schema([ + pa.field('group', pa.dictionary(pa.int8(), pa.int32())), + pa.field('key', pa.dictionary(pa.int8(), pa.string())) + ]) + part = ds.DirectoryPartitioning.discover(schema=schema) + + dataset = ds.dataset( + "subdir", format="parquet", filesystem=mockfs, partitioning=part + ) + assert dataset.partitioning.schema == schema + table = dataset.to_table() + + assert table.column('group').type.equals(schema.types[0]) + assert table.column('group').to_pylist() == [1] * 5 + [2] * 5 + assert table.column('key').type.equals(schema.types[1]) + assert table.column('key').to_pylist() == ['xxx'] * 5 + ['yyy'] * 5 + + +def test_hive_partitioning_dictionary_key(multisourcefs): + # ARROW-8088 specifying partition key as dictionary type + schema = pa.schema([ + pa.field('year', pa.dictionary(pa.int8(), pa.int16())), + pa.field('month', pa.dictionary(pa.int8(), pa.int16())) + ]) + part = ds.HivePartitioning.discover(schema=schema) + + dataset = ds.dataset( + "hive", format="parquet", filesystem=multisourcefs, partitioning=part + ) + assert dataset.partitioning.schema == schema + table = dataset.to_table() + + year_dictionary = list(range(2006, 2011)) + month_dictionary = list(range(1, 13)) + assert table.column('year').type.equals(schema.types[0]) + for chunk in table.column('year').chunks: + actual = chunk.dictionary.to_pylist() + actual.sort() + assert actual == year_dictionary + assert table.column('month').type.equals(schema.types[1]) + for chunk in table.column('month').chunks: + actual = chunk.dictionary.to_pylist() + actual.sort() + assert actual == month_dictionary + + +def _create_single_file(base_dir, table=None, row_group_size=None): + if table is None: + table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5}) + path = base_dir / "test.parquet" + pq.write_table(table, path, row_group_size=row_group_size) + return table, path + + +def _create_directory_of_files(base_dir): + table1 = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5}) + path1 = base_dir / "test1.parquet" + pq.write_table(table1, path1) + table2 = pa.table({'a': range(9, 18), 'b': [0.] * 4 + [1.] 
* 5}) + path2 = base_dir / "test2.parquet" + pq.write_table(table2, path2) + return (table1, table2), (path1, path2) + + +def _check_dataset(dataset, table, dataset_reader, pickler): + # also test that pickle roundtrip keeps the functionality + for d in [dataset, pickler.loads(pickler.dumps(dataset))]: + assert d.schema.equals(table.schema) + assert dataset_reader.to_table(d).equals(table) + + +def _check_dataset_from_path(path, table, dataset_reader, pickler, **kwargs): + # pathlib object + assert isinstance(path, pathlib.Path) + + # accept Path, str, List[Path], List[str] + for p in [path, str(path), [path], [str(path)]]: + dataset = ds.dataset(p, **kwargs) + assert isinstance(dataset, ds.FileSystemDataset) + _check_dataset(dataset, table, dataset_reader, pickler) + + # relative string path + with change_cwd(path.parent): + dataset = ds.dataset(path.name, **kwargs) + assert isinstance(dataset, ds.FileSystemDataset) + _check_dataset(dataset, table, dataset_reader, pickler) + + +@pytest.mark.parquet +def test_open_dataset_single_file(tempdir, dataset_reader, pickle_module): + table, path = _create_single_file(tempdir) + _check_dataset_from_path(path, table, dataset_reader, pickle_module) + + +@pytest.mark.parquet +def test_deterministic_row_order(tempdir, dataset_reader, pickle_module): + # ARROW-8447 Ensure that dataset.to_table (and Scanner::ToTable) returns a + # deterministic row ordering. This is achieved by constructing a single + # parquet file with one row per RowGroup. + table, path = _create_single_file(tempdir, row_group_size=1) + _check_dataset_from_path(path, table, dataset_reader, pickle_module) + + +@pytest.mark.parquet +def test_open_dataset_directory(tempdir, dataset_reader, pickle_module): + tables, _ = _create_directory_of_files(tempdir) + table = pa.concat_tables(tables) + _check_dataset_from_path(tempdir, table, dataset_reader, pickle_module) + + +@pytest.mark.parquet +def test_open_dataset_list_of_files(tempdir, dataset_reader, pickle_module): + tables, (path1, path2) = _create_directory_of_files(tempdir) + table = pa.concat_tables(tables) + + datasets = [ + ds.dataset([path1, path2]), + ds.dataset([str(path1), str(path2)]) + ] + datasets += [ + pickle_module.loads(pickle_module.dumps(d)) for d in datasets + ] + + for dataset in datasets: + assert dataset.schema.equals(table.schema) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + +@pytest.mark.parquet +def test_open_dataset_filesystem_fspath(tempdir): + # single file + table, path = _create_single_file(tempdir) + + fspath = FSProtocolClass(path) + + # filesystem inferred from path + dataset1 = ds.dataset(fspath) + assert dataset1.schema.equals(table.schema) + + # filesystem specified + dataset2 = ds.dataset(fspath, filesystem=fs.LocalFileSystem()) + assert dataset2.schema.equals(table.schema) + + # passing different filesystem + with pytest.raises(TypeError): + ds.dataset(fspath, filesystem=fs._MockFileSystem()) + + +@pytest.mark.parquet +def test_construct_from_single_file(tempdir, dataset_reader, pickle_module): + directory = tempdir / 'single-file' + directory.mkdir() + table, path = _create_single_file(directory) + relative_path = path.relative_to(directory) + + # instantiate from a single file + d1 = ds.dataset(path) + # instantiate from a single file with a filesystem object + d2 = ds.dataset(path, filesystem=fs.LocalFileSystem()) + # instantiate from a single file with prefixed filesystem URI + d3 = ds.dataset(str(relative_path),
filesystem=_filesystem_uri(directory)) + # pickle roundtrip + d4 = pickle_module.loads(pickle_module.dumps(d1)) + + assert dataset_reader.to_table(d1) == dataset_reader.to_table( + d2) == dataset_reader.to_table(d3) == dataset_reader.to_table(d4) + + +@pytest.mark.parquet +def test_construct_from_single_directory(tempdir, dataset_reader, pickle_module): + directory = tempdir / 'single-directory' + directory.mkdir() + tables, paths = _create_directory_of_files(directory) + + d1 = ds.dataset(directory) + d2 = ds.dataset(directory, filesystem=fs.LocalFileSystem()) + d3 = ds.dataset(directory.name, filesystem=_filesystem_uri(tempdir)) + t1 = dataset_reader.to_table(d1) + t2 = dataset_reader.to_table(d2) + t3 = dataset_reader.to_table(d3) + assert t1 == t2 == t3 + + # test pickle roundtrip + for d in [d1, d2, d3]: + restored = pickle_module.loads(pickle_module.dumps(d)) + assert dataset_reader.to_table(restored) == t1 + + +@pytest.mark.parquet +def test_construct_from_list_of_files(tempdir, dataset_reader): + # instantiate from a list of files + directory = tempdir / 'list-of-files' + directory.mkdir() + tables, paths = _create_directory_of_files(directory) + + relative_paths = [p.relative_to(tempdir) for p in paths] + with change_cwd(tempdir): + d1 = ds.dataset(relative_paths) + t1 = dataset_reader.to_table(d1) + assert len(t1) == sum(map(len, tables)) + + d2 = ds.dataset(relative_paths, filesystem=_filesystem_uri(tempdir)) + t2 = dataset_reader.to_table(d2) + d3 = ds.dataset(paths) + t3 = dataset_reader.to_table(d3) + d4 = ds.dataset(paths, filesystem=fs.LocalFileSystem()) + t4 = dataset_reader.to_table(d4) + + assert t1 == t2 == t3 == t4 + + +@pytest.mark.parquet +def test_construct_from_list_of_mixed_paths_fails(mockfs): + # instantiate from a list of mixed paths + files = [ + 'subdir/1/xxx/file0.parquet', + 'subdir/1/xxx/doesnt-exist.parquet', + ] + with pytest.raises(FileNotFoundError, match='doesnt-exist'): + ds.dataset(files, filesystem=mockfs) + + +@pytest.mark.parquet +def test_construct_from_mixed_child_datasets(mockfs): + # instantiate from a list of mixed paths + a = ds.dataset(['subdir/1/xxx/file0.parquet', + 'subdir/2/yyy/file1.parquet'], filesystem=mockfs) + b = ds.dataset('subdir', filesystem=mockfs) + + dataset = ds.dataset([a, b]) + + assert isinstance(dataset, ds.UnionDataset) + assert len(list(dataset.get_fragments())) == 4 + + table = dataset.to_table() + assert len(table) == 20 + assert table.num_columns == 5 + + assert len(dataset.children) == 2 + for child in dataset.children: + assert child.files == ['subdir/1/xxx/file0.parquet', + 'subdir/2/yyy/file1.parquet'] + + +def test_construct_empty_dataset(): + empty = ds.dataset([], format='ipc') + table = empty.to_table() + assert table.num_rows == 0 + assert table.num_columns == 0 + + +def test_construct_dataset_with_invalid_schema(): + empty = ds.dataset([], format='ipc', schema=pa.schema([ + ('a', pa.int64()), + ('a', pa.string()) + ])) + with pytest.raises(ValueError, match='Multiple matches for .*a.* in '): + empty.to_table() + + +def test_construct_from_invalid_sources_raise(multisourcefs): + child1 = ds.FileSystemDatasetFactory( + multisourcefs, + fs.FileSelector('/plain'), + format=ds.ParquetFileFormat() + ) + child2 = ds.FileSystemDatasetFactory( + multisourcefs, + fs.FileSelector('/schema'), + format=ds.ParquetFileFormat() + ) + batch1 = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"]) + batch2 = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["b"]) + + with pytest.raises(TypeError, 
match='Expected.*FileSystemDatasetFactory'): + ds.dataset([child1, child2]) + + expected = ( + "Expected a list of path-like or dataset objects, or a list " + "of batches or tables. The given list contains the following " + "types: int" + ) + with pytest.raises(TypeError, match=expected): + ds.dataset([1, 2, 3]) + + expected = ( + "Expected a path-like, list of path-likes or a list of Datasets " + "instead of the given type: NoneType" + ) + with pytest.raises(TypeError, match=expected): + ds.dataset(None) + + expected = ( + "Expected a path-like, list of path-likes or a list of Datasets " + "instead of the given type: generator" + ) + with pytest.raises(TypeError, match=expected): + ds.dataset((batch1 for _ in range(3))) + + expected = ( + "Must provide schema to construct in-memory dataset from an empty list" + ) + with pytest.raises(ValueError, match=expected): + ds.InMemoryDataset([]) + + expected = ( + "Item has schema\nb: int64\nwhich does not match expected schema\n" + "a: int64" + ) + with pytest.raises(TypeError, match=expected): + ds.dataset([batch1, batch2]) + + expected = ( + "Expected a list of path-like or dataset objects, or a list of " + "batches or tables. The given list contains the following types:" + ) + with pytest.raises(TypeError, match=expected): + ds.dataset([batch1, 0]) + + expected = ( + "Expected a list of tables or batches. The given list contains a int" + ) + with pytest.raises(TypeError, match=expected): + ds.InMemoryDataset([batch1, 0]) + + +def test_construct_in_memory(dataset_reader): + batch = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"]) + table = pa.Table.from_batches([batch]) + + dataset_table = ds.dataset([], format='ipc', schema=pa.schema([]) + ).to_table() + assert dataset_table == pa.table([]) + + for source in (batch, table, [batch], [table]): + dataset = ds.dataset(source) + assert dataset_reader.to_table(dataset) == table + assert len(list(dataset.get_fragments())) == 1 + assert next(dataset.get_fragments()).to_table() == table + assert pa.Table.from_batches(list(dataset.to_batches())) == table + + +@pytest.mark.parametrize('use_threads', [False, True]) +def test_scan_iterator(use_threads): + batch = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"]) + table = pa.Table.from_batches([batch]) + # When constructed from readers/iterators, should be one-shot + match = "OneShotFragment was already scanned" + for factory, schema in ( + (lambda: pa.RecordBatchReader.from_batches( + batch.schema, [batch]), None), + (lambda: (batch for _ in range(1)), batch.schema), + ): + # Scanning the fragment consumes the underlying iterator + scanner = ds.Scanner.from_batches( + factory(), schema=schema, use_threads=use_threads) + assert scanner.to_table() == table + with pytest.raises(pa.ArrowInvalid, match=match): + scanner.to_table() + + +def _create_partitioned_dataset(basedir): + table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] 
* 5}) + + path = basedir / "dataset-partitioned" + path.mkdir() + + for i in range(3): + part = path / "part={}".format(i) + part.mkdir() + pq.write_table(table.slice(3*i, 3), part / "test.parquet") + + full_table = table.append_column( + "part", pa.array(np.repeat([0, 1, 2], 3), type=pa.int32())) + + return full_table, path + + +@pytest.mark.parquet +def test_open_dataset_partitioned_directory(tempdir, dataset_reader, pickle_module): + full_table, path = _create_partitioned_dataset(tempdir) + + # no partitioning specified, just read all individual files + table = full_table.select(['a', 'b']) + _check_dataset_from_path(path, table, dataset_reader, pickle_module) + + # specify partition scheme with discovery + dataset = ds.dataset( + str(path), partitioning=ds.partitioning(flavor="hive")) + assert dataset.schema.equals(full_table.schema) + + # specify partition scheme with discovery and relative path + with change_cwd(tempdir): + dataset = ds.dataset("dataset-partitioned/", + partitioning=ds.partitioning(flavor="hive")) + assert dataset.schema.equals(full_table.schema) + + # specify partition scheme with string short-cut + dataset = ds.dataset(str(path), partitioning="hive") + assert dataset.schema.equals(full_table.schema) + + # specify partition scheme with explicit scheme + dataset = ds.dataset( + str(path), + partitioning=ds.partitioning( + pa.schema([("part", pa.int8())]), flavor="hive")) + expected_schema = table.schema.append(pa.field("part", pa.int8())) + assert dataset.schema.equals(expected_schema) + + result = dataset.to_table() + expected = table.append_column( + "part", pa.array(np.repeat([0, 1, 2], 3), type=pa.int8())) + assert result.equals(expected) + + +@pytest.mark.parquet +def test_open_dataset_filesystem(tempdir): + # single file + table, path = _create_single_file(tempdir) + + # filesystem inferred from path + dataset1 = ds.dataset(str(path)) + assert dataset1.schema.equals(table.schema) + + # filesystem specified + dataset2 = ds.dataset(str(path), filesystem=fs.LocalFileSystem()) + assert dataset2.schema.equals(table.schema) + + # local filesystem specified with relative path + with change_cwd(tempdir): + dataset3 = ds.dataset("test.parquet", filesystem=fs.LocalFileSystem()) + assert dataset3.schema.equals(table.schema) + + # passing different filesystem + with pytest.raises(FileNotFoundError): + ds.dataset(str(path), filesystem=fs._MockFileSystem()) + + +@pytest.mark.parquet +def test_open_dataset_unsupported_format(tempdir): + _, path = _create_single_file(tempdir) + with pytest.raises(ValueError, match="format 'blabla' is not supported"): + ds.dataset([path], format="blabla") + + +@pytest.mark.parquet +def test_open_union_dataset(tempdir, dataset_reader, pickle_module): + _, path = _create_single_file(tempdir) + dataset = ds.dataset(path) + + union = ds.dataset([dataset, dataset]) + assert isinstance(union, ds.UnionDataset) + + pickled = pickle_module.loads(pickle_module.dumps(union)) + assert dataset_reader.to_table(pickled) == dataset_reader.to_table(union) + + +def test_open_union_dataset_with_additional_kwargs(multisourcefs): + child = ds.dataset('/plain', filesystem=multisourcefs, format='parquet') + with pytest.raises(ValueError, match="cannot pass any additional"): + ds.dataset([child], format="parquet") + + +def test_open_dataset_non_existing_file(): + # ARROW-8213: Opening a dataset with a local incorrect path gives confusing + # error message + with pytest.raises(FileNotFoundError): + ds.dataset('i-am-not-existing.arrow', format='ipc') + + with 
pytest.raises(pa.ArrowInvalid, match='cannot be relative'): + ds.dataset('file:i-am-not-existing.arrow', format='ipc') + + +@pytest.mark.parquet +@pytest.mark.parametrize('partitioning', ["directory", "hive"]) +@pytest.mark.parametrize('null_fallback', ['xyz', None]) +@pytest.mark.parametrize('infer_dictionary', [False, True]) +@pytest.mark.parametrize('partition_keys', [ + (["A", "B", "C"], [1, 2, 3]), + ([1, 2, 3], ["A", "B", "C"]), + (["A", "B", "C"], ["D", "E", "F"]), + ([1, 2, 3], [4, 5, 6]), + ([1, None, 3], ["A", "B", "C"]), + ([1, 2, 3], ["A", None, "C"]), + ([None, 2, 3], [None, 2, 3]), +]) +def test_partition_discovery( + tempdir, partitioning, null_fallback, infer_dictionary, partition_keys +): + # ARROW-9288 / ARROW-9476 + table = pa.table({'a': range(9), 'b': [0.0] * 4 + [1.0] * 5}) + + has_null = None in partition_keys[0] or None in partition_keys[1] + if partitioning == "directory" and has_null: + # Directory partitioning can't handle the first part being null + return + + if partitioning == "directory": + partitioning = ds.DirectoryPartitioning.discover( + ["part1", "part2"], infer_dictionary=infer_dictionary) + fmt = "{0}/{1}" + null_value = None + else: + if null_fallback: + partitioning = ds.HivePartitioning.discover( + infer_dictionary=infer_dictionary, null_fallback=null_fallback + ) + else: + partitioning = ds.HivePartitioning.discover( + infer_dictionary=infer_dictionary) + fmt = "part1={0}/part2={1}" + if null_fallback: + null_value = null_fallback + else: + null_value = "__HIVE_DEFAULT_PARTITION__" + + basepath = tempdir / "dataset" + basepath.mkdir() + + part_keys1, part_keys2 = partition_keys + for part1 in part_keys1: + for part2 in part_keys2: + path = basepath / \ + fmt.format(part1 or null_value, part2 or null_value) + path.mkdir(parents=True) + pq.write_table(table, path / "test.parquet") + + dataset = ds.dataset(str(basepath), partitioning=partitioning) + + def expected_type(key): + if infer_dictionary: + value_type = pa.string() if isinstance(key, str) else pa.int32() + return pa.dictionary(pa.int32(), value_type) + else: + return pa.string() if isinstance(key, str) else pa.int32() + expected_schema = table.schema.append( + pa.field("part1", expected_type(part_keys1[0])) + ).append( + pa.field("part2", expected_type(part_keys2[0])) + ) + assert dataset.schema.equals(expected_schema) + + +@pytest.mark.pandas +def test_dataset_partitioned_dictionary_type_reconstruct(tempdir, pickle_module): + # https://issues.apache.org/jira/browse/ARROW-11400 + table = pa.table({'part': np.repeat(['A', 'B'], 5), 'col': range(10)}) + part = ds.partitioning(table.select(['part']).schema, flavor="hive") + ds.write_dataset(table, tempdir, partitioning=part, format="feather") + + dataset = ds.dataset( + tempdir, format="feather", + partitioning=ds.HivePartitioning.discover(infer_dictionary=True) + ) + expected = pa.table( + {'col': table['col'], 'part': table['part'].dictionary_encode()} + ) + assert dataset.to_table().equals(expected) + fragment = list(dataset.get_fragments())[0] + assert fragment.to_table(schema=dataset.schema).equals(expected[:5]) + part_expr = fragment.partition_expression + + restored = pickle_module.loads(pickle_module.dumps(dataset)) + assert restored.to_table().equals(expected) + + restored = pickle_module.loads(pickle_module.dumps(fragment)) + assert restored.to_table(schema=dataset.schema).equals(expected[:5]) + # to_pandas call triggers computation of the actual dictionary values + assert restored.to_table(schema=dataset.schema).to_pandas().equals( + 
expected[:5].to_pandas() + ) + assert restored.partition_expression.equals(part_expr) + + +@pytest.fixture +def s3_example_simple(s3_server): + from pyarrow.fs import FileSystem + + host, port, access_key, secret_key = s3_server['connection'] + uri = ( + "s3://{}:{}@mybucket/data.parquet?scheme=http&endpoint_override={}:{}" + "&allow_bucket_creation=True" + .format(access_key, secret_key, host, port) + ) + + fs, path = FileSystem.from_uri(uri) + + fs.create_dir("mybucket") + table = pa.table({'a': [1, 2, 3]}) + with fs.open_output_stream("mybucket/data.parquet") as out: + pq.write_table(table, out) + + return table, path, fs, uri, host, port, access_key, secret_key + + +@pytest.mark.parquet +@pytest.mark.s3 +def test_open_dataset_from_uri_s3(s3_example_simple, dataset_reader): + # open dataset from non-localfs string path + table, path, fs, uri, _, _, _, _ = s3_example_simple + + # full string URI + dataset = ds.dataset(uri, format="parquet") + assert dataset_reader.to_table(dataset).equals(table) + + # passing filesystem object + dataset = ds.dataset(path, format="parquet", filesystem=fs) + assert dataset_reader.to_table(dataset).equals(table) + + +@pytest.mark.parquet +@pytest.mark.s3 +def test_open_dataset_from_fileinfos(s3_example_simple, dataset_reader): + table, path, filesystem, uri, _, _, _, _ = s3_example_simple + selector = fs.FileSelector("mybucket") + finfos = filesystem.get_file_info(selector) + dataset = ds.dataset(finfos, format="parquet", filesystem=filesystem) + assert dataset_reader.to_table(dataset).equals(table) + + +@pytest.mark.parquet +@pytest.mark.s3 # still needed to create the data +def test_open_dataset_from_uri_s3_fsspec(s3_example_simple): + table, path, _, _, host, port, access_key, secret_key = s3_example_simple + s3fs = pytest.importorskip("s3fs") + + from pyarrow.fs import FSSpecHandler, PyFileSystem + + fs = s3fs.S3FileSystem( + key=access_key, + secret=secret_key, + client_kwargs={ + 'endpoint_url': 'http://{}:{}'.format(host, port) + } + ) + + # passing as fsspec filesystem + dataset = ds.dataset(path, format="parquet", filesystem=fs) + assert dataset.to_table().equals(table) + + # directly passing the fsspec-handler + fs = PyFileSystem(FSSpecHandler(fs)) + dataset = ds.dataset(path, format="parquet", filesystem=fs) + assert dataset.to_table().equals(table) + + +@pytest.mark.parquet +@pytest.mark.s3 +def test_open_dataset_from_s3_with_filesystem_uri(s3_server): + from pyarrow.fs import FileSystem + + host, port, access_key, secret_key = s3_server['connection'] + bucket = 'theirbucket' + path = 'nested/folder/data.parquet' + uri = "s3://{}:{}@{}/{}?scheme=http&endpoint_override={}:{}"\ + "&allow_bucket_creation=true".format( + access_key, secret_key, bucket, path, host, port + ) + + fs, path = FileSystem.from_uri(uri) + assert path == 'theirbucket/nested/folder/data.parquet' + + fs.create_dir(bucket) + + table = pa.table({'a': [1, 2, 3]}) + with fs.open_output_stream(path) as out: + pq.write_table(table, out) + + # full string URI + dataset = ds.dataset(uri, format="parquet") + assert dataset.to_table().equals(table) + + # passing filesystem as an uri + template = ( + "s3://{}:{}@{{}}?scheme=http&endpoint_override={}:{}".format( + access_key, secret_key, host, port + ) + ) + cases = [ + ('theirbucket/nested/folder/', '/data.parquet'), + ('theirbucket/nested/folder', 'data.parquet'), + ('theirbucket/nested/', 'folder/data.parquet'), + ('theirbucket/nested', 'folder/data.parquet'), + ('theirbucket', '/nested/folder/data.parquet'), + ('theirbucket', 
'nested/folder/data.parquet'), + ] + for prefix, path in cases: + uri = template.format(prefix) + dataset = ds.dataset(path, filesystem=uri, format="parquet") + assert dataset.to_table().equals(table) + + with pytest.raises(pa.ArrowInvalid, match='Missing bucket name'): + uri = template.format('/') + ds.dataset('/theirbucket/nested/folder/data.parquet', filesystem=uri) + + error = ( + "The path component of the filesystem URI must point to a directory " + "but it has a type: `{}`. The path component is `{}` and the given " + "filesystem URI is `{}`" + ) + + path = 'theirbucket/doesnt/exist' + uri = template.format(path) + with pytest.raises(ValueError) as exc: + ds.dataset('data.parquet', filesystem=uri) + assert str(exc.value) == error.format('NotFound', path, uri) + + path = 'theirbucket/nested/folder/data.parquet' + uri = template.format(path) + with pytest.raises(ValueError) as exc: + ds.dataset('data.parquet', filesystem=uri) + assert str(exc.value) == error.format('File', path, uri) + + +@pytest.mark.parquet +def test_open_dataset_from_fsspec(tempdir): + table, path = _create_single_file(tempdir) + + fsspec = pytest.importorskip("fsspec") + + localfs = fsspec.filesystem("file") + dataset = ds.dataset(path, filesystem=localfs) + assert dataset.schema.equals(table.schema) + + +@pytest.mark.parquet +def test_file_format_inspect_fsspec(tempdir): + # https://issues.apache.org/jira/browse/ARROW-16413 + fsspec = pytest.importorskip("fsspec") + + # create bucket + file with pyarrow + table = pa.table({'a': [1, 2, 3]}) + path = tempdir / "data.parquet" + pq.write_table(table, path) + + # read using fsspec filesystem + fsspec_fs = fsspec.filesystem("file") + assert fsspec_fs.ls(tempdir)[0].endswith("data.parquet") + + # inspect using dataset file format + format = ds.ParquetFileFormat() + # manually creating a PyFileSystem instead of using fs._ensure_filesystem + # which would convert an fsspec local filesystem to a native one + filesystem = fs.PyFileSystem(fs.FSSpecHandler(fsspec_fs)) + schema = format.inspect(path, filesystem) + assert schema.equals(table.schema) + + fragment = format.make_fragment(path, filesystem) + assert fragment.physical_schema.equals(table.schema) + + +@pytest.mark.pandas +def test_filter_timestamp(tempdir, dataset_reader): + # ARROW-11379 + path = tempdir / "test_partition_timestamps" + + table = pa.table({ + "dates": ['2012-01-01', '2012-01-02'] * 5, + "id": range(10)}) + + # write dataset partitioned on dates (as strings) + part = ds.partitioning(table.select(['dates']).schema, flavor="hive") + ds.write_dataset(table, path, partitioning=part, format="feather") + + # read dataset partitioned on dates (as timestamps) + part = ds.partitioning(pa.schema([("dates", pa.timestamp("s"))]), + flavor="hive") + dataset = ds.dataset(path, format="feather", partitioning=part) + + condition = ds.field("dates") > pd.Timestamp("2012-01-01") + table = dataset_reader.to_table(dataset, filter=condition) + assert table.column('id').to_pylist() == [1, 3, 5, 7, 9] + + import datetime + condition = ds.field("dates") > datetime.datetime(2012, 1, 1) + table = dataset_reader.to_table(dataset, filter=condition) + assert table.column('id').to_pylist() == [1, 3, 5, 7, 9] + + +@pytest.mark.parquet +def test_filter_implicit_cast(tempdir, dataset_reader): + # ARROW-7652 + table = pa.table({'a': pa.array([0, 1, 2, 3, 4, 5], type=pa.int8())}) + _, path = _create_single_file(tempdir, table) + dataset = ds.dataset(str(path)) + + filter_ = ds.field('a') > 2 + assert len(dataset_reader.to_table(dataset, 
filter=filter_)) == 3 + + +@pytest.mark.parquet +def test_filter_equal_null(tempdir, dataset_reader): + # ARROW-12066 equality with null, although not useful, should not crash + table = pa.table({"A": ["a", "b", None]}) + _, path = _create_single_file(tempdir, table) + dataset = ds.dataset(str(path)) + + table = dataset_reader.to_table( + dataset, filter=ds.field("A") == ds.scalar(None) + ) + assert table.num_rows == 0 + + +@pytest.mark.parquet +def test_filter_compute_expression(tempdir, dataset_reader): + table = pa.table({ + "A": ["a", "b", None, "a", "c"], + "B": [datetime.datetime(2022, 1, 1, i) for i in range(5)], + "C": [datetime.datetime(2022, 1, i) for i in range(1, 6)], + }) + _, path = _create_single_file(tempdir, table) + dataset = ds.dataset(str(path)) + + filter_ = pc.is_in(ds.field('A'), pa.array(["a", "b"])) + assert dataset_reader.to_table(dataset, filter=filter_).num_rows == 3 + + filter_ = pc.hour(ds.field('B')) >= 3 + assert dataset_reader.to_table(dataset, filter=filter_).num_rows == 2 + + days = pc.days_between(ds.field('B'), ds.field("C")) + result = dataset_reader.to_table(dataset, columns={"days": days}) + assert result["days"].to_pylist() == [0, 1, 2, 3, 4] + + +def test_dataset_union(multisourcefs): + child = ds.FileSystemDatasetFactory( + multisourcefs, fs.FileSelector('/plain'), + format=ds.ParquetFileFormat() + ) + factory = ds.UnionDatasetFactory([child]) + + # TODO(bkietz) reintroduce factory.children property + assert len(factory.inspect_schemas()) == 1 + assert all(isinstance(s, pa.Schema) for s in factory.inspect_schemas()) + assert factory.inspect_schemas()[0].equals(child.inspect()) + assert factory.inspect().equals(child.inspect()) + assert isinstance(factory.finish(), ds.Dataset) + + +def test_union_dataset_from_other_datasets(tempdir, multisourcefs): + child1 = ds.dataset('/plain', filesystem=multisourcefs, format='parquet') + child2 = ds.dataset('/schema', filesystem=multisourcefs, format='parquet', + partitioning=['week', 'color']) + child3 = ds.dataset('/hive', filesystem=multisourcefs, format='parquet', + partitioning='hive') + + assert child1.schema != child2.schema != child3.schema + + assembled = ds.dataset([child1, child2, child3]) + assert isinstance(assembled, ds.UnionDataset) + + msg = 'cannot pass any additional arguments' + with pytest.raises(ValueError, match=msg): + ds.dataset([child1, child2], filesystem=multisourcefs) + + expected_schema = pa.schema([ + ('date', pa.date32()), + ('index', pa.int64()), + ('value', pa.float64()), + ('color', pa.string()), + ('week', pa.int32()), + ('year', pa.int32()), + ('month', pa.int32()), + ]) + assert assembled.schema.equals(expected_schema) + assert assembled.to_table().schema.equals(expected_schema) + + assembled = ds.dataset([child1, child3]) + expected_schema = pa.schema([ + ('date', pa.date32()), + ('index', pa.int64()), + ('value', pa.float64()), + ('color', pa.string()), + ('year', pa.int32()), + ('month', pa.int32()), + ]) + assert assembled.schema.equals(expected_schema) + assert assembled.to_table().schema.equals(expected_schema) + + expected_schema = pa.schema([ + ('month', pa.int32()), + ('color', pa.string()), + ('date', pa.date32()), + ]) + assembled = ds.dataset([child1, child3], schema=expected_schema) + assert assembled.to_table().schema.equals(expected_schema) + + expected_schema = pa.schema([ + ('month', pa.int32()), + ('color', pa.string()), + ('unknown', pa.string()) # fill with nulls + ]) + assembled = ds.dataset([child1, child3], schema=expected_schema) + assert 
assembled.to_table().schema.equals(expected_schema) + + # incompatible schemas, date and index columns have conflicting types + table = pa.table([range(9), [0.] * 4 + [1.] * 5, 'abcdefghj'], + names=['date', 'value', 'index']) + _, path = _create_single_file(tempdir, table=table) + child4 = ds.dataset(path) + + with pytest.raises(pa.ArrowTypeError, match='Unable to merge'): + ds.dataset([child1, child4]) + + +def test_dataset_from_a_list_of_local_directories_raises(multisourcefs): + msg = 'points to a directory, but only file paths are supported' + with pytest.raises(IsADirectoryError, match=msg): + ds.dataset(['/plain', '/schema', '/hive'], filesystem=multisourcefs) + + +def test_union_dataset_filesystem_datasets(multisourcefs): + # without partitioning + dataset = ds.dataset([ + ds.dataset('/plain', filesystem=multisourcefs), + ds.dataset('/schema', filesystem=multisourcefs), + ds.dataset('/hive', filesystem=multisourcefs), + ]) + expected_schema = pa.schema([ + ('date', pa.date32()), + ('index', pa.int64()), + ('value', pa.float64()), + ('color', pa.string()), + ]) + assert dataset.schema.equals(expected_schema) + + # with hive partitioning for two hive sources + dataset = ds.dataset([ + ds.dataset('/plain', filesystem=multisourcefs), + ds.dataset('/schema', filesystem=multisourcefs), + ds.dataset('/hive', filesystem=multisourcefs, partitioning='hive') + ]) + expected_schema = pa.schema([ + ('date', pa.date32()), + ('index', pa.int64()), + ('value', pa.float64()), + ('color', pa.string()), + ('year', pa.int32()), + ('month', pa.int32()), + ]) + assert dataset.schema.equals(expected_schema) + + +@pytest.mark.parquet +def test_specified_schema(tempdir, dataset_reader): + table = pa.table({'a': [1, 2, 3], 'b': [.1, .2, .3]}) + pq.write_table(table, tempdir / "data.parquet") + + def _check_dataset(schema, expected, expected_schema=None): + dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema) + if expected_schema is not None: + assert dataset.schema.equals(expected_schema) + else: + assert dataset.schema.equals(schema) + result = dataset_reader.to_table(dataset) + assert result.equals(expected) + + # no schema specified + schema = None + expected = table + _check_dataset(schema, expected, expected_schema=table.schema) + + # identical schema specified + schema = table.schema + expected = table + _check_dataset(schema, expected) + + # Specifying schema with change column order + schema = pa.schema([('b', 'float64'), ('a', 'int64')]) + expected = pa.table([[.1, .2, .3], [1, 2, 3]], names=['b', 'a']) + _check_dataset(schema, expected) + + # Specifying schema with missing column + schema = pa.schema([('a', 'int64')]) + expected = pa.table([[1, 2, 3]], names=['a']) + _check_dataset(schema, expected) + + # Specifying schema with additional column + schema = pa.schema([('a', 'int64'), ('c', 'int32')]) + expected = pa.table([[1, 2, 3], + pa.array([None, None, None], type='int32')], + names=['a', 'c']) + _check_dataset(schema, expected) + + # Specifying with differing field types + schema = pa.schema([('a', 'int32'), ('b', 'float64')]) + dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema) + expected = pa.table([table['a'].cast('int32'), + table['b']], + names=['a', 'b']) + _check_dataset(schema, expected) + + # Specifying with incompatible schema + schema = pa.schema([('a', pa.list_(pa.int32())), ('b', 'float64')]) + dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema) + assert dataset.schema.equals(schema) + with pytest.raises(NotImplementedError, + 
match='Unsupported cast from int64 to list'): + dataset_reader.to_table(dataset) + + +@pytest.mark.parquet +def test_incompatible_schema_hang(tempdir, dataset_reader): + # ARROW-13480: deadlock when reading past an errored fragment + + fn = tempdir / "data.parquet" + table = pa.table({'a': [1, 2, 3]}) + pq.write_table(table, fn) + + schema = pa.schema([('a', pa.null())]) + dataset = ds.dataset([str(fn)] * 100, schema=schema) + assert dataset.schema.equals(schema) + scanner = dataset_reader.scanner(dataset) + with pytest.raises(NotImplementedError, + match='Unsupported cast from int64 to null'): + reader = scanner.to_reader() + reader.read_all() + + +def test_ipc_format(tempdir, dataset_reader): + table = pa.table({'a': pa.array([1, 2, 3], type="int8"), + 'b': pa.array([.1, .2, .3], type="float64")}) + + path = str(tempdir / 'test.arrow') + with pa.output_stream(path) as sink: + writer = pa.RecordBatchFileWriter(sink, table.schema) + writer.write_batch(table.to_batches()[0]) + writer.close() + + dataset = ds.dataset(path, format=ds.IpcFileFormat()) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + assert_dataset_fragment_convenience_methods(dataset) + + for format_str in ["ipc", "arrow"]: + dataset = ds.dataset(path, format=format_str) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + +@pytest.mark.orc +def test_orc_format(tempdir, dataset_reader): + from pyarrow import orc + table = pa.table({'a': pa.array([1, 2, 3], type="int8"), + 'b': pa.array([.1, .2, .3], type="float64")}) + + path = str(tempdir / 'test.orc') + orc.write_table(table, path) + + dataset = ds.dataset(path, format=ds.OrcFileFormat()) + fragments = list(dataset.get_fragments()) + assert isinstance(fragments[0], ds.FileFragment) + result = dataset_reader.to_table(dataset) + result.validate(full=True) + assert result.equals(table) + + assert_dataset_fragment_convenience_methods(dataset) + + dataset = ds.dataset(path, format="orc") + result = dataset_reader.to_table(dataset) + result.validate(full=True) + assert result.equals(table) + + result = dataset_reader.to_table(dataset, columns=["b"]) + result.validate(full=True) + assert result.equals(table.select(["b"])) + + result = dataset_reader.to_table( + dataset, columns={"b2": ds.field("b") * 2} + ) + result.validate(full=True) + assert result.equals( + pa.table({'b2': pa.array([.2, .4, .6], type="float64")}) + ) + + assert dataset_reader.count_rows(dataset) == 3 + assert dataset_reader.count_rows(dataset, filter=ds.field("a") > 2) == 1 + + +@pytest.mark.orc +def test_orc_scan_options(tempdir, dataset_reader): + from pyarrow import orc + table = pa.table({'a': pa.array([1, 2, 3], type="int8"), + 'b': pa.array([.1, .2, .3], type="float64")}) + + path = str(tempdir / 'test.orc') + orc.write_table(table, path) + + dataset = ds.dataset(path, format="orc") + result = list(dataset_reader.to_batches(dataset)) + assert len(result) == 1 + assert result[0].num_rows == 3 + assert result[0].equals(table.to_batches()[0]) + # TODO batch_size is not yet supported (ARROW-14153) + # result = list(dataset_reader.to_batches(dataset, batch_size=2)) + # assert len(result) == 2 + # assert result[0].num_rows == 2 + # assert result[0].equals(table.slice(0, 2).to_batches()[0]) + # assert result[1].num_rows == 1 + # assert result[1].equals(table.slice(2, 1).to_batches()[0]) + + +def test_orc_format_not_supported(): + try: + from pyarrow.dataset import OrcFileFormat # noqa + except ImportError: + # ORC is not available, test error message + with 
pytest.raises( + ValueError, match="not built with support for the ORC file" + ): + ds.dataset(".", format="orc") + + +@pytest.mark.orc +def test_orc_writer_not_implemented_for_dataset(): + with pytest.raises( + NotImplementedError, + match="Writing datasets not yet implemented for this file format" + ): + ds.write_dataset( + pa.table({"a": range(10)}), format='orc', base_dir='/tmp' + ) + + of = ds.OrcFileFormat() + with pytest.raises( + NotImplementedError, + match="Writing datasets not yet implemented for this file format" + ): + of.make_write_options() + + +@pytest.mark.pandas +def test_csv_format(tempdir, dataset_reader): + table = pa.table({'a': pa.array([1, 2, 3], type="int64"), + 'b': pa.array([.1, .2, .3], type="float64")}) + + path = str(tempdir / 'test.csv') + table.to_pandas().to_csv(path, index=False) + + dataset = ds.dataset(path, format=ds.CsvFileFormat()) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + assert_dataset_fragment_convenience_methods(dataset) + + dataset = ds.dataset(path, format='csv') + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + +@pytest.mark.pandas +@pytest.mark.parametrize("compression", [ + "bz2", + "gzip", + "lz4", + "zstd", +]) +def test_csv_format_compressed(tempdir, compression, dataset_reader): + if not pyarrow.Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + table = pa.table({'a': pa.array([1, 2, 3], type="int64"), + 'b': pa.array([.1, .2, .3], type="float64")}) + filesystem = fs.LocalFileSystem() + suffix = compression if compression != 'gzip' else 'gz' + path = str(tempdir / f'test.csv.{suffix}') + with filesystem.open_output_stream(path, compression=compression) as sink: + # https://github.com/pandas-dev/pandas/issues/23854 + # With CI version of Pandas (anything < 1.2), Pandas tries to write + # str to the sink + csv_str = table.to_pandas().to_csv(index=False) + sink.write(csv_str.encode('utf-8')) + + dataset = ds.dataset(path, format=ds.CsvFileFormat()) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + +def test_csv_format_options(tempdir, dataset_reader): + path = str(tempdir / 'test.csv') + with open(path, 'w') as sink: + sink.write('skipped\ncol0\nfoo\nbar\n') + dataset = ds.dataset(path, format='csv') + result = dataset_reader.to_table(dataset) + assert result.equals( + pa.table({'skipped': pa.array(['col0', 'foo', 'bar'])})) + + dataset = ds.dataset(path, format=ds.CsvFileFormat( + read_options=pa.csv.ReadOptions(skip_rows=1))) + result = dataset_reader.to_table(dataset) + assert result.equals(pa.table({'col0': pa.array(['foo', 'bar'])})) + + dataset = ds.dataset(path, format=ds.CsvFileFormat( + read_options=pa.csv.ReadOptions(column_names=['foo']))) + result = dataset_reader.to_table(dataset) + assert result.equals( + pa.table({'foo': pa.array(['skipped', 'col0', 'foo', 'bar'])})) + + +def test_csv_format_options_generate_columns(tempdir, dataset_reader): + path = str(tempdir / 'test.csv') + with open(path, 'w') as sink: + sink.write('1,a,true,1\n') + + dataset = ds.dataset(path, format=ds.CsvFileFormat( + read_options=pa.csv.ReadOptions(autogenerate_column_names=True))) + result = dataset_reader.to_table(dataset) + expected_column_names = ["f0", "f1", "f2", "f3"] + assert result.column_names == expected_column_names + assert result.equals(pa.table({'f0': pa.array([1]), + 'f1': pa.array(["a"]), + 'f2': pa.array([True]), + 'f3': pa.array([1])})) + + +def test_csv_fragment_options(tempdir, dataset_reader): + 
path = str(tempdir / 'test.csv') + with open(path, 'w') as sink: + sink.write('col0\nfoo\nspam\nMYNULL\n') + dataset = ds.dataset(path, format='csv') + convert_options = pyarrow.csv.ConvertOptions(null_values=['MYNULL'], + strings_can_be_null=True) + options = ds.CsvFragmentScanOptions( + convert_options=convert_options, + read_options=pa.csv.ReadOptions(block_size=2**16)) + result = dataset_reader.to_table(dataset, fragment_scan_options=options) + assert result.equals(pa.table({'col0': pa.array(['foo', 'spam', None])})) + + csv_format = ds.CsvFileFormat(convert_options=convert_options) + dataset = ds.dataset(path, format=csv_format) + result = dataset_reader.to_table(dataset) + assert result.equals(pa.table({'col0': pa.array(['foo', 'spam', None])})) + + options = ds.CsvFragmentScanOptions() + result = dataset_reader.to_table(dataset, fragment_scan_options=options) + assert result.equals( + pa.table({'col0': pa.array(['foo', 'spam', 'MYNULL'])})) + + +@pytest.mark.pandas +def test_json_format(tempdir, dataset_reader): + table = pa.table({'a': pa.array([1, 2, 3], type="int64"), + 'b': pa.array([.1, .2, .3], type="float64")}) + + path = str(tempdir / 'test.json') + out = table.to_pandas().to_json(orient='records')[1:-1].replace('},{', '}\n{') + with open(path, 'w') as f: + f.write(out) + + dataset = ds.dataset(path, format=ds.JsonFileFormat()) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + assert_dataset_fragment_convenience_methods(dataset) + + dataset = ds.dataset(path, format='json') + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + +@pytest.mark.pandas +def test_json_format_options(tempdir, dataset_reader): + table = pa.table({'a': pa.array([1, 2, 3], type="int64"), + 'b': pa.array([.1, .2, .3], type="float64")}) + + path = str(tempdir / 'test.json') + out = table.to_pandas().to_json(orient='records')[1:-1].replace('},{', '}\n{') + with open(path, 'w') as f: + f.write(out) + + with pytest.raises(ValueError, + match="try to increase block size"): + dataset = ds.dataset(path, format=ds.JsonFileFormat( + read_options=pa.json.ReadOptions(block_size=4))) + + dataset = ds.dataset(path, format=ds.JsonFileFormat( + read_options=pa.json.ReadOptions(block_size=64))) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + +@pytest.mark.pandas +def test_json_fragment_options(tempdir, dataset_reader): + table = pa.table({'a': pa.array([1, 2, 3], type="int64"), + 'b': pa.array([.1, .2, .3], type="float64")}) + + path = str(tempdir / 'test.json') + out = table.to_pandas().to_json(orient='records')[1:-1].replace('},{', '}\n{') + with open(path, 'w') as f: + f.write(out) + + with pytest.raises(ValueError, + match="try to increase block size"): + options = ds.JsonFragmentScanOptions( + read_options=pa.json.ReadOptions(block_size=4)) + dataset = ds.dataset(path, format=ds.JsonFileFormat(options)) + + options = ds.JsonFragmentScanOptions( + read_options=pa.json.ReadOptions(block_size=64)) + dataset = ds.dataset(path, format=ds.JsonFileFormat(options)) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + +def test_encoding(tempdir, dataset_reader): + path = str(tempdir / 'test.csv') + + for encoding, input_rows in [ + ('latin-1', b"a,b\nun,\xe9l\xe9phant"), + ('utf16', b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,' + b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00'), + ]: + + with open(path, 'wb') as sink: + sink.write(input_rows) + + # Interpret as utf8: + expected_schema = pa.schema([("a", 
pa.string()), ("b", pa.string())]) + expected_table = pa.table({'a': ["un"], + 'b': ["éléphant"]}, schema=expected_schema) + + read_options = pa.csv.ReadOptions(encoding=encoding) + file_format = ds.CsvFileFormat(read_options=read_options) + dataset_transcoded = ds.dataset(path, format=file_format) + assert dataset_transcoded.schema.equals(expected_schema) + assert dataset_transcoded.to_table().equals(expected_table) + + +# Test if a dataset with non-utf8 chars in the column names is properly handled +def test_column_names_encoding(tempdir, dataset_reader): + path = str(tempdir / 'test.csv') + + with open(path, 'wb') as sink: + sink.write(b"\xe9,b\nun,\xe9l\xe9phant") + + # Interpret as utf8: + expected_schema = pa.schema([("é", pa.string()), ("b", pa.string())]) + expected_table = pa.table({'é': ["un"], + 'b': ["éléphant"]}, schema=expected_schema) + + # Reading as string without specifying encoding should produce an error + dataset = ds.dataset(path, format='csv', schema=expected_schema) + with pytest.raises(pyarrow.lib.ArrowInvalid, match="invalid UTF8"): + dataset_reader.to_table(dataset) + + # Setting the encoding in the read_options should transcode the data + read_options = pa.csv.ReadOptions(encoding='latin-1') + file_format = ds.CsvFileFormat(read_options=read_options) + dataset_transcoded = ds.dataset(path, format=file_format) + assert dataset_transcoded.schema.equals(expected_schema) + assert dataset_transcoded.to_table().equals(expected_table) + + +def test_feather_format(tempdir, dataset_reader): + from pyarrow.feather import write_feather + + table = pa.table({'a': pa.array([1, 2, 3], type="int8"), + 'b': pa.array([.1, .2, .3], type="float64")}) + + basedir = tempdir / "feather_dataset" + basedir.mkdir() + write_feather(table, str(basedir / "data.feather")) + + dataset = ds.dataset(basedir, format=ds.IpcFileFormat()) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + assert_dataset_fragment_convenience_methods(dataset) + + dataset = ds.dataset(basedir, format="feather") + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + # ARROW-8641 - column selection order + result = dataset_reader.to_table(dataset, columns=["b", "a"]) + assert result.column_names == ["b", "a"] + result = dataset_reader.to_table(dataset, columns=["a", "a"]) + assert result.column_names == ["a", "a"] + + # error with Feather v1 files + write_feather(table, str(basedir / "data1.feather"), version=1) + with pytest.raises(ValueError): + dataset_reader.to_table(ds.dataset(basedir, format="feather")) + + +@pytest.mark.pandas +@pytest.mark.parametrize("compression", [ + "lz4", + "zstd", + "brotli" # not supported +]) +def test_feather_format_compressed(tempdir, compression, dataset_reader): + table = pa.table({'a': pa.array([0]*300, type="int8"), + 'b': pa.array([.1, .2, .3]*100, type="float64")}) + if not pa.Codec.is_available(compression): + pytest.skip() + + basedir = tempdir / "feather_dataset_compressed" + basedir.mkdir() + file_format = ds.IpcFileFormat() + + uncompressed_basedir = tempdir / "feather_dataset_uncompressed" + uncompressed_basedir.mkdir() + ds.write_dataset( + table, + str(uncompressed_basedir / "data.arrow"), + format=file_format, + file_options=file_format.make_write_options(compression=None) + ) + + if compression == "brotli": + with pytest.raises(ValueError, match="Compression type"): + write_options = file_format.make_write_options( + compression=compression) + with pytest.raises(ValueError, match="Compression type"): + codec = 
pa.Codec(compression) + write_options = file_format.make_write_options(compression=codec) + return + + write_options = file_format.make_write_options(compression=compression) + ds.write_dataset( + table, + str(basedir / "data.arrow"), + format=file_format, + file_options=write_options + ) + + dataset = ds.dataset(basedir, format=ds.IpcFileFormat()) + result = dataset_reader.to_table(dataset) + assert result.equals(table) + + compressed_file = basedir / "data.arrow" / "part-0.arrow" + compressed_size = compressed_file.stat().st_size + uncompressed_file = uncompressed_basedir / "data.arrow" / "part-0.arrow" + uncompressed_size = uncompressed_file.stat().st_size + assert compressed_size < uncompressed_size + + +def _create_parquet_dataset_simple(root_path): + """ + Creates a simple (flat files, no nested partitioning) Parquet dataset + """ + + metadata_collector = [] + + for i in range(4): + table = pa.table({'f1': [i] * 10, 'f2': np.random.randn(10)}) + pq.write_to_dataset( + table, str(root_path), metadata_collector=metadata_collector + ) + + metadata_path = str(root_path / '_metadata') + # write _metadata file + pq.write_metadata( + table.schema, metadata_path, + metadata_collector=metadata_collector + ) + return metadata_path, table + + +@pytest.mark.parquet +@pytest.mark.pandas # write_to_dataset currently requires pandas +def test_parquet_dataset_factory(tempdir): + root_path = tempdir / "test_parquet_dataset" + metadata_path, table = _create_parquet_dataset_simple(root_path) + dataset = ds.parquet_dataset(metadata_path) + assert dataset.schema.equals(table.schema) + assert len(dataset.files) == 4 + result = dataset.to_table() + assert result.num_rows == 40 + + +@pytest.mark.parquet +@pytest.mark.pandas # write_to_dataset currently requires pandas +@pytest.mark.skipif(sys.platform == 'win32', + reason="Results in FileNotFoundError on Windows") +def test_parquet_dataset_factory_fsspec(tempdir): + # https://issues.apache.org/jira/browse/ARROW-16413 + fsspec = pytest.importorskip("fsspec") + + # create dataset with pyarrow + root_path = tempdir / "test_parquet_dataset" + metadata_path, table = _create_parquet_dataset_simple(root_path) + + # read using fsspec filesystem + fsspec_fs = fsspec.filesystem("file") + # manually creating a PyFileSystem, because passing the local fsspec + # filesystem would internally be converted to native LocalFileSystem + filesystem = fs.PyFileSystem(fs.FSSpecHandler(fsspec_fs)) + dataset = ds.parquet_dataset(metadata_path, filesystem=filesystem) + assert dataset.schema.equals(table.schema) + assert len(dataset.files) == 4 + result = dataset.to_table() + assert result.num_rows == 40 + + +@pytest.mark.parquet +@pytest.mark.pandas # write_to_dataset currently requires pandas +def test_parquet_dataset_factory_roundtrip(tempdir): + # Simple test to ensure we can roundtrip dataset to + # _metadata/common_metadata and back. A more complex test + # using partitioning will have to wait for ARROW-13269. 
The + # above test (test_parquet_dataset_factory) will not work + # when legacy is False as there is no "append" equivalent in + # the new dataset until ARROW-12358 + root_path = tempdir / "test_parquet_dataset" + table = pa.table({'f1': [0] * 10, 'f2': np.random.randn(10)}) + metadata_collector = [] + pq.write_to_dataset( + table, str(root_path), metadata_collector=metadata_collector, + ) + metadata_path = str(root_path / '_metadata') + # write _metadata file + pq.write_metadata( + table.schema, metadata_path, + metadata_collector=metadata_collector + ) + dataset = ds.parquet_dataset(metadata_path) + assert dataset.schema.equals(table.schema) + result = dataset.to_table() + assert result.num_rows == 10 + + +@pytest.mark.parquet +def test_parquet_dataset_factory_order(tempdir): + # The order of the fragments in the dataset should match the order of the + # row groups in the _metadata file. + metadatas = [] + # Create a dataset where f1 is incrementing from 0 to 100 spread across + # 10 files. Put the row groups in the correct order in _metadata + for i in range(10): + table = pa.table( + {'f1': list(range(i*10, (i+1)*10))}) + table_path = tempdir / f'{i}.parquet' + pq.write_table(table, table_path, metadata_collector=metadatas) + metadatas[-1].set_file_path(f'{i}.parquet') + metadata_path = str(tempdir / '_metadata') + pq.write_metadata(table.schema, metadata_path, metadatas) + dataset = ds.parquet_dataset(metadata_path) + # Ensure the table contains values from 0-100 in the right order + scanned_table = dataset.to_table() + scanned_col = scanned_table.column('f1').to_pylist() + assert scanned_col == list(range(0, 100)) + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_parquet_dataset_factory_invalid(tempdir): + root_path = tempdir / "test_parquet_dataset_invalid" + metadata_path, table = _create_parquet_dataset_simple(root_path) + # remove one of the files + list(root_path.glob("*.parquet"))[0].unlink() + dataset = ds.parquet_dataset(metadata_path) + assert dataset.schema.equals(table.schema) + assert len(dataset.files) == 4 + with pytest.raises(FileNotFoundError): + dataset.to_table() + + +def _create_metadata_file(root_path): + # create _metadata file from existing parquet dataset + parquet_paths = list(sorted(root_path.rglob("*.parquet"))) + schema = pq.ParquetFile(parquet_paths[0]).schema.to_arrow_schema() + + metadata_collector = [] + for path in parquet_paths: + metadata = pq.ParquetFile(path).metadata + metadata.set_file_path(str(path.relative_to(root_path))) + metadata_collector.append(metadata) + + metadata_path = root_path / "_metadata" + pq.write_metadata( + schema, metadata_path, metadata_collector=metadata_collector + ) + return metadata_path + + +def _create_parquet_dataset_partitioned(root_path): + table = pa.table([ + pa.array(range(20)), pa.array(np.random.randn(20)), + pa.array(np.repeat(['a', 'b'], 10))], + names=["f1", "f2", "part"] + ) + table = table.replace_schema_metadata({"key": "value"}) + pq.write_to_dataset(table, str(root_path), partition_cols=['part']) + return _create_metadata_file(root_path), table + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_parquet_dataset_factory_partitioned(tempdir): + root_path = tempdir / "test_parquet_dataset_factory_partitioned" + metadata_path, table = _create_parquet_dataset_partitioned(root_path) + + partitioning = ds.partitioning(flavor="hive") + dataset = ds.parquet_dataset(metadata_path, partitioning=partitioning) + + assert dataset.schema.equals(table.schema) + assert len(dataset.files) == 2 + result = 
dataset.to_table() + assert result.num_rows == 20 + + # the partitioned dataset does not preserve order + result = result.to_pandas().sort_values("f1").reset_index(drop=True) + expected = table.to_pandas() + pd.testing.assert_frame_equal(result, expected) + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_parquet_dataset_factory_metadata(tempdir): + # ensure ParquetDatasetFactory preserves metadata (ARROW-9363) + root_path = tempdir / "test_parquet_dataset_factory_metadata" + metadata_path, table = _create_parquet_dataset_partitioned(root_path) + + dataset = ds.parquet_dataset(metadata_path, partitioning="hive") + assert dataset.schema.equals(table.schema) + assert b"key" in dataset.schema.metadata + + fragments = list(dataset.get_fragments()) + assert b"key" in fragments[0].physical_schema.metadata + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_parquet_dataset_lazy_filtering(tempdir, open_logging_fs): + fs, assert_opens = open_logging_fs + + # Test to ensure that no IO happens when filtering a dataset + # created with ParquetDatasetFactory from a _metadata file + + root_path = tempdir / "test_parquet_dataset_lazy_filtering" + metadata_path, _ = _create_parquet_dataset_simple(root_path) + + # creating the dataset should only open the metadata file + with assert_opens([metadata_path]): + dataset = ds.parquet_dataset( + metadata_path, + partitioning=ds.partitioning(flavor="hive"), + filesystem=fs) + + # materializing fragments should not open any file + with assert_opens([]): + fragments = list(dataset.get_fragments()) + + # filtering fragments should not open any file + with assert_opens([]): + list(dataset.get_fragments(ds.field("f1") > 15)) + + # splitting by row group should still not open any file + with assert_opens([]): + fragments[0].split_by_row_group(ds.field("f1") > 15) + + # ensuring metadata of split fragment should also not open any file + with assert_opens([]): + rg_fragments = fragments[0].split_by_row_group() + rg_fragments[0].ensure_complete_metadata() + + # FIXME(bkietz) on Windows this results in FileNotFoundErrors. 
+ # but actually scanning does open files + # with assert_opens([f.path for f in fragments]): + # dataset.to_table() + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_dataset_schema_metadata(tempdir, dataset_reader): + # ARROW-8802 + df = pd.DataFrame({'a': [1, 2, 3]}) + path = tempdir / "test.parquet" + df.to_parquet(path) + dataset = ds.dataset(path) + + schema = dataset_reader.to_table(dataset).schema + projected_schema = dataset_reader.to_table(dataset, columns=["a"]).schema + + # ensure the pandas metadata is included in the schema + assert b"pandas" in schema.metadata + # ensure it is still there in a projected schema (with column selection) + assert schema.equals(projected_schema, check_metadata=True) + + +@pytest.mark.parquet +def test_filter_mismatching_schema(tempdir, dataset_reader): + # ARROW-9146 + table = pa.table({"col": pa.array([1, 2, 3, 4], type='int32')}) + pq.write_table(table, str(tempdir / "data.parquet")) + + # specifying explicit schema, but that mismatches the schema of the data + schema = pa.schema([("col", pa.int64())]) + dataset = ds.dataset( + tempdir / "data.parquet", format="parquet", schema=schema) + + # filtering on a column with such type mismatch should implicitly + # cast the column + filtered = dataset_reader.to_table(dataset, filter=ds.field("col") > 2) + assert filtered["col"].equals(table["col"].cast('int64').slice(2)) + + fragment = list(dataset.get_fragments())[0] + filtered = dataset_reader.to_table( + fragment, filter=ds.field("col") > 2, schema=schema) + assert filtered["col"].equals(table["col"].cast('int64').slice(2)) + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_dataset_project_only_partition_columns(tempdir, dataset_reader): + # ARROW-8729 + table = pa.table({'part': 'a a b b'.split(), 'col': list(range(4))}) + + path = str(tempdir / 'test_dataset') + pq.write_to_dataset(table, path, partition_cols=['part']) + dataset = ds.dataset(path, partitioning='hive') + + all_cols = dataset_reader.to_table(dataset) + part_only = dataset_reader.to_table(dataset, columns=['part']) + + assert all_cols.column('part').equals(part_only.column('part')) + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_dataset_project_null_column(tempdir, dataset_reader): + df = pd.DataFrame({"col": np.array([None, None, None], dtype='object')}) + + f = tempdir / "test_dataset_project_null_column.parquet" + df.to_parquet(f, engine="pyarrow") + + dataset = ds.dataset(f, format="parquet", + schema=pa.schema([("col", pa.int64())])) + expected = pa.table({'col': pa.array([None, None, None], pa.int64())}) + assert dataset_reader.to_table(dataset).equals(expected) + + +def test_dataset_project_columns(tempdir, dataset_reader): + # basic column re-projection with expressions + from pyarrow import feather + table = pa.table({"A": [1, 2, 3], "B": [1., 2., 3.], "C": ["a", "b", "c"]}) + feather.write_feather(table, tempdir / "data.feather") + + dataset = ds.dataset(tempdir / "data.feather", format="feather") + result = dataset_reader.to_table(dataset, columns={ + 'A_renamed': ds.field('A'), + 'B_as_int': ds.field('B').cast("int32", safe=False), + 'C_is_a': ds.field('C') == 'a' + }) + expected = pa.table({ + "A_renamed": [1, 2, 3], + "B_as_int": pa.array([1, 2, 3], type="int32"), + "C_is_a": [True, False, False], + }) + assert result.equals(expected) + + # raise proper error when not passing an expression + with pytest.raises(TypeError, match="Expected an Expression"): + dataset_reader.to_table(dataset, columns={"A": "A"}) + + +@pytest.mark.pandas 
+@pytest.mark.parquet +def test_dataset_preserved_partitioning(tempdir): + # ARROW-8655 + + # through discovery, but without partitioning + _, path = _create_single_file(tempdir) + dataset = ds.dataset(path) + assert isinstance(dataset.partitioning, ds.DirectoryPartitioning) + # TODO(GH-34884) partitioning attribute not preserved in pickling + # dataset_ = ds.dataset(path) + # for dataset in [dataset_, pickle_module.loads(pickle_module.dumps(dataset_))]: + # assert isinstance(dataset.partitioning, ds.DirectoryPartitioning) + + # through discovery, with hive partitioning but not specified + full_table, path = _create_partitioned_dataset(tempdir) + dataset = ds.dataset(path) + assert isinstance(dataset.partitioning, ds.DirectoryPartitioning) + + # through discovery, with hive partitioning (from a partitioning factory) + dataset = ds.dataset(path, partitioning="hive") + part = dataset.partitioning + assert part is not None + assert isinstance(part, ds.HivePartitioning) + assert part.schema == pa.schema([("part", pa.int32())]) + assert len(part.dictionaries) == 1 + assert part.dictionaries[0] == pa.array([0, 1, 2], pa.int32()) + + # through discovery, with hive partitioning (from a partitioning object) + part = ds.partitioning(pa.schema([("part", pa.int32())]), flavor="hive") + assert isinstance(part, ds.HivePartitioning) # not a factory + assert len(part.dictionaries) == 1 + assert all(x is None for x in part.dictionaries) + dataset = ds.dataset(path, partitioning=part) + part = dataset.partitioning + assert isinstance(part, ds.HivePartitioning) + assert part.schema == pa.schema([("part", pa.int32())]) + # TODO is this expected? + assert len(part.dictionaries) == 1 + assert all(x is None for x in part.dictionaries) + + # through manual creation -> not available + dataset = ds.dataset(path, partitioning="hive") + dataset2 = ds.FileSystemDataset( + list(dataset.get_fragments()), schema=dataset.schema, + format=dataset.format, filesystem=dataset.filesystem + ) + assert dataset2.partitioning is None + + # through discovery with ParquetDatasetFactory + root_path = tempdir / "data-partitioned-metadata" + metadata_path, _ = _create_parquet_dataset_partitioned(root_path) + dataset = ds.parquet_dataset(metadata_path, partitioning="hive") + part = dataset.partitioning + assert part is not None + assert isinstance(part, ds.HivePartitioning) + assert part.schema == pa.schema([("part", pa.string())]) + assert len(part.dictionaries) == 1 + # will be fixed by ARROW-13153 (order is not preserved at the moment) + # assert part.dictionaries[0] == pa.array(["a", "b"], pa.string()) + assert set(part.dictionaries[0].to_pylist()) == {"a", "b"} + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_write_to_dataset_given_null_just_works(tempdir): + schema = pa.schema([ + pa.field('col', pa.int64()), + pa.field('part', pa.dictionary(pa.int32(), pa.string())) + ]) + table = pa.table({'part': [None, None, 'a', 'a'], + 'col': list(range(4))}, schema=schema) + + path = str(tempdir / 'test_dataset') + pq.write_to_dataset(table, path, partition_cols=['part']) + + actual_table = pq.read_table(tempdir / 'test_dataset') + # column.equals can handle the difference in chunking but not the fact + # that `part` will have different dictionaries for the two chunks + assert actual_table.column('part').to_pylist( + ) == table.column('part').to_pylist() + assert actual_table.column('col').equals(table.column('col')) + + +def _sort_table(tab, sort_col): + import pyarrow.compute as pc + sorted_indices = pc.sort_indices( + tab, 
options=pc.SortOptions([(sort_col, 'ascending')])) + return pc.take(tab, sorted_indices) + + +def _check_dataset_roundtrip(dataset, base_dir, expected_files, sort_col, + base_dir_path=None, partitioning=None): + base_dir_path = base_dir_path or base_dir + + ds.write_dataset(dataset, base_dir, format="arrow", + partitioning=partitioning, use_threads=False) + + # check that all files are present + file_paths = list(base_dir_path.rglob("*")) + assert set(file_paths) == set(expected_files) + + # check that reading back in as dataset gives the same result + dataset2 = ds.dataset( + base_dir_path, format="arrow", partitioning=partitioning) + + assert _sort_table(dataset2.to_table(), sort_col).equals( + _sort_table(dataset.to_table(), sort_col)) + + +@pytest.mark.parquet +def test_write_dataset(tempdir): + # manually create a written dataset and read as dataset object + directory = tempdir / 'single-file' + directory.mkdir() + _ = _create_single_file(directory) + dataset = ds.dataset(directory) + + # full string path + target = tempdir / 'single-file-target' + expected_files = [target / "part-0.arrow"] + _check_dataset_roundtrip(dataset, str(target), expected_files, 'a', target) + + # pathlib path object + target = tempdir / 'single-file-target2' + expected_files = [target / "part-0.arrow"] + _check_dataset_roundtrip(dataset, target, expected_files, 'a', target) + + # TODO + # # relative path + # target = tempdir / 'single-file-target3' + # expected_files = [target / "part-0.ipc"] + # _check_dataset_roundtrip( + # dataset, './single-file-target3', expected_files, target) + + # Directory of files + directory = tempdir / 'single-directory' + directory.mkdir() + _ = _create_directory_of_files(directory) + dataset = ds.dataset(directory) + + target = tempdir / 'single-directory-target' + expected_files = [target / "part-0.arrow"] + _check_dataset_roundtrip(dataset, str(target), expected_files, 'a', target) + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_write_dataset_partitioned(tempdir): + directory = tempdir / "partitioned" + _ = _create_parquet_dataset_partitioned(directory) + partitioning = ds.partitioning(flavor="hive") + dataset = ds.dataset(directory, partitioning=partitioning) + + # hive partitioning + target = tempdir / 'partitioned-hive-target' + expected_paths = [ + target / "part=a", target / "part=a" / "part-0.arrow", + target / "part=b", target / "part=b" / "part-0.arrow" + ] + partitioning_schema = ds.partitioning( + pa.schema([("part", pa.string())]), flavor="hive") + _check_dataset_roundtrip( + dataset, str(target), expected_paths, 'f1', target, + partitioning=partitioning_schema) + + # directory partitioning + target = tempdir / 'partitioned-dir-target' + expected_paths = [ + target / "a", target / "a" / "part-0.arrow", + target / "b", target / "b" / "part-0.arrow" + ] + partitioning_schema = ds.partitioning( + pa.schema([("part", pa.string())])) + _check_dataset_roundtrip( + dataset, str(target), expected_paths, 'f1', target, + partitioning=partitioning_schema) + + +def test_write_dataset_with_field_names(tempdir): + table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']}) + + ds.write_dataset(table, tempdir, format='ipc', + partitioning=["b"]) + + load_back = ds.dataset(tempdir, format='ipc', partitioning=["b"]) + files = load_back.files + partitioning_dirs = { + str(pathlib.Path(f).relative_to(tempdir).parent) for f in files + } + assert partitioning_dirs == {"x", "y", "z"} + + load_back_table = load_back.to_table() + assert load_back_table.equals(table) + + +def 
test_write_dataset_with_field_names_hive(tempdir): + table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']}) + + ds.write_dataset(table, tempdir, format='ipc', + partitioning=["b"], partitioning_flavor="hive") + + load_back = ds.dataset(tempdir, format='ipc', partitioning="hive") + files = load_back.files + partitioning_dirs = { + str(pathlib.Path(f).relative_to(tempdir).parent) for f in files + } + assert partitioning_dirs == {"b=x", "b=y", "b=z"} + + load_back_table = load_back.to_table() + assert load_back_table.equals(table) + + +def test_write_dataset_with_scanner(tempdir): + table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z'], + 'c': [1, 2, 3]}) + + ds.write_dataset(table, tempdir, format='ipc', + partitioning=["b"]) + + dataset = ds.dataset(tempdir, format='ipc', partitioning=["b"]) + + with tempfile.TemporaryDirectory() as tempdir2: + ds.write_dataset(dataset.scanner(columns=["b", "c"]), + tempdir2, format='ipc', partitioning=["b"]) + + load_back = ds.dataset(tempdir2, format='ipc', partitioning=["b"]) + load_back_table = load_back.to_table() + assert dict(load_back_table.to_pydict() + ) == table.drop_columns("a").to_pydict() + + +@pytest.mark.parquet +def test_write_dataset_with_backpressure(tempdir): + consumer_gate = threading.Event() + + # A filesystem that blocks all writes so that we can build + # up backpressure. The writes are released at the end of + # the test. + class GatingFs(ProxyHandler): + def open_output_stream(self, path, metadata): + # Block until the end of the test + consumer_gate.wait() + return self._fs.open_output_stream(path, metadata=metadata) + gating_fs = fs.PyFileSystem(GatingFs(fs.LocalFileSystem())) + + schema = pa.schema([pa.field('data', pa.int32())]) + # The scanner should queue ~ 8Mi rows (~8 batches) but due to ARROW-16258 + # it always queues 32 batches. + batch = pa.record_batch([pa.array(list(range(1_000_000)))], schema=schema) + batches_read = 0 + min_backpressure = 32 + end = 200 + keep_going = True + + def counting_generator(): + nonlocal batches_read + while batches_read < end: + if not keep_going: + return + time.sleep(0.01) + batches_read += 1 + yield batch + + scanner = ds.Scanner.from_batches( + counting_generator(), schema=schema, use_threads=True) + + write_thread = threading.Thread( + target=lambda: ds.write_dataset( + scanner, str(tempdir), format='parquet', filesystem=gating_fs)) + write_thread.start() + + try: + start = time.time() + + def duration(): + return time.time() - start + + # This test is timing dependent. There is no signal from the C++ + # when backpressure has been hit. We don't know exactly when + # backpressure will be hit because it may take some time for the + # signal to get from the sink to the scanner. + # + # The test may emit false positives on slow systems. It could + # theoretically emit a false negative if the scanner managed to read + # and emit all 200 batches before the backpressure signal had a chance + # to propagate but the 0.01s delay in the generator should make that + # scenario unlikely. 
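+ # The loop below samples `batches_read` every 0.5 seconds for at most 10 + # seconds: once the count has passed `min_backpressure`, two consecutive + # samples with the same value mean the generator has stalled, which we + # take as the backpressure signal having propagated.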
+ last_value = 0 + backpressure_probably_hit = False + while duration() < 10: + if batches_read > min_backpressure: + if batches_read == last_value: + backpressure_probably_hit = True + break + last_value = batches_read + time.sleep(0.5) + + assert backpressure_probably_hit + + finally: + # If any batches remain to be generated go ahead and + # skip them + keep_going = False + consumer_gate.set() + write_thread.join() + + +def test_write_dataset_with_dataset(tempdir): + table = pa.table({'b': ['x', 'y', 'z'], 'c': [1, 2, 3]}) + + ds.write_dataset(table, tempdir, format='ipc', + partitioning=["b"]) + + dataset = ds.dataset(tempdir, format='ipc', partitioning=["b"]) + + with tempfile.TemporaryDirectory() as tempdir2: + ds.write_dataset(dataset, tempdir2, + format='ipc', partitioning=["b"]) + + load_back = ds.dataset(tempdir2, format='ipc', partitioning=["b"]) + load_back_table = load_back.to_table() + assert dict(load_back_table.to_pydict()) == table.to_pydict() + + +@pytest.mark.pandas +def test_write_dataset_existing_data(tempdir): + directory = tempdir / 'ds' + table = pa.table({'b': ['x', 'y', 'z'], 'c': [1, 2, 3]}) + partitioning = ds.partitioning(schema=pa.schema( + [pa.field('c', pa.int64())]), flavor='hive') + + def compare_tables_ignoring_order(t1, t2): + df1 = t1.to_pandas().sort_values('b').reset_index(drop=True) + df2 = t2.to_pandas().sort_values('b').reset_index(drop=True) + assert df1.equals(df2) + + # First write is ok + ds.write_dataset(table, directory, partitioning=partitioning, format='ipc') + + table = pa.table({'b': ['a', 'b', 'c'], 'c': [2, 3, 4]}) + + # Second write should fail + with pytest.raises(pa.ArrowInvalid): + ds.write_dataset(table, directory, + partitioning=partitioning, format='ipc') + + extra_table = pa.table({'b': ['e']}) + extra_file = directory / 'c=2' / 'foo.arrow' + pyarrow.feather.write_feather(extra_table, extra_file) + + # Should be ok and overwrite with overwrite behavior + ds.write_dataset(table, directory, partitioning=partitioning, + format='ipc', + existing_data_behavior='overwrite_or_ignore') + + overwritten = pa.table( + {'b': ['e', 'x', 'a', 'b', 'c'], 'c': [2, 1, 2, 3, 4]}) + readback = ds.dataset(tempdir, format='ipc', + partitioning=partitioning).to_table() + compare_tables_ignoring_order(readback, overwritten) + assert extra_file.exists() + + # Should be ok and delete matching with delete_matching + ds.write_dataset(table, directory, partitioning=partitioning, + format='ipc', existing_data_behavior='delete_matching') + + overwritten = pa.table({'b': ['x', 'a', 'b', 'c'], 'c': [1, 2, 3, 4]}) + readback = ds.dataset(tempdir, format='ipc', + partitioning=partitioning).to_table() + compare_tables_ignoring_order(readback, overwritten) + assert not extra_file.exists() + + +def _generate_random_int_array(size=4, min=1, max=10): + return np.random.randint(min, max, size) + + +def _generate_data_and_columns(num_of_columns, num_of_records): + data = [] + column_names = [] + for i in range(num_of_columns): + data.append(_generate_random_int_array(size=num_of_records, + min=1, + max=num_of_records)) + column_names.append("c" + str(i)) + record_batch = pa.record_batch(data=data, names=column_names) + return record_batch + + +def _get_num_of_files_generated(base_directory, file_format): + return len(list(pathlib.Path(base_directory).glob(f'**/*.{file_format}'))) + + +@pytest.mark.parquet +def test_write_dataset_max_rows_per_file(tempdir): + directory = tempdir / 'ds' + max_rows_per_file = 10 + max_rows_per_group = 10 + num_of_columns = 2 + 
num_of_records = 35 + + record_batch = _generate_data_and_columns(num_of_columns, + num_of_records) + + ds.write_dataset(record_batch, directory, format="parquet", + max_rows_per_file=max_rows_per_file, + max_rows_per_group=max_rows_per_group) + + files_in_dir = os.listdir(directory) + + # number of partitions with max_rows and the partition with the remainder + expected_partitions = num_of_records // max_rows_per_file + 1 + + # test whether the expected amount of files are written + assert len(files_in_dir) == expected_partitions + + # compute the number of rows per each file written + result_row_combination = [] + for _, f_file in enumerate(files_in_dir): + f_path = directory / str(f_file) + dataset = ds.dataset(f_path, format="parquet") + result_row_combination.append(dataset.to_table().shape[0]) + + # test whether the generated files have the expected number of rows + assert expected_partitions == len(result_row_combination) + assert num_of_records == sum(result_row_combination) + assert all(file_rowcount <= max_rows_per_file + for file_rowcount in result_row_combination) + + +@pytest.mark.parquet +def test_write_dataset_min_rows_per_group(tempdir): + directory = tempdir / 'ds' + min_rows_per_group = 6 + max_rows_per_group = 8 + num_of_columns = 2 + + record_sizes = [5, 5, 5, 5, 5, 4, 4, 4, 4, 4] + + record_batches = [_generate_data_and_columns(num_of_columns, + num_of_records) + for num_of_records in record_sizes] + + data_source = directory / "min_rows_group" + + ds.write_dataset(data=record_batches, base_dir=data_source, + min_rows_per_group=min_rows_per_group, + max_rows_per_group=max_rows_per_group, + format="parquet") + + files_in_dir = os.listdir(data_source) + for _, f_file in enumerate(files_in_dir): + f_path = data_source / str(f_file) + dataset = ds.dataset(f_path, format="parquet") + table = dataset.to_table() + batches = table.to_batches() + + for id, batch in enumerate(batches): + rows_per_batch = batch.num_rows + if id < len(batches) - 1: + assert rows_per_batch >= min_rows_per_group and \ + rows_per_batch <= max_rows_per_group + else: + assert rows_per_batch <= max_rows_per_group + + +@pytest.mark.parquet +def test_write_dataset_max_rows_per_group(tempdir): + directory = tempdir / 'ds' + max_rows_per_group = 18 + num_of_columns = 2 + num_of_records = 30 + + record_batch = _generate_data_and_columns(num_of_columns, + num_of_records) + + data_source = directory / "max_rows_group" + + ds.write_dataset(data=record_batch, base_dir=data_source, + max_rows_per_group=max_rows_per_group, + format="parquet") + + files_in_dir = os.listdir(data_source) + batched_data = [] + for f_file in files_in_dir: + f_path = data_source / str(f_file) + dataset = ds.dataset(f_path, format="parquet") + table = dataset.to_table() + batches = table.to_batches() + for batch in batches: + batched_data.append(batch.num_rows) + + assert batched_data == [18, 12] + + +@pytest.mark.parquet +def test_write_dataset_max_open_files(tempdir): + directory = tempdir / 'ds' + file_format = "parquet" + partition_column_id = 1 + column_names = ['c1', 'c2'] + record_batch_1 = pa.record_batch(data=[[1, 2, 3, 4, 0, 10], + ['a', 'b', 'c', 'd', 'e', 'a']], + names=column_names) + record_batch_2 = pa.record_batch(data=[[5, 6, 7, 8, 0, 1], + ['a', 'b', 'c', 'd', 'e', 'c']], + names=column_names) + record_batch_3 = pa.record_batch(data=[[9, 10, 11, 12, 0, 1], + ['a', 'b', 'c', 'd', 'e', 'd']], + names=column_names) + record_batch_4 = pa.record_batch(data=[[13, 14, 15, 16, 0, 1], + ['a', 'b', 'c', 'd', 'e', 'b']], + 
names=column_names) + + table = pa.Table.from_batches([record_batch_1, record_batch_2, + record_batch_3, record_batch_4]) + + partitioning = ds.partitioning( + pa.schema([(column_names[partition_column_id], pa.string())]), + flavor="hive") + + data_source_1 = directory / "default" + + ds.write_dataset(data=table, base_dir=data_source_1, + partitioning=partitioning, format=file_format) + + # Here we consider the number of unique partitions created when + # partitioning column contains duplicate records. + # Returns: (number_of_files_generated, number_of_partitions) + def _get_compare_pair(data_source, record_batch, file_format, col_id): + num_of_files_generated = _get_num_of_files_generated( + base_directory=data_source, file_format=file_format) + number_of_partitions = len(pa.compute.unique(record_batch[col_id])) + return num_of_files_generated, number_of_partitions + + # CASE 1: when max_open_files=default & max_open_files >= num_of_partitions + # In case of a writing to disk via partitioning based on a + # particular column (considering row labels in that column), + # the number of unique rows must be equal + # to the number of files generated + + num_of_files_generated, number_of_partitions \ + = _get_compare_pair(data_source_1, record_batch_1, file_format, + partition_column_id) + assert num_of_files_generated == number_of_partitions + + # CASE 2: when max_open_files > 0 & max_open_files < num_of_partitions + # the number of files generated must be greater than the number of + # partitions + + data_source_2 = directory / "max_1" + + max_open_files = 3 + + ds.write_dataset(data=table, base_dir=data_source_2, + partitioning=partitioning, format=file_format, + max_open_files=max_open_files, use_threads=False) + + num_of_files_generated, number_of_partitions \ + = _get_compare_pair(data_source_2, record_batch_1, file_format, + partition_column_id) + assert num_of_files_generated > number_of_partitions + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_write_dataset_partitioned_dict(tempdir): + directory = tempdir / "partitioned" + _ = _create_parquet_dataset_partitioned(directory) + + # directory partitioning, dictionary partition columns + dataset = ds.dataset( + directory, + partitioning=ds.HivePartitioning.discover(infer_dictionary=True)) + target = tempdir / 'partitioned-dir-target' + expected_paths = [ + target / "a", target / "a" / "part-0.arrow", + target / "b", target / "b" / "part-0.arrow" + ] + partitioning = ds.partitioning(pa.schema([ + dataset.schema.field('part')]), + dictionaries={'part': pa.array(['a', 'b'])}) + # NB: dictionaries required here since we use partitioning to parse + # directories in _check_dataset_roundtrip (not currently required for + # the formatting step) + _check_dataset_roundtrip( + dataset, str(target), expected_paths, 'f1', target, + partitioning=partitioning) + + +@pytest.mark.parquet +@pytest.mark.pandas +def test_write_dataset_use_threads(tempdir): + directory = tempdir / "partitioned" + _ = _create_parquet_dataset_partitioned(directory) + dataset = ds.dataset(directory, partitioning="hive") + + partitioning = ds.partitioning( + pa.schema([("part", pa.string())]), flavor="hive") + + target1 = tempdir / 'partitioned1' + paths_written = [] + + def file_visitor(written_file): + paths_written.append(written_file.path) + + ds.write_dataset( + dataset, target1, format="feather", partitioning=partitioning, + use_threads=True, file_visitor=file_visitor + ) + + expected_paths = { + target1 / 'part=a' / 'part-0.feather', + target1 / 'part=b' / 
'part-0.feather' + } + paths_written_set = set(map(pathlib.Path, paths_written)) + assert paths_written_set == expected_paths + + target2 = tempdir / 'partitioned2' + ds.write_dataset( + dataset, target2, format="feather", partitioning=partitioning, + use_threads=False + ) + + # check that reading in gives same result + result1 = ds.dataset(target1, format="feather", partitioning=partitioning) + result2 = ds.dataset(target2, format="feather", partitioning=partitioning) + assert result1.to_table().equals(result2.to_table()) + + +def test_write_table(tempdir): + table = pa.table([ + pa.array(range(20)), pa.array(np.random.randn(20)), + pa.array(np.repeat(['a', 'b'], 10)) + ], names=["f1", "f2", "part"]) + + base_dir = tempdir / 'single' + ds.write_dataset(table, base_dir, + basename_template='dat_{i}.arrow', format="feather") + # check that all files are present + file_paths = list(base_dir.rglob("*")) + expected_paths = [base_dir / "dat_0.arrow"] + assert set(file_paths) == set(expected_paths) + # check Table roundtrip + result = ds.dataset(base_dir, format="ipc").to_table() + assert result.equals(table) + + # with partitioning + base_dir = tempdir / 'partitioned' + expected_paths = [ + base_dir / "part=a", base_dir / "part=a" / "dat_0.arrow", + base_dir / "part=b", base_dir / "part=b" / "dat_0.arrow" + ] + + visited_paths = [] + visited_sizes = [] + + def file_visitor(written_file): + visited_paths.append(written_file.path) + visited_sizes.append(written_file.size) + + partitioning = ds.partitioning( + pa.schema([("part", pa.string())]), flavor="hive") + ds.write_dataset(table, base_dir, format="feather", + basename_template='dat_{i}.arrow', + partitioning=partitioning, file_visitor=file_visitor) + file_paths = list(base_dir.rglob("*")) + assert set(file_paths) == set(expected_paths) + actual_sizes = [os.path.getsize(path) for path in visited_paths] + assert visited_sizes == actual_sizes + result = ds.dataset(base_dir, format="ipc", partitioning=partitioning) + assert result.to_table().equals(table) + assert len(visited_paths) == 2 + for visited_path in visited_paths: + assert pathlib.Path(visited_path) in expected_paths + + +def test_write_table_multiple_fragments(tempdir): + table = pa.table([ + pa.array(range(10)), pa.array(np.random.randn(10)), + pa.array(np.repeat(['a', 'b'], 5)) + ], names=["f1", "f2", "part"]) + table = pa.concat_tables([table]*2) + + # Table with multiple batches written as single Fragment by default + base_dir = tempdir / 'single' + ds.write_dataset(table, base_dir, format="feather") + assert set(base_dir.rglob("*")) == set([base_dir / "part-0.feather"]) + assert ds.dataset(base_dir, format="ipc").to_table().equals(table) + + # Same for single-element list of Table + base_dir = tempdir / 'single-list' + ds.write_dataset([table], base_dir, format="feather") + assert set(base_dir.rglob("*")) == set([base_dir / "part-0.feather"]) + assert ds.dataset(base_dir, format="ipc").to_table().equals(table) + + # Provide list of batches to write multiple fragments + base_dir = tempdir / 'multiple' + ds.write_dataset(table.to_batches(), base_dir, format="feather") + assert set(base_dir.rglob("*")) == set( + [base_dir / "part-0.feather"]) + assert ds.dataset(base_dir, format="ipc").to_table().equals(table) + + # Provide list of tables to write multiple fragments + base_dir = tempdir / 'multiple-table' + ds.write_dataset([table, table], base_dir, format="feather") + assert set(base_dir.rglob("*")) == set( + [base_dir / "part-0.feather"]) + assert ds.dataset(base_dir, 
format="ipc").to_table().equals( + pa.concat_tables([table]*2) + ) + + +def test_write_iterable(tempdir): + table = pa.table([ + pa.array(range(20)), pa.array(np.random.randn(20)), + pa.array(np.repeat(['a', 'b'], 10)) + ], names=["f1", "f2", "part"]) + + base_dir = tempdir / 'inmemory_iterable' + ds.write_dataset((batch for batch in table.to_batches()), base_dir, + schema=table.schema, + basename_template='dat_{i}.arrow', format="feather") + result = ds.dataset(base_dir, format="ipc").to_table() + assert result.equals(table) + + base_dir = tempdir / 'inmemory_reader' + reader = pa.RecordBatchReader.from_batches(table.schema, + table.to_batches()) + ds.write_dataset(reader, base_dir, + basename_template='dat_{i}.arrow', format="feather") + result = ds.dataset(base_dir, format="ipc").to_table() + assert result.equals(table) + + +def test_write_scanner(tempdir, dataset_reader): + table = pa.table([ + pa.array(range(20)), pa.array(np.random.randn(20)), + pa.array(np.repeat(['a', 'b'], 10)) + ], names=["f1", "f2", "part"]) + dataset = ds.dataset(table) + + base_dir = tempdir / 'dataset_from_scanner' + ds.write_dataset(dataset_reader.scanner( + dataset), base_dir, format="feather") + result = dataset_reader.to_table(ds.dataset(base_dir, format="ipc")) + assert result.equals(table) + + # scanner with different projected_schema + base_dir = tempdir / 'dataset_from_scanner2' + ds.write_dataset(dataset_reader.scanner(dataset, columns=["f1"]), + base_dir, format="feather") + result = dataset_reader.to_table(ds.dataset(base_dir, format="ipc")) + assert result.equals(table.select(["f1"])) + + # schema not allowed when writing a scanner + with pytest.raises(ValueError, match="Cannot specify a schema"): + ds.write_dataset(dataset_reader.scanner(dataset), base_dir, + schema=table.schema, format="feather") + + +def test_write_table_partitioned_dict(tempdir): + # ensure writing table partitioned on a dictionary column works without + # specifying the dictionary values explicitly + table = pa.table([ + pa.array(range(20)), + pa.array(np.repeat(['a', 'b'], 10)).dictionary_encode(), + ], names=['col', 'part']) + + partitioning = ds.partitioning(table.select(["part"]).schema) + + base_dir = tempdir / "dataset" + ds.write_dataset( + table, base_dir, format="feather", partitioning=partitioning + ) + + # check roundtrip + partitioning_read = ds.DirectoryPartitioning.discover( + ["part"], infer_dictionary=True) + result = ds.dataset( + base_dir, format="ipc", partitioning=partitioning_read + ).to_table() + assert result.equals(table) + + +@pytest.mark.parquet +def test_write_dataset_parquet(tempdir): + table = pa.table([ + pa.array(range(20), type="uint32"), + pa.array(np.arange("2012-01-01", 20, dtype="datetime64[D]").astype( + "datetime64[ns]")), + pa.array(np.repeat(['a', 'b'], 10)) + ], names=["f1", "f2", "part"]) + + # using default "parquet" format string + + base_dir = tempdir / 'parquet_dataset' + ds.write_dataset(table, base_dir, format="parquet") + # check that all files are present + file_paths = list(base_dir.rglob("*")) + expected_paths = [base_dir / "part-0.parquet"] + assert set(file_paths) == set(expected_paths) + # check Table roundtrip with default version + result = ds.dataset(base_dir, format="parquet").to_table() + assert result.equals(table) + + # using custom options + for version in ["1.0", "2.4", "2.6"]: + format = ds.ParquetFileFormat() + opts = format.make_write_options(version=version) + assert " 1).scanner(filter=pc.field("colA") != 6) + assert r2.to_table() == pa.table({ + "colA": 
[2], + "col2": ["b"] + }) + + # Ensure that writing back to disk works. + ds.write_dataset(result, tempdir / "filtered", format="ipc") + filtered = ds.dataset(tempdir / "filtered", format="ipc") + assert filtered.to_table() == pa.table({ + "colA": [1], + "col2": ["a"] + }) + + # Ensure that joining to a filtered Dataset works. + joined = result.join(ds.dataset(pa.table({ + "colB": [10, 20], + "col2": ["a", "b"] + })), keys="col2", join_type="right outer") + assert joined.to_table().sort_by("colB") == pa.table({ + "colA": [1, None], + "colB": [10, 20], + "col2": ["a", "b"] + }) + + # Filter with None doesn't work for now + with pytest.raises(TypeError): + ds1.filter(None) + + # Can't get fragments of a filtered dataset + with pytest.raises(ValueError): + result.get_fragments() + + # Ensure replacing schema preserves the filter. + schema_without_col2 = ds1.schema.remove(1) + newschema = ds1.filter( + pc.field("colA") < 3 + ).replace_schema(schema_without_col2) + assert newschema.to_table() == pa.table({ + "colA": [1, 2], + }) + with pytest.raises(pa.ArrowInvalid): + # The schema might end up being replaced with + # something that makes the filter invalid. + # Let's make sure we error nicely. + result.replace_schema(schema_without_col2).to_table() + + +@pytest.mark.parametrize('dstype', [ + "fs", "mem" +]) +def test_union_dataset_filter(tempdir, dstype): + t1 = pa.table({ + "colA": [1, 2, 6, 8], + "col2": ["a", "b", "f", "g"] + }) + t2 = pa.table({ + "colA": [9, 10, 11], + "col2": ["h", "i", "l"] + }) + if dstype == "fs": + ds.write_dataset(t1, tempdir / "t1", format="ipc") + ds1 = ds.dataset(tempdir / "t1", format="ipc") + ds.write_dataset(t2, tempdir / "t2", format="ipc") + ds2 = ds.dataset(tempdir / "t2", format="ipc") + elif dstype == "mem": + ds1 = ds.dataset(t1) + ds2 = ds.dataset(t2) + else: + raise NotImplementedError + + filtered_union_ds = ds.dataset((ds1, ds2)).filter( + (pc.field("colA") < 3) | (pc.field("colA") == 9) + ) + assert filtered_union_ds.to_table() == pa.table({ + "colA": [1, 2, 9], + "col2": ["a", "b", "h"] + }) + + joined = filtered_union_ds.join(ds.dataset(pa.table({ + "colB": [10, 20], + "col2": ["a", "b"] + })), keys="col2", join_type="left outer") + assert joined.to_table().sort_by("colA") == pa.table({ + "colA": [1, 2, 9], + "col2": ["a", "b", "h"], + "colB": [10, 20, None] + }) + + filtered_ds1 = ds1.filter(pc.field("colA") < 3) + filtered_ds2 = ds2.filter(pc.field("colA") < 10) + + with pytest.raises(ValueError, match="currently not supported"): + ds.dataset((filtered_ds1, filtered_ds2)) + + +def test_parquet_dataset_filter(tempdir): + root_path = tempdir / "test_parquet_dataset_filter" + metadata_path, _ = _create_parquet_dataset_simple(root_path) + dataset = ds.parquet_dataset(metadata_path) + + result = dataset.to_table() + assert result.num_rows == 40 + + filtered_ds = dataset.filter(pc.field("f1") < 2) + assert filtered_ds.to_table().num_rows == 20 + + with pytest.raises(ValueError): + filtered_ds.get_fragments() + + +def test_write_dataset_with_scanner_use_projected_schema(tempdir): + """ + Ensure the projected schema is used to validate partitions for scanner + + https://issues.apache.org/jira/browse/ARROW-17228 + """ + table = pa.table([pa.array(range(20))], names=["original_column"]) + table_dataset = ds.dataset(table) + columns = { + "renamed_column": ds.field("original_column"), + } + scanner = table_dataset.scanner(columns=columns) + + ds.write_dataset( + scanner, tempdir, partitioning=["renamed_column"], format="ipc") + with ( + pytest.raises( + 
KeyError, match=r"'Column original_column does not exist in schema" + ) + ): + ds.write_dataset( + scanner, tempdir, partitioning=["original_column"], format="ipc" + ) + + +@pytest.mark.parametrize("format", ("ipc", "parquet")) +def test_read_table_nested_columns(tempdir, format): + if format == "parquet": + pytest.importorskip("pyarrow.parquet") + + table = pa.table({"user_id": ["abc123", "qrs456"], + "a.dotted.field": [1, 2], + "interaction": [ + {"type": None, "element": "button", + "values": [1, 2], "structs": [{"foo": "bar"}, None]}, + {"type": "scroll", "element": "window", + "values": [None, 3, 4], "structs": [{"fizz": "buzz"}]} + ]}) + ds.write_dataset(table, tempdir / "table", format=format) + ds1 = ds.dataset(tempdir / "table", format=format) + + # Dot path to read subsets of nested data + table = ds1.to_table( + columns=["user_id", "interaction.type", "interaction.values", + "interaction.structs", "a.dotted.field"]) + assert table.to_pylist() == [ + {'user_id': 'abc123', 'type': None, 'values': [1, 2], + 'structs': [{'fizz': None, 'foo': 'bar'}, None], 'a.dotted.field': 1}, + {'user_id': 'qrs456', 'type': 'scroll', 'values': [None, 3, 4], + 'structs': [{'fizz': 'buzz', 'foo': None}], 'a.dotted.field': 2} + ] + + +def test_dataset_partition_with_slash(tmpdir): + from pyarrow import dataset as ds + + path = tmpdir / "slash-writer-x" + + dt_table = pa.Table.from_arrays([ + pa.array([1, 2, 3, 4, 5], pa.int32()), + pa.array(["experiment/A/f.csv", "experiment/B/f.csv", + "experiment/A/f.csv", "experiment/C/k.csv", + "experiment/M/i.csv"], pa.utf8())], ["exp_id", "exp_meta"]) + + ds.write_dataset( + data=dt_table, + base_dir=path, + format='ipc', + partitioning=['exp_meta'], + partitioning_flavor='hive', + ) + + read_table = ds.dataset( + source=path, + format='ipc', + partitioning='hive', + schema=pa.schema([pa.field("exp_id", pa.int32()), + pa.field("exp_meta", pa.utf8())]) + ).to_table().combine_chunks() + + assert dt_table == read_table.sort_by("exp_id") + + exp_meta = dt_table.column(1).to_pylist() + exp_meta = sorted(set(exp_meta)) # take unique + encoded_paths = ["exp_meta=" + quote(path, safe='') for path in exp_meta] + file_paths = sorted(os.listdir(path)) + + assert encoded_paths == file_paths + + +@pytest.mark.parquet +def test_write_dataset_preserve_nullability(tempdir): + # GH-35730 + schema_nullable = pa.schema([ + pa.field("x", pa.int64(), nullable=False), + pa.field("y", pa.int64(), nullable=True)]) + + arrays = [[1, 2, 3], [None, 5, None]] + table = pa.Table.from_arrays(arrays, schema=schema_nullable) + + pq.write_to_dataset(table, tempdir / "nulltest1") + dataset = ds.dataset(tempdir / "nulltest1", format="parquet") + # nullability of field is preserved + assert dataset.to_table().schema.equals(schema_nullable) + + ds.write_dataset(table, tempdir / "nulltest2", format="parquet") + dataset = ds.dataset(tempdir / "nulltest2", format="parquet") + assert dataset.to_table().schema.equals(schema_nullable) + + ds.write_dataset([table, table], tempdir / "nulltest3", format="parquet") + dataset = ds.dataset(tempdir / "nulltest3", format="parquet") + assert dataset.to_table().schema.equals(schema_nullable) + + +def test_write_dataset_preserve_field_metadata(tempdir): + schema_metadata = pa.schema([ + pa.field("x", pa.int64(), metadata={b'foo': b'bar'}), + pa.field("y", pa.int64())]) + + schema_no_meta = pa.schema([ + pa.field("x", pa.int64()), + pa.field("y", pa.int64())]) + + arrays = [[1, 2, 3], [None, 5, None]] + table = pa.Table.from_arrays(arrays, schema=schema_metadata) + 
table_no_meta = pa.Table.from_arrays(arrays, schema=schema_no_meta) + + # If no schema is provided the schema of the first table will be used + ds.write_dataset([table, table_no_meta], tempdir / "test1", format="parquet") + dataset = ds.dataset(tempdir / "test1", format="parquet") + assert dataset.to_table().schema.equals(schema_metadata, check_metadata=True) + + ds.write_dataset([table_no_meta, table], tempdir / "test2", format="parquet") + dataset = ds.dataset(tempdir / "test2", format="parquet") + assert dataset.to_table().schema.equals(schema_no_meta, check_metadata=True) + + # If a schema is provided it will override the schema of the input + ds.write_dataset([table_no_meta, table], tempdir / "test3", format="parquet", + schema=schema_metadata) + dataset = ds.dataset(tempdir / "test3", format="parquet") + assert dataset.to_table().schema.equals(schema_metadata, check_metadata=True) + + +def test_write_dataset_write_page_index(tempdir): + for write_statistics in [True, False]: + for write_page_index in [True, False]: + schema = pa.schema([ + pa.field("x", pa.int64()), + pa.field("y", pa.int64())]) + + arrays = [[1, 2, 3], [None, 5, None]] + table = pa.Table.from_arrays(arrays, schema=schema) + + file_format = ds.ParquetFileFormat() + base_dir = tempdir / f"write_page_index_{write_page_index}" + ds.write_dataset( + table, + base_dir, + format="parquet", + file_options=file_format.make_write_options( + write_statistics=write_statistics, + write_page_index=write_page_index, + ), + existing_data_behavior='overwrite_or_ignore', + ) + ds1 = ds.dataset(base_dir, format="parquet") + + for file in ds1.files: + # Can retrieve sorting columns from metadata + metadata = pq.read_metadata(file) + cc = metadata.row_group(0).column(0) + assert cc.has_offset_index is write_page_index + assert cc.has_column_index is write_page_index & write_statistics + + +@pytest.mark.parametrize('dstype', [ + "fs", "mem" +]) +def test_dataset_sort_by(tempdir, dstype): + table = pa.table([ + pa.array([3, 1, 4, 2, 5]), + pa.array(["b", "a", "b", "a", "c"]), + ], names=["values", "keys"]) + + if dstype == "fs": + ds.write_dataset(table, tempdir / "t1", format="ipc") + dt = ds.dataset(tempdir / "t1", format="ipc") + elif dstype == "mem": + dt = ds.dataset(table) + else: + raise NotImplementedError + + assert dt.sort_by("values").to_table().to_pydict() == { + "keys": ["a", "a", "b", "b", "c"], + "values": [1, 2, 3, 4, 5] + } + + assert dt.sort_by([("values", "descending")]).to_table().to_pydict() == { + "keys": ["c", "b", "b", "a", "a"], + "values": [5, 4, 3, 2, 1] + } + + assert dt.filter((pc.field("values") < 4)).sort_by( + "values" + ).to_table().to_pydict() == { + "keys": ["a", "a", "b"], + "values": [1, 2, 3] + } + + table = pa.Table.from_arrays([ + pa.array([5, 7, 7, 35], type=pa.int64()), + pa.array(["foo", "car", "bar", "foobar"]) + ], names=["a", "b"]) + dt = ds.dataset(table) + + sorted_tab = dt.sort_by([("a", "descending")]) + sorted_tab_dict = sorted_tab.to_table().to_pydict() + assert sorted_tab_dict["a"] == [35, 7, 7, 5] + assert sorted_tab_dict["b"] == ["foobar", "car", "bar", "foo"] + + sorted_tab = dt.sort_by([("a", "ascending")]) + sorted_tab_dict = sorted_tab.to_table().to_pydict() + assert sorted_tab_dict["a"] == [5, 7, 7, 35] + assert sorted_tab_dict["b"] == ["foo", "car", "bar", "foobar"] + + +def test_checksum_write_dataset_read_dataset_to_table(tempdir): + """Check that checksum verification works for datasets created with + ds.write_dataset and read with ds.dataset.to_table""" + + table_orig = 
pa.table({'a': [1, 2, 3, 4]}) + + # Write a sample dataset with page checksum enabled + pq_write_format = pa.dataset.ParquetFileFormat() + write_options = pq_write_format.make_write_options( + write_page_checksum=True) + + original_dir_path = tempdir / 'correct_dir' + ds.write_dataset( + data=table_orig, + base_dir=original_dir_path, + format=pq_write_format, + file_options=write_options, + ) + + # Open dataset and verify that the data is correct + pq_scan_opts_crc = ds.ParquetFragmentScanOptions( + page_checksum_verification=True) + pq_read_format_crc = pa.dataset.ParquetFileFormat( + default_fragment_scan_options=pq_scan_opts_crc) + table_check = ds.dataset( + original_dir_path, + format=pq_read_format_crc + ).to_table() + assert table_orig == table_check + + # Copy dataset dir (which should be just one file) + corrupted_dir_path = tempdir / 'corrupted_dir' + copytree(original_dir_path, corrupted_dir_path) + + # Read the only file in the path as binary and swap the 31-th and 36-th + # bytes. This should be equivalent to storing the following data: + # pa.table({'a': [1, 3, 2, 4]}) + corrupted_file_path_list = list(corrupted_dir_path.iterdir()) + assert len(corrupted_file_path_list) == 1 + corrupted_file_path = corrupted_file_path_list[0] + bin_data = bytearray(corrupted_file_path.read_bytes()) + + # Swap two bytes to emulate corruption. Also, check that the two bytes are + # different, otherwise no corruption occurs + assert bin_data[31] != bin_data[36] + bin_data[31], bin_data[36] = bin_data[36], bin_data[31] + + # Write the corrupted data to the parquet file + corrupted_file_path.write_bytes(bin_data) + + # Case 1: Reading the corrupted file with dataset().to_table() and without + # page checksum verification succeeds but yields corrupted data + pq_scan_opts_no_crc = ds.ParquetFragmentScanOptions( + page_checksum_verification=False) + pq_read_format_no_crc = pa.dataset.ParquetFileFormat( + default_fragment_scan_options=pq_scan_opts_no_crc) + table_corrupt = ds.dataset( + corrupted_dir_path, format=pq_read_format_no_crc).to_table() + + # The read should complete without error, but the table has different + # content than the original file! 
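+ # Note: the swapped bytes land in the encoded values (a data page), not in + # the footer or column metadata, so the file still parses and only the + # page-level CRC check (Case 2 below) can notice the corruption.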
+ assert table_corrupt != table_orig + assert table_corrupt == pa.table({'a': [1, 3, 2, 4]}) + + # Case 2: Reading the corrupted file with read_table() and with page + # checksum verification enabled raises an exception + with pytest.raises(OSError, match="CRC checksum verification"): + _ = ds.dataset( + corrupted_dir_path, + format=pq_read_format_crc + ).to_table() + + +def test_make_write_options_error(): + # GH-39440: calling make_write_options as a static class method + msg_1 = ("make_write_options() should be called on an " + "instance of ParquetFileFormat") + # GH-41043: In Cython2 all Cython methods were "regular" C extension methods + # see: https://github.com/cython/cython/issues/6127#issuecomment-2038153359 + msg_2 = ("descriptor 'make_write_options' for " + "'pyarrow._dataset_parquet.ParquetFileFormat' objects " + "doesn't apply to a 'int'") + with pytest.raises(TypeError) as excinfo: + pa.dataset.ParquetFileFormat.make_write_options(43) + assert msg_1 in str(excinfo.value) or msg_2 in str(excinfo.value) + + pformat = pa.dataset.ParquetFileFormat() + msg = "make_write_options\\(\\) takes exactly 0 positional arguments" + with pytest.raises(TypeError, match=msg): + pformat.make_write_options(43) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py new file mode 100644 index 0000000000000000000000000000000000000000..2a631db9fc0fa279d650827220126f68fb67801a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py @@ -0,0 +1,217 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
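+ +# Dataset-level tests for Parquet modular encryption: writing with a +# ParquetEncryptionConfig passed through make_write_options() and reading +# back with a ParquetDecryptionConfig set on ParquetFragmentScanOptions.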
+ +import base64 +from datetime import timedelta +import numpy as np +import pyarrow.fs as fs +import pyarrow as pa + +import pytest + +encryption_unavailable = False + +try: + import pyarrow.parquet as pq + import pyarrow.dataset as ds +except ImportError: + pq = None + ds = None + +try: + from pyarrow.tests.parquet.encryption import InMemoryKmsClient + import pyarrow.parquet.encryption as pe +except ImportError: + encryption_unavailable = True + + +# Marks all of the tests in this module +pytestmark = pytest.mark.dataset + + +FOOTER_KEY = b"0123456789112345" +FOOTER_KEY_NAME = "footer_key" +COL_KEY = b"1234567890123450" +COL_KEY_NAME = "col_key" + + +def create_sample_table(): + return pa.table( + { + "year": [2020, 2022, 2021, 2022, 2019, 2021], + "n_legs": [2, 2, 4, 4, 5, 100], + "animal": [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede", + ], + } + ) + + +def create_encryption_config(): + return pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME, + plaintext_footer=False, + column_keys={COL_KEY_NAME: ["n_legs", "animal"]}, + encryption_algorithm="AES_GCM_V1", + # requires timedelta or an assertion is raised + cache_lifetime=timedelta(minutes=5.0), + data_key_length_bits=256, + ) + + +def create_decryption_config(): + return pe.DecryptionConfiguration(cache_lifetime=300) + + +def create_kms_connection_config(): + return pe.KmsConnectionConfig( + custom_kms_conf={ + FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"), + COL_KEY_NAME: COL_KEY.decode("UTF-8"), + } + ) + + +def kms_factory(kms_connection_configuration): + return InMemoryKmsClient(kms_connection_configuration) + + +@pytest.mark.skipif( + encryption_unavailable, reason="Parquet Encryption is not currently enabled" +) +def test_dataset_encryption_decryption(): + table = create_sample_table() + + encryption_config = create_encryption_config() + decryption_config = create_decryption_config() + kms_connection_config = create_kms_connection_config() + + crypto_factory = pe.CryptoFactory(kms_factory) + parquet_encryption_cfg = ds.ParquetEncryptionConfig( + crypto_factory, kms_connection_config, encryption_config + ) + parquet_decryption_cfg = ds.ParquetDecryptionConfig( + crypto_factory, kms_connection_config, decryption_config + ) + + # create write_options with dataset encryption config + pformat = pa.dataset.ParquetFileFormat() + write_options = pformat.make_write_options(encryption_config=parquet_encryption_cfg) + + mockfs = fs._MockFileSystem() + mockfs.create_dir("/") + + ds.write_dataset( + data=table, + base_dir="sample_dataset", + format=pformat, + file_options=write_options, + filesystem=mockfs, + ) + + # read without decryption config -> should error is dataset was properly encrypted + pformat = pa.dataset.ParquetFileFormat() + with pytest.raises(IOError, match=r"no decryption"): + ds.dataset("sample_dataset", format=pformat, filesystem=mockfs) + + # set decryption config for parquet fragment scan options + pq_scan_opts = ds.ParquetFragmentScanOptions( + decryption_config=parquet_decryption_cfg + ) + pformat = pa.dataset.ParquetFileFormat(default_fragment_scan_options=pq_scan_opts) + dataset = ds.dataset("sample_dataset", format=pformat, filesystem=mockfs) + + assert table.equals(dataset.to_table()) + + +@pytest.mark.skipif( + not encryption_unavailable, reason="Parquet Encryption is currently enabled" +) +def test_write_dataset_parquet_without_encryption(): + """Test write_dataset with ParquetFileFormat and test if an exception is thrown + if you try to set encryption_config using 
make_write_options""" + + # Set the encryption configuration using ParquetFileFormat + # and make_write_options + pformat = pa.dataset.ParquetFileFormat() + + with pytest.raises(NotImplementedError): + _ = pformat.make_write_options(encryption_config="some value") + + +@pytest.mark.skipif( + encryption_unavailable, reason="Parquet Encryption is not currently enabled" +) +def test_large_row_encryption_decryption(): + """Test encryption and decryption of a large number of rows.""" + + class NoOpKmsClient(pe.KmsClient): + def wrap_key(self, key_bytes: bytes, _: str) -> bytes: + b = base64.b64encode(key_bytes) + return b + + def unwrap_key(self, wrapped_key: bytes, _: str) -> bytes: + b = base64.b64decode(wrapped_key) + return b + + row_count = 2**15 + 1 + table = pa.Table.from_arrays( + [pa.array(np.random.rand(row_count), type=pa.float32())], names=["foo"] + ) + + kms_config = pe.KmsConnectionConfig() + crypto_factory = pe.CryptoFactory(lambda _: NoOpKmsClient()) + encryption_config = pe.EncryptionConfiguration( + footer_key="UNIMPORTANT_KEY", + column_keys={"UNIMPORTANT_KEY": ["foo"]}, + double_wrapping=True, + plaintext_footer=False, + data_key_length_bits=128, + ) + pqe_config = ds.ParquetEncryptionConfig( + crypto_factory, kms_config, encryption_config + ) + pqd_config = ds.ParquetDecryptionConfig( + crypto_factory, kms_config, pe.DecryptionConfiguration() + ) + scan_options = ds.ParquetFragmentScanOptions(decryption_config=pqd_config) + file_format = ds.ParquetFileFormat(default_fragment_scan_options=scan_options) + write_options = file_format.make_write_options(encryption_config=pqe_config) + file_decryption_properties = crypto_factory.file_decryption_properties(kms_config) + + mockfs = fs._MockFileSystem() + mockfs.create_dir("/") + + path = "large-row-test-dataset" + ds.write_dataset(table, path, format=file_format, + file_options=write_options, filesystem=mockfs) + + file_path = path + "/part-0.parquet" + new_table = pq.ParquetFile( + file_path, decryption_properties=file_decryption_properties, + filesystem=mockfs + ).read() + assert table == new_table + + dataset = ds.dataset(path, format=file_format, filesystem=mockfs) + new_table = dataset.to_table() + assert table == new_table diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_deprecations.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..b165289377aabe3b63cd998b3bb04b7e954f751c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_deprecations.py @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +# Check that various deprecation warnings are raised + +# flake8: noqa + +import pyarrow as pa +import pytest diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_dlpack.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_dlpack.py new file mode 100644 index 0000000000000000000000000000000000000000..7cf3f4acdbd40a808dbd8d2defdc8bda61435105 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_dlpack.py @@ -0,0 +1,142 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import ctypes +from functools import wraps +import pytest + +import numpy as np + +import pyarrow as pa +from pyarrow.vendored.version import Version + + +def PyCapsule_IsValid(capsule, name): + return ctypes.pythonapi.PyCapsule_IsValid(ctypes.py_object(capsule), name) == 1 + + +def check_dlpack_export(arr, expected_arr): + DLTensor = arr.__dlpack__() + assert PyCapsule_IsValid(DLTensor, b"dltensor") is True + + result = np.from_dlpack(arr) + np.testing.assert_array_equal(result, expected_arr, strict=True) + + assert arr.__dlpack_device__() == (1, 0) + + +def check_bytes_allocated(f): + @wraps(f) + def wrapper(*args, **kwargs): + allocated_bytes = pa.total_allocated_bytes() + try: + return f(*args, **kwargs) + finally: + assert pa.total_allocated_bytes() == allocated_bytes + return wrapper + + +@check_bytes_allocated +@pytest.mark.parametrize( + ('value_type', 'np_type'), + [ + (pa.uint8(), np.uint8), + (pa.uint16(), np.uint16), + (pa.uint32(), np.uint32), + (pa.uint64(), np.uint64), + (pa.int8(), np.int8), + (pa.int16(), np.int16), + (pa.int32(), np.int32), + (pa.int64(), np.int64), + (pa.float16(), np.float16), + (pa.float32(), np.float32), + (pa.float64(), np.float64), + ] +) +def test_dlpack(value_type, np_type): + if Version(np.__version__) < Version("1.24.0"): + pytest.skip("No dlpack support in numpy versions older than 1.22.0, " + "strict keyword in assert_array_equal added in numpy version " + "1.24.0") + + expected = np.array([1, 2, 3], dtype=np_type) + arr = pa.array(expected, type=value_type) + check_dlpack_export(arr, expected) + + arr_sliced = arr.slice(1, 1) + expected = np.array([2], dtype=np_type) + check_dlpack_export(arr_sliced, expected) + + arr_sliced = arr.slice(0, 1) + expected = np.array([1], dtype=np_type) + check_dlpack_export(arr_sliced, expected) + + arr_sliced = arr.slice(1) + expected = np.array([2, 3], dtype=np_type) + check_dlpack_export(arr_sliced, expected) + + arr_zero = pa.array([], type=value_type) + expected = np.array([], dtype=np_type) + check_dlpack_export(arr_zero, expected) + + +def test_dlpack_not_supported(): + if Version(np.__version__) < Version("1.22.0"): + pytest.skip("No dlpack support in numpy versions older than 1.22.0.") + + arr = pa.array([1, None, 3]) + with pytest.raises(TypeError, match="Can 
only use DLPack " + "on arrays with no nulls."): + np.from_dlpack(arr) + + arr = pa.array( + [[0, 1], [3, 4]], + type=pa.list_(pa.int32()) + ) + with pytest.raises(TypeError, match="DataType is not compatible with DLPack spec"): + np.from_dlpack(arr) + + arr = pa.array([]) + with pytest.raises(TypeError, match="DataType is not compatible with DLPack spec"): + np.from_dlpack(arr) + + # DLPack doesn't support bit-packed boolean values + arr = pa.array([True, False, True]) + with pytest.raises(TypeError, match="Bit-packed boolean data type " + "not supported by DLPack."): + np.from_dlpack(arr) + + +def test_dlpack_cuda_not_supported(): + cuda = pytest.importorskip("pyarrow.cuda") + + schema = pa.schema([pa.field('f0', pa.int16())]) + a0 = pa.array([1, 2, 3], type=pa.int16()) + batch = pa.record_batch([a0], schema=schema) + + cbuf = cuda.serialize_record_batch(batch, cuda.Context(0)) + cbatch = cuda.read_record_batch(cbuf, batch.schema) + carr = cbatch["f0"] + + # CudaBuffers not yet supported + with pytest.raises(NotImplementedError, match="DLPack support is implemented " + "only for buffers on CPU device."): + np.from_dlpack(carr) + + with pytest.raises(NotImplementedError, match="DLPack support is implemented " + "only for buffers on CPU device."): + carr.__dlpack_device__() diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_exec_plan.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_exec_plan.py new file mode 100644 index 0000000000000000000000000000000000000000..d85a2c215248180c2c20232a6b599067776195b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_exec_plan.py @@ -0,0 +1,337 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
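+ +# Tests for the Acero-backed helpers exposed to Python (_perform_join and +# _filter_table from pyarrow.acero): join types, key collisions and suffixes, +# ordering guarantees, and joins on extension-typed key columns.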
+ +import pytest +import pyarrow as pa +import pyarrow.compute as pc +from .test_extension_type import IntegerType + +try: + import pyarrow.dataset as ds +except ImportError: + pass + +try: + from pyarrow.acero import _perform_join, _filter_table +except ImportError: + pass + +pytestmark = pytest.mark.acero + + +def test_joins_corner_cases(): + t1 = pa.Table.from_pydict({ + "colA": [1, 2, 3, 4, 5, 6], + "col2": ["a", "b", "c", "d", "e", "f"] + }) + + t2 = pa.Table.from_pydict({ + "colB": [1, 2, 3, 4, 5], + "col3": ["A", "B", "C", "D", "E"] + }) + + with pytest.raises(pa.ArrowInvalid): + _perform_join("left outer", t1, "", t2, "") + + with pytest.raises(TypeError): + _perform_join("left outer", None, "colA", t2, "colB") + + with pytest.raises(ValueError): + _perform_join("super mario join", t1, "colA", t2, "colB") + + +@pytest.mark.parametrize("jointype,expected", [ + ("left semi", { + "colA": [1, 2], + "col2": ["a", "b"] + }), + ("right semi", { + "colB": [1, 2], + "col3": ["A", "B"] + }), + ("left anti", { + "colA": [6], + "col2": ["f"] + }), + ("right anti", { + "colB": [99], + "col3": ["Z"] + }), + ("inner", { + "colA": [1, 2], + "col2": ["a", "b"], + "colB": [1, 2], + "col3": ["A", "B"] + }), + ("left outer", { + "colA": [1, 2, 6], + "col2": ["a", "b", "f"], + "colB": [1, 2, None], + "col3": ["A", "B", None] + }), + ("right outer", { + "colA": [1, 2, None], + "col2": ["a", "b", None], + "colB": [1, 2, 99], + "col3": ["A", "B", "Z"] + }), + ("full outer", { + "colA": [1, 2, 6, None], + "col2": ["a", "b", "f", None], + "colB": [1, 2, None, 99], + "col3": ["A", "B", None, "Z"] + }) +]) +@pytest.mark.parametrize("use_threads", [True, False]) +@pytest.mark.parametrize("coalesce_keys", [True, False]) +@pytest.mark.parametrize("use_datasets", + [False, pytest.param(True, marks=pytest.mark.dataset)]) +def test_joins(jointype, expected, use_threads, coalesce_keys, use_datasets): + # Allocate table here instead of using parametrize + # this prevents having arrow allocated memory forever around. 
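+ # (pytest keeps parametrize argument values referenced for the whole test + # session, so building the pa.table lazily inside the test lets its Arrow + # buffers be released as soon as the test finishes)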
+ expected = pa.table(expected) + + t1 = pa.Table.from_pydict({ + "colA": [1, 2, 6], + "col2": ["a", "b", "f"] + }) + + t2 = pa.Table.from_pydict({ + "colB": [99, 2, 1], + "col3": ["Z", "B", "A"] + }) + + if use_datasets: + t1 = ds.dataset([t1]) + t2 = ds.dataset([t2]) + + r = _perform_join(jointype, t1, "colA", t2, "colB", + use_threads=use_threads, coalesce_keys=coalesce_keys) + r = r.combine_chunks() + if "right" in jointype: + r = r.sort_by("colB") + else: + r = r.sort_by("colA") + if coalesce_keys: + if jointype in ("inner", "left outer"): + expected = expected.drop(["colB"]) + elif jointype == "right outer": + expected = expected.drop(["colA"]) + elif jointype == "full outer": + expected = expected.drop(["colB"]).set_column(0, "colA", [[1, 2, 6, 99]]) + assert r == expected + + +def test_table_join_collisions(): + t1 = pa.table({ + "colA": [1, 2, 6], + "colB": [10, 20, 60], + "colVals": ["a", "b", "f"] + }) + + t2 = pa.table({ + "colB": [99, 20, 10], + "colVals": ["Z", "B", "A"], + "colUniq": [100, 200, 300], + "colA": [99, 2, 1], + }) + + result = _perform_join( + "full outer", t1, ["colA", "colB"], t2, ["colA", "colB"]) + result = result.combine_chunks() + result = result.sort_by("colUniq") + assert result == pa.table([ + [None, 2, 1, 6], + [None, 20, 10, 60], + [None, "b", "a", "f"], + [99, 20, 10, None], + ["Z", "B", "A", None], + [100, 200, 300, None], + [99, 2, 1, None], + ], names=["colA", "colB", "colVals", "colB", "colVals", "colUniq", "colA"]) + + result = _perform_join("full outer", t1, "colA", + t2, "colA", right_suffix="_r", + coalesce_keys=False) + result = result.combine_chunks() + result = result.sort_by("colA") + assert result == pa.table({ + "colA": [1, 2, 6, None], + "colB": [10, 20, 60, None], + "colVals": ["a", "b", "f", None], + "colB_r": [10, 20, None, 99], + "colVals_r": ["A", "B", None, "Z"], + "colUniq": [300, 200, None, 100], + "colA_r": [1, 2, None, 99], + }) + + result = _perform_join("full outer", t1, "colA", + t2, "colA", right_suffix="_r", + coalesce_keys=True) + result = result.combine_chunks() + result = result.sort_by("colA") + assert result == pa.table({ + "colA": [1, 2, 6, 99], + "colB": [10, 20, 60, None], + "colVals": ["a", "b", "f", None], + "colB_r": [10, 20, None, 99], + "colVals_r": ["A", "B", None, "Z"], + "colUniq": [300, 200, None, 100] + }) + + +def test_table_join_keys_order(): + t1 = pa.table({ + "colB": [10, 20, 60], + "colA": [1, 2, 6], + "colVals": ["a", "b", "f"] + }) + + t2 = pa.table({ + "colVals": ["Z", "B", "A"], + "colX": [99, 2, 1], + }) + + result = _perform_join("full outer", t1, "colA", t2, "colX", + left_suffix="_l", right_suffix="_r", + coalesce_keys=True) + result = result.combine_chunks() + result = result.sort_by("colA") + assert result == pa.table({ + "colB": [10, 20, 60, None], + "colA": [1, 2, 6, 99], + "colVals_l": ["a", "b", "f", None], + "colVals_r": ["A", "B", None, "Z"], + }) + + +def test_filter_table_errors(): + t = pa.table({ + "a": [1, 2, 3, 4, 5], + "b": [10, 20, 30, 40, 50] + }) + + with pytest.raises(pa.ArrowTypeError): + _filter_table(t, pc.divide(pc.field("a"), pc.scalar(2))) + + with pytest.raises(pa.ArrowInvalid): + _filter_table(t, (pc.field("Z") <= pc.scalar(2))) + + +def test_filter_table(): + t = pa.table({ + "a": [1, 2, 3, 4, 5], + "b": [10, 20, 30, 40, 50] + }) + + result = _filter_table( + t, (pc.field("a") <= pc.scalar(3)) & (pc.field("b") == pc.scalar(20)), + ) + assert result == pa.table({ + "a": [2], + "b": [20] + }) + + result = _filter_table(t, pc.field("b") > pc.scalar(30)) + assert 
result == pa.table({ + "a": [4, 5], + "b": [40, 50] + }) + + +def test_filter_table_ordering(): + table1 = pa.table({'a': [1, 2, 3, 4], 'b': ['a'] * 4}) + table2 = pa.table({'a': [1, 2, 3, 4], 'b': ['b'] * 4}) + table = pa.concat_tables([table1, table2]) + + for _ in range(20): + # 20 seems to consistently cause errors when order is not preserved. + # If the order problem is reintroduced this test will become flaky + # which is still a signal that the order is not preserved. + r = _filter_table(table, pc.field('a') == 1) + assert r["b"] == pa.chunked_array([["a"], ["b"]]) + + +def test_complex_filter_table(): + t = pa.table({ + "a": [1, 2, 3, 4, 5, 6, 6], + "b": [10, 20, 30, 40, 50, 60, 61] + }) + + result = _filter_table( + t, ((pc.bit_wise_and(pc.field("a"), pc.scalar(1)) == pc.scalar(0)) & + (pc.multiply(pc.field("a"), pc.scalar(10)) == pc.field("b"))) + ) + + assert result == pa.table({ + "a": [2, 4, 6], # second six must be omitted because 6*10 != 61 + "b": [20, 40, 60] + }) + + +def test_join_extension_array_column(): + storage = pa.array([1, 2, 3], type=pa.int64()) + ty = IntegerType() + ext_array = pa.ExtensionArray.from_storage(ty, storage) + dict_array = pa.DictionaryArray.from_arrays( + pa.array([0, 2, 1]), pa.array(['a', 'b', 'c'])) + t1 = pa.table({ + "colA": [1, 2, 6], + "colB": ext_array, + "colVals": ext_array, + }) + + t2 = pa.table({ + "colA": [99, 2, 1], + "colC": ext_array, + }) + + t3 = pa.table({ + "colA": [99, 2, 1], + "colC": ext_array, + "colD": dict_array, + }) + + result = _perform_join( + "left outer", t1, ["colA"], t2, ["colA"]) + assert result["colVals"] == pa.chunked_array(ext_array) + + result = _perform_join( + "left outer", t1, ["colB"], t2, ["colC"]) + assert result["colB"] == pa.chunked_array(ext_array) + + result = _perform_join( + "left outer", t1, ["colA"], t3, ["colA"]) + assert result["colVals"] == pa.chunked_array(ext_array) + + result = _perform_join( + "left outer", t1, ["colB"], t3, ["colC"]) + assert result["colB"] == pa.chunked_array(ext_array) + + +def test_group_by_ordering(): + # GH-36709 - preserve ordering in groupby by setting use_threads=False + table1 = pa.table({'a': [1, 2, 3, 4], 'b': ['a'] * 4}) + table2 = pa.table({'a': [1, 2, 3, 4], 'b': ['b'] * 4}) + table = pa.concat_tables([table1, table2]) + + for _ in range(50): + # 50 seems to consistently cause errors when order is not preserved. + # If the order problem is reintroduced this test will become flaky + # which is still a signal that the order is not preserved. + result = table.group_by("b", use_threads=False).aggregate([]) + assert result["b"] == pa.chunked_array([["a"], ["b"]]) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_extension_type.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_extension_type.py new file mode 100644 index 0000000000000000000000000000000000000000..fe38bf651baae20610e8ccf3cd19b2f5b0ebce58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_extension_type.py @@ -0,0 +1,1577 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import contextlib +import os +import shutil +import subprocess +import weakref +from uuid import uuid4, UUID +import sys + +import numpy as np +import pyarrow as pa +from pyarrow.vendored.version import Version + +import pytest + + +@contextlib.contextmanager +def registered_extension_type(ext_type): + pa.register_extension_type(ext_type) + try: + yield + finally: + pa.unregister_extension_type(ext_type.extension_name) + + +@contextlib.contextmanager +def enabled_auto_load(): + pa.PyExtensionType.set_auto_load(True) + try: + yield + finally: + pa.PyExtensionType.set_auto_load(False) + + +class TinyIntType(pa.ExtensionType): + + def __init__(self): + super().__init__(pa.int8(), 'pyarrow.tests.TinyIntType') + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + assert serialized == b'' + assert storage_type == pa.int8() + return cls() + + +class IntegerType(pa.ExtensionType): + + def __init__(self): + super().__init__(pa.int64(), 'pyarrow.tests.IntegerType') + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + assert serialized == b'' + assert storage_type == pa.int64() + return cls() + + +class IntegerEmbeddedType(pa.ExtensionType): + + def __init__(self): + super().__init__(IntegerType(), 'pyarrow.tests.IntegerType') + + def __arrow_ext_serialize__(self): + # XXX pa.BaseExtensionType should expose C++ serialization method + return self.storage_type.__arrow_ext_serialize__() + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + deserialized_storage_type = storage_type.__arrow_ext_deserialize__( + serialized) + assert deserialized_storage_type == storage_type + return cls() + + +class UuidScalarType(pa.ExtensionScalar): + def as_py(self): + return None if self.value is None else UUID(bytes=self.value.as_py()) + + +class UuidType(pa.ExtensionType): + + def __init__(self): + super().__init__(pa.binary(16), 'pyarrow.tests.UuidType') + + def __arrow_ext_scalar_class__(self): + return UuidScalarType + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + return cls() + + +class UuidType2(pa.ExtensionType): + + def __init__(self): + super().__init__(pa.binary(16), 'pyarrow.tests.UuidType2') + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + return cls() + + +class LabelType(pa.ExtensionType): + + def __init__(self): + super().__init__(pa.string(), 'pyarrow.tests.LabelType') + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + return cls() + + +class ParamExtType(pa.ExtensionType): + + def __init__(self, width): + self._width = width + super().__init__(pa.binary(width), 'pyarrow.tests.ParamExtType') + + @property + def width(self): + return self._width + + def __arrow_ext_serialize__(self): + return str(self._width).encode() + + @classmethod + def 
__arrow_ext_deserialize__(cls, storage_type, serialized): + width = int(serialized.decode()) + assert storage_type == pa.binary(width) + return cls(width) + + +class MyStructType(pa.ExtensionType): + storage_type = pa.struct([('left', pa.int64()), + ('right', pa.int64())]) + + def __init__(self): + super().__init__(self.storage_type, 'pyarrow.tests.MyStructType') + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + assert serialized == b'' + assert storage_type == cls.storage_type + return cls() + + +class MyListType(pa.ExtensionType): + + def __init__(self, storage_type): + assert isinstance(storage_type, pa.ListType) + super().__init__(storage_type, 'pyarrow.tests.MyListType') + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + assert serialized == b'' + return cls(storage_type) + + +class AnnotatedType(pa.ExtensionType): + """ + Generic extension type that can store any storage type. + """ + + def __init__(self, storage_type, annotation): + self.annotation = annotation + super().__init__(storage_type, 'pyarrow.tests.AnnotatedType') + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + assert serialized == b'' + return cls(storage_type) + + +class LegacyIntType(pa.PyExtensionType): + + def __init__(self): + pa.PyExtensionType.__init__(self, pa.int8()) + + def __reduce__(self): + return LegacyIntType, () + + +def ipc_write_batch(batch): + stream = pa.BufferOutputStream() + writer = pa.RecordBatchStreamWriter(stream, batch.schema) + writer.write_batch(batch) + writer.close() + return stream.getvalue() + + +def ipc_read_batch(buf): + reader = pa.RecordBatchStreamReader(buf) + return reader.read_next_batch() + + +def test_ext_type_basics(): + ty = UuidType() + assert ty.extension_name == "pyarrow.tests.UuidType" + + +def test_ext_type_str(): + ty = IntegerType() + expected = "extension>" + assert str(ty) == expected + assert pa.DataType.__str__(ty) == expected + + +def test_ext_type_repr(): + ty = IntegerType() + assert repr(ty) == "IntegerType(DataType(int64))" + + +def test_ext_type__lifetime(): + ty = UuidType() + wr = weakref.ref(ty) + del ty + assert wr() is None + + +def test_ext_type__storage_type(): + ty = UuidType() + assert ty.storage_type == pa.binary(16) + assert ty.__class__ is UuidType + ty = ParamExtType(5) + assert ty.storage_type == pa.binary(5) + assert ty.__class__ is ParamExtType + + +def test_ext_type_as_py(): + ty = UuidType() + expected = uuid4() + scalar = pa.ExtensionScalar.from_storage(ty, expected.bytes) + assert scalar.as_py() == expected + + # test array + uuids = [uuid4() for _ in range(3)] + storage = pa.array([uuid.bytes for uuid in uuids], type=pa.binary(16)) + arr = pa.ExtensionArray.from_storage(ty, storage) + + # Works for __get_item__ + for i, expected in enumerate(uuids): + assert arr[i].as_py() == expected + + # Works for __iter__ + for result, expected in zip(arr, uuids): + assert result.as_py() == expected + + # test chunked array + data = [ + pa.ExtensionArray.from_storage(ty, storage), + pa.ExtensionArray.from_storage(ty, storage) + ] + carr = pa.chunked_array(data) + for i, expected in enumerate(uuids + uuids): + assert carr[i].as_py() == expected + + for result, expected in zip(carr, uuids + uuids): + assert result.as_py() == expected + + +def test_uuid_type_pickle(pickle_module): + for proto in range(0, 
pickle_module.HIGHEST_PROTOCOL + 1): + ty = UuidType() + ser = pickle_module.dumps(ty, protocol=proto) + del ty + ty = pickle_module.loads(ser) + wr = weakref.ref(ty) + assert ty.extension_name == "pyarrow.tests.UuidType" + del ty + assert wr() is None + + +def test_ext_type_equality(): + a = ParamExtType(5) + b = ParamExtType(6) + c = ParamExtType(6) + assert a != b + assert b == c + d = UuidType() + e = UuidType() + assert a != d + assert d == e + + +def test_ext_array_basics(): + ty = ParamExtType(3) + storage = pa.array([b"foo", b"bar"], type=pa.binary(3)) + arr = pa.ExtensionArray.from_storage(ty, storage) + arr.validate() + assert arr.type is ty + assert arr.storage.equals(storage) + + +def test_ext_array_lifetime(): + ty = ParamExtType(3) + storage = pa.array([b"foo", b"bar"], type=pa.binary(3)) + arr = pa.ExtensionArray.from_storage(ty, storage) + + refs = [weakref.ref(ty), weakref.ref(arr), weakref.ref(storage)] + del ty, storage, arr + for ref in refs: + assert ref() is None + + +def test_ext_array_to_pylist(): + ty = ParamExtType(3) + storage = pa.array([b"foo", b"bar", None], type=pa.binary(3)) + arr = pa.ExtensionArray.from_storage(ty, storage) + + assert arr.to_pylist() == [b"foo", b"bar", None] + + +def test_ext_array_errors(): + ty = ParamExtType(4) + storage = pa.array([b"foo", b"bar"], type=pa.binary(3)) + with pytest.raises(TypeError, match="Incompatible storage type"): + pa.ExtensionArray.from_storage(ty, storage) + + +def test_ext_array_equality(): + storage1 = pa.array([b"0123456789abcdef"], type=pa.binary(16)) + storage2 = pa.array([b"0123456789abcdef"], type=pa.binary(16)) + storage3 = pa.array([], type=pa.binary(16)) + ty1 = UuidType() + ty2 = ParamExtType(16) + + a = pa.ExtensionArray.from_storage(ty1, storage1) + b = pa.ExtensionArray.from_storage(ty1, storage2) + assert a.equals(b) + c = pa.ExtensionArray.from_storage(ty1, storage3) + assert not a.equals(c) + d = pa.ExtensionArray.from_storage(ty2, storage1) + assert not a.equals(d) + e = pa.ExtensionArray.from_storage(ty2, storage2) + assert d.equals(e) + f = pa.ExtensionArray.from_storage(ty2, storage3) + assert not d.equals(f) + + +def test_ext_array_wrap_array(): + ty = ParamExtType(3) + storage = pa.array([b"foo", b"bar", None], type=pa.binary(3)) + arr = ty.wrap_array(storage) + arr.validate(full=True) + assert isinstance(arr, pa.ExtensionArray) + assert arr.type == ty + assert arr.storage == storage + + storage = pa.chunked_array([[b"abc", b"def"], [b"ghi"]], + type=pa.binary(3)) + arr = ty.wrap_array(storage) + arr.validate(full=True) + assert isinstance(arr, pa.ChunkedArray) + assert arr.type == ty + assert arr.chunk(0).storage == storage.chunk(0) + assert arr.chunk(1).storage == storage.chunk(1) + + # Wrong storage type + storage = pa.array([b"foo", b"bar", None]) + with pytest.raises(TypeError, match="Incompatible storage type"): + ty.wrap_array(storage) + + # Not an array or chunked array + with pytest.raises(TypeError, match="Expected array or chunked array"): + ty.wrap_array(None) + + +def test_ext_scalar_from_array(): + data = [b"0123456789abcdef", b"0123456789abcdef", + b"zyxwvutsrqponmlk", None] + storage = pa.array(data, type=pa.binary(16)) + ty1 = UuidType() + ty2 = ParamExtType(16) + ty3 = UuidType2() + + a = pa.ExtensionArray.from_storage(ty1, storage) + b = pa.ExtensionArray.from_storage(ty2, storage) + c = pa.ExtensionArray.from_storage(ty3, storage) + + scalars_a = list(a) + assert len(scalars_a) == 4 + + assert ty1.__arrow_ext_scalar_class__() == UuidScalarType + assert isinstance(a[0], 
UuidScalarType) + assert isinstance(scalars_a[0], UuidScalarType) + + for s, val in zip(scalars_a, data): + assert isinstance(s, pa.ExtensionScalar) + assert s.is_valid == (val is not None) + assert s.type == ty1 + if val is not None: + assert s.value == pa.scalar(val, storage.type) + assert s.as_py() == UUID(bytes=val) + else: + assert s.value is None + + scalars_b = list(b) + assert len(scalars_b) == 4 + + for sa, sb in zip(scalars_a, scalars_b): + assert isinstance(sb, pa.ExtensionScalar) + assert sa.is_valid == sb.is_valid + if sa.as_py() is None: + assert sa.as_py() == sb.as_py() + else: + assert sa.as_py().bytes == sb.as_py() + assert sa != sb + + scalars_c = list(c) + assert len(scalars_c) == 4 + + for s, val in zip(scalars_c, data): + assert isinstance(s, pa.ExtensionScalar) + assert s.is_valid == (val is not None) + assert s.type == ty3 + if val is not None: + assert s.value == pa.scalar(val, storage.type) + assert s.as_py() == val + else: + assert s.value is None + + assert a.to_pylist() == [UUID(bytes=x) if x else None for x in data] + + +def test_ext_scalar_from_storage(): + ty = UuidType() + + s = pa.ExtensionScalar.from_storage(ty, None) + assert isinstance(s, pa.ExtensionScalar) + assert s.type == ty + assert s.is_valid is False + assert s.value is None + + s = pa.ExtensionScalar.from_storage(ty, b"0123456789abcdef") + assert isinstance(s, pa.ExtensionScalar) + assert s.type == ty + assert s.is_valid is True + assert s.value == pa.scalar(b"0123456789abcdef", ty.storage_type) + + s = pa.ExtensionScalar.from_storage(ty, pa.scalar(None, ty.storage_type)) + assert isinstance(s, pa.ExtensionScalar) + assert s.type == ty + assert s.is_valid is False + assert s.value is None + + s = pa.ExtensionScalar.from_storage( + ty, pa.scalar(b"0123456789abcdef", ty.storage_type)) + assert isinstance(s, pa.ExtensionScalar) + assert s.type == ty + assert s.is_valid is True + assert s.value == pa.scalar(b"0123456789abcdef", ty.storage_type) + + +def test_ext_array_pickling(pickle_module): + for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1): + ty = ParamExtType(3) + storage = pa.array([b"foo", b"bar"], type=pa.binary(3)) + arr = pa.ExtensionArray.from_storage(ty, storage) + ser = pickle_module.dumps(arr, protocol=proto) + del ty, storage, arr + arr = pickle_module.loads(ser) + arr.validate() + assert isinstance(arr, pa.ExtensionArray) + assert arr.type == ParamExtType(3) + assert arr.type.storage_type == pa.binary(3) + assert arr.storage.type == pa.binary(3) + assert arr.storage.to_pylist() == [b"foo", b"bar"] + + +def test_ext_array_conversion_to_numpy(): + storage1 = pa.array([1, 2, 3], type=pa.int64()) + storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3)) + ty1 = IntegerType() + ty2 = ParamExtType(3) + + arr1 = pa.ExtensionArray.from_storage(ty1, storage1) + arr2 = pa.ExtensionArray.from_storage(ty2, storage2) + + result = arr1.to_numpy() + expected = np.array([1, 2, 3], dtype="int64") + np.testing.assert_array_equal(result, expected) + + with pytest.raises(ValueError, match="zero_copy_only was True"): + arr2.to_numpy() + result = arr2.to_numpy(zero_copy_only=False) + expected = np.array([b"123", b"456", b"789"]) + np.testing.assert_array_equal(result, expected) + + +@pytest.mark.pandas +def test_ext_array_conversion_to_pandas(): + import pandas as pd + + storage1 = pa.array([1, 2, 3], type=pa.int64()) + storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3)) + ty1 = IntegerType() + ty2 = ParamExtType(3) + + arr1 = pa.ExtensionArray.from_storage(ty1, storage1) + 
arr2 = pa.ExtensionArray.from_storage(ty2, storage2) + + result = arr1.to_pandas() + expected = pd.Series([1, 2, 3], dtype="int64") + pd.testing.assert_series_equal(result, expected) + + result = arr2.to_pandas() + expected = pd.Series([b"123", b"456", b"789"], dtype=object) + pd.testing.assert_series_equal(result, expected) + + +@pytest.fixture +def struct_w_ext_data(): + storage1 = pa.array([1, 2, 3], type=pa.int64()) + storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3)) + ty1 = IntegerType() + ty2 = ParamExtType(3) + + arr1 = pa.ExtensionArray.from_storage(ty1, storage1) + arr2 = pa.ExtensionArray.from_storage(ty2, storage2) + + sarr1 = pa.StructArray.from_arrays([arr1], ["f0"]) + sarr2 = pa.StructArray.from_arrays([arr2], ["f1"]) + + return [sarr1, sarr2] + + +def test_struct_w_ext_array_to_numpy(struct_w_ext_data): + # ARROW-15291 + # Check that we don't segfault when trying to build + # a numpy array from a StructArray with a field being + # an ExtensionArray + + result = struct_w_ext_data[0].to_numpy(zero_copy_only=False) + expected = np.array([{'f0': 1}, {'f0': 2}, + {'f0': 3}], dtype=object) + np.testing.assert_array_equal(result, expected) + + result = struct_w_ext_data[1].to_numpy(zero_copy_only=False) + expected = np.array([{'f1': b'123'}, {'f1': b'456'}, + {'f1': b'789'}], dtype=object) + np.testing.assert_array_equal(result, expected) + + +@pytest.mark.pandas +def test_struct_w_ext_array_to_pandas(struct_w_ext_data): + # ARROW-15291 + # Check that we don't segfault when trying to build + # a Pandas dataframe from a StructArray with a field + # being an ExtensionArray + import pandas as pd + + result = struct_w_ext_data[0].to_pandas() + expected = pd.Series([{'f0': 1}, {'f0': 2}, + {'f0': 3}], dtype=object) + pd.testing.assert_series_equal(result, expected) + + result = struct_w_ext_data[1].to_pandas() + expected = pd.Series([{'f1': b'123'}, {'f1': b'456'}, + {'f1': b'789'}], dtype=object) + pd.testing.assert_series_equal(result, expected) + + +def test_cast_kernel_on_extension_arrays(): + # test array casting + storage = pa.array([1, 2, 3, 4], pa.int64()) + arr = pa.ExtensionArray.from_storage(IntegerType(), storage) + + # test that no allocation happens during identity cast + allocated_before_cast = pa.total_allocated_bytes() + casted = arr.cast(pa.int64()) + assert pa.total_allocated_bytes() == allocated_before_cast + + cases = [ + (pa.int64(), pa.Int64Array), + (pa.int32(), pa.Int32Array), + (pa.int16(), pa.Int16Array), + (pa.uint64(), pa.UInt64Array), + (pa.uint32(), pa.UInt32Array), + (pa.uint16(), pa.UInt16Array) + ] + for typ, klass in cases: + casted = arr.cast(typ) + assert casted.type == typ + assert isinstance(casted, klass) + + # test chunked array casting + arr = pa.chunked_array([arr, arr]) + casted = arr.cast(pa.int16()) + assert casted.type == pa.int16() + assert isinstance(casted, pa.ChunkedArray) + + +@pytest.mark.parametrize("data,ty", ( + ([1, 2], pa.int32), + ([1, 2], pa.int64), + (["1", "2"], pa.string), + ([b"1", b"2"], pa.binary), + ([1.0, 2.0], pa.float32), + ([1.0, 2.0], pa.float64) +)) +def test_casting_to_extension_type(data, ty): + arr = pa.array(data, ty()) + out = arr.cast(IntegerType()) + assert isinstance(out, pa.ExtensionArray) + assert out.type == IntegerType() + assert out.to_pylist() == [1, 2] + + +def test_cast_between_extension_types(): + array = pa.array([1, 2, 3], pa.int8()) + + tiny_int_arr = array.cast(TinyIntType()) + assert tiny_int_arr.type == TinyIntType() + + # Casting between extension types w/ different storage 
types not okay. + msg = ("Casting from 'extension<.*?>' " + "to different extension type " + "'extension<.*?>' not permitted. " + "One can first cast to the storage type, " + "then to the extension type." + ) + with pytest.raises(TypeError, match=msg): + tiny_int_arr.cast(IntegerType()) + tiny_int_arr.cast(pa.int64()).cast(IntegerType()) + + # Between the same extension types is okay + array = pa.array([b'1' * 16, b'2' * 16], pa.binary(16)).cast(UuidType()) + out = array.cast(UuidType()) + assert out.type == UuidType() + + # Will still fail casting between extensions who share storage type, + # can only cast between exactly the same extension types. + with pytest.raises(TypeError, match='Casting from *'): + array.cast(UuidType2()) + + +def test_cast_to_extension_with_extension_storage(): + # Test casting directly, and IntegerType -> IntegerEmbeddedType + array = pa.array([1, 2, 3], pa.int64()) + array.cast(IntegerEmbeddedType()) + array.cast(IntegerType()).cast(IntegerEmbeddedType()) + + +@pytest.mark.parametrize("data,type_factory", ( + # list + ([[1, 2, 3]], lambda: pa.list_(IntegerType())), + # struct + ([{"foo": 1}], lambda: pa.struct([("foo", IntegerType())])), + # list> + ([[{"foo": 1}]], lambda: pa.list_(pa.struct([("foo", IntegerType())]))), + # struct> + ([{"foo": [1, 2, 3]}], lambda: pa.struct( + [("foo", pa.list_(IntegerType()))])), +)) +def test_cast_nested_extension_types(data, type_factory): + ty = type_factory() + a = pa.array(data) + b = a.cast(ty) + assert b.type == ty # casted to target extension + assert b.cast(a.type) # and can cast back + + +def test_casting_dict_array_to_extension_type(): + storage = pa.array([b"0123456789abcdef"], type=pa.binary(16)) + arr = pa.ExtensionArray.from_storage(UuidType(), storage) + dict_arr = pa.DictionaryArray.from_arrays(pa.array([0, 0], pa.int32()), + arr) + out = dict_arr.cast(UuidType()) + assert isinstance(out, pa.ExtensionArray) + assert out.to_pylist() == [UUID('30313233-3435-3637-3839-616263646566'), + UUID('30313233-3435-3637-3839-616263646566')] + + +def test_concat(): + arr1 = pa.array([1, 2, 3], IntegerType()) + arr2 = pa.array([4, 5, 6], IntegerType()) + + result = pa.concat_arrays([arr1, arr2]) + expected = pa.array([1, 2, 3, 4, 5, 6], IntegerType()) + assert result.equals(expected) + + # nested in a struct + struct_arr1 = pa.StructArray.from_arrays([arr1], names=["a"]) + struct_arr2 = pa.StructArray.from_arrays([arr2], names=["a"]) + result = pa.concat_arrays([struct_arr1, struct_arr2]) + expected = pa.StructArray.from_arrays([expected], names=["a"]) + assert result.equals(expected) + + +def test_null_storage_type(): + ext_type = AnnotatedType(pa.null(), {"key": "value"}) + storage = pa.array([None] * 10, pa.null()) + arr = pa.ExtensionArray.from_storage(ext_type, storage) + assert arr.null_count == 10 + arr.validate(full=True) + + +def example_batch(): + ty = ParamExtType(3) + storage = pa.array([b"foo", b"bar"], type=pa.binary(3)) + arr = pa.ExtensionArray.from_storage(ty, storage) + return pa.RecordBatch.from_arrays([arr], ["exts"]) + + +def check_example_batch(batch, *, expect_extension): + arr = batch.column(0) + if expect_extension: + assert isinstance(arr, pa.ExtensionArray) + assert arr.type.storage_type == pa.binary(3) + assert arr.storage.to_pylist() == [b"foo", b"bar"] + else: + assert arr.type == pa.binary(3) + assert arr.to_pylist() == [b"foo", b"bar"] + return arr + + +def test_ipc_unregistered(): + batch = example_batch() + buf = ipc_write_batch(batch) + del batch + + batch = ipc_read_batch(buf) + 
batch.validate(full=True) + check_example_batch(batch, expect_extension=False) + + +def test_ipc_registered(): + with registered_extension_type(ParamExtType(1)): + batch = example_batch() + buf = ipc_write_batch(batch) + del batch + + batch = ipc_read_batch(buf) + batch.validate(full=True) + arr = check_example_batch(batch, expect_extension=True) + assert arr.type == ParamExtType(3) + + +class PeriodArray(pa.ExtensionArray): + pass + + +class PeriodType(pa.ExtensionType): + def __init__(self, freq): + # attributes need to be set first before calling + # super init (as that calls serialize) + self._freq = freq + pa.ExtensionType.__init__(self, pa.int64(), 'test.period') + + @property + def freq(self): + return self._freq + + def __arrow_ext_serialize__(self): + return "freq={}".format(self.freq).encode() + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + serialized = serialized.decode() + assert serialized.startswith("freq=") + freq = serialized.split('=')[1] + return PeriodType(freq) + + def __eq__(self, other): + if isinstance(other, pa.BaseExtensionType): + return (isinstance(self, type(other)) and + self.freq == other.freq) + else: + return NotImplemented + + +class PeriodTypeWithClass(PeriodType): + def __init__(self, freq): + PeriodType.__init__(self, freq) + + def __arrow_ext_class__(self): + return PeriodArray + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + freq = PeriodType.__arrow_ext_deserialize__( + storage_type, serialized).freq + return PeriodTypeWithClass(freq) + + +class PeriodTypeWithToPandasDtype(PeriodType): + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + freq = PeriodType.__arrow_ext_deserialize__( + storage_type, serialized).freq + return PeriodTypeWithToPandasDtype(freq) + + def to_pandas_dtype(self): + import pandas as pd + return pd.PeriodDtype(freq=self.freq) + + +@pytest.fixture(params=[PeriodType('D'), + PeriodTypeWithClass('D'), + PeriodTypeWithToPandasDtype('D')]) +def registered_period_type(request): + # setup + period_type = request.param + period_class = period_type.__arrow_ext_class__() + pa.register_extension_type(period_type) + yield period_type, period_class + # teardown + try: + pa.unregister_extension_type('test.period') + except KeyError: + pass + + +def test_generic_ext_type(): + period_type = PeriodType('D') + assert period_type.extension_name == "test.period" + assert period_type.storage_type == pa.int64() + # default ext_class expected. 
+ assert period_type.__arrow_ext_class__() == pa.ExtensionArray + + +def test_generic_ext_type_ipc(registered_period_type): + period_type, period_class = registered_period_type + storage = pa.array([1, 2, 3, 4], pa.int64()) + arr = pa.ExtensionArray.from_storage(period_type, storage) + batch = pa.RecordBatch.from_arrays([arr], ["ext"]) + # check the built array has exactly the expected clss + assert isinstance(arr, period_class) + + buf = ipc_write_batch(batch) + del batch + batch = ipc_read_batch(buf) + + result = batch.column(0) + # check the deserialized array class is the expected one + assert isinstance(result, period_class) + assert result.type.extension_name == "test.period" + assert arr.storage.to_pylist() == [1, 2, 3, 4] + + # we get back an actual PeriodType + assert isinstance(result.type, PeriodType) + assert result.type.freq == 'D' + assert result.type == period_type + + # using different parametrization as how it was registered + period_type_H = period_type.__class__('H') + assert period_type_H.extension_name == "test.period" + assert period_type_H.freq == 'H' + + arr = pa.ExtensionArray.from_storage(period_type_H, storage) + batch = pa.RecordBatch.from_arrays([arr], ["ext"]) + + buf = ipc_write_batch(batch) + del batch + batch = ipc_read_batch(buf) + result = batch.column(0) + assert isinstance(result.type, PeriodType) + assert result.type.freq == 'H' + assert isinstance(result, period_class) + + +def test_generic_ext_type_ipc_unknown(registered_period_type): + period_type, _ = registered_period_type + storage = pa.array([1, 2, 3, 4], pa.int64()) + arr = pa.ExtensionArray.from_storage(period_type, storage) + batch = pa.RecordBatch.from_arrays([arr], ["ext"]) + + buf = ipc_write_batch(batch) + del batch + + # unregister type before loading again => reading unknown extension type + # as plain array (but metadata in schema's field are preserved) + pa.unregister_extension_type('test.period') + + batch = ipc_read_batch(buf) + result = batch.column(0) + + assert isinstance(result, pa.Int64Array) + ext_field = batch.schema.field('ext') + assert ext_field.metadata == { + b'ARROW:extension:metadata': b'freq=D', + b'ARROW:extension:name': b'test.period' + } + + +def test_generic_ext_type_equality(): + period_type = PeriodType('D') + assert period_type.extension_name == "test.period" + + period_type2 = PeriodType('D') + period_type3 = PeriodType('H') + assert period_type == period_type2 + assert not period_type == period_type3 + + +def test_generic_ext_type_pickling(registered_period_type, pickle_module): + # GH-36038 + for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1): + period_type, _ = registered_period_type + ser = pickle_module.dumps(period_type, protocol=proto) + period_type_pickled = pickle_module.loads(ser) + assert period_type == period_type_pickled + + +def test_generic_ext_array_pickling(registered_period_type, pickle_module): + for proto in range(0, pickle_module.HIGHEST_PROTOCOL + 1): + period_type, _ = registered_period_type + storage = pa.array([1, 2, 3, 4], pa.int64()) + arr = pa.ExtensionArray.from_storage(period_type, storage) + ser = pickle_module.dumps(arr, protocol=proto) + del storage, arr + arr = pickle_module.loads(ser) + arr.validate() + assert isinstance(arr, pa.ExtensionArray) + assert arr.type == period_type + assert arr.type.storage_type == pa.int64() + assert arr.storage.type == pa.int64() + assert arr.storage.to_pylist() == [1, 2, 3, 4] + + +def test_generic_ext_type_register(registered_period_type): + # test that trying to register other type 
does not segfault + with pytest.raises(TypeError): + pa.register_extension_type(pa.string()) + + # register second time raises KeyError + period_type = PeriodType('D') + with pytest.raises(KeyError): + pa.register_extension_type(period_type) + + +@pytest.mark.parquet +def test_parquet_period(tmpdir, registered_period_type): + # Parquet support for primitive extension types + period_type, period_class = registered_period_type + storage = pa.array([1, 2, 3, 4], pa.int64()) + arr = pa.ExtensionArray.from_storage(period_type, storage) + table = pa.table([arr], names=["ext"]) + + import pyarrow.parquet as pq + + filename = tmpdir / 'period_extension_type.parquet' + pq.write_table(table, filename) + + # Stored in parquet as storage type but with extension metadata saved + # in the serialized arrow schema + meta = pq.read_metadata(filename) + assert meta.schema.column(0).physical_type == "INT64" + assert b"ARROW:schema" in meta.metadata + + import base64 + decoded_schema = base64.b64decode(meta.metadata[b"ARROW:schema"]) + schema = pa.ipc.read_schema(pa.BufferReader(decoded_schema)) + # Since the type could be reconstructed, the extension type metadata is + # absent. + assert schema.field("ext").metadata == {} + + # When reading in, properly create extension type if it is registered + result = pq.read_table(filename) + result.validate(full=True) + assert result.schema.field("ext").type == period_type + assert result.schema.field("ext").metadata == {} + # Get the exact array class defined by the registered type. + result_array = result.column("ext").chunk(0) + assert type(result_array) is period_class + + # When the type is not registered, read in as storage type + pa.unregister_extension_type(period_type.extension_name) + result = pq.read_table(filename) + result.validate(full=True) + assert result.schema.field("ext").type == pa.int64() + # The extension metadata is present for roundtripping. + assert result.schema.field("ext").metadata == { + b'ARROW:extension:metadata': b'freq=D', + b'ARROW:extension:name': b'test.period' + } + + +@pytest.mark.parquet +def test_parquet_extension_with_nested_storage(tmpdir): + # Parquet support for extension types with nested storage type + import pyarrow.parquet as pq + + struct_array = pa.StructArray.from_arrays( + [pa.array([0, 1], type="int64"), pa.array([4, 5], type="int64")], + names=["left", "right"]) + list_array = pa.array([[1, 2, 3], [4, 5]], type=pa.list_(pa.int32())) + + mystruct_array = pa.ExtensionArray.from_storage(MyStructType(), + struct_array) + mylist_array = pa.ExtensionArray.from_storage( + MyListType(list_array.type), list_array) + + orig_table = pa.table({'structs': mystruct_array, + 'lists': mylist_array}) + filename = tmpdir / 'nested_extension_storage.parquet' + pq.write_table(orig_table, filename) + + # Unregistered + table = pq.read_table(filename) + table.validate(full=True) + assert table.column('structs').type == struct_array.type + assert table.column('structs').combine_chunks() == struct_array + assert table.column('lists').type == list_array.type + assert table.column('lists').combine_chunks() == list_array + + # Registered + with registered_extension_type(mystruct_array.type): + with registered_extension_type(mylist_array.type): + table = pq.read_table(filename) + table.validate(full=True) + assert table.column('structs').type == mystruct_array.type + assert table.column('lists').type == mylist_array.type + assert table == orig_table + + # Cannot select a subfield of an extension type with + # a struct storage type. 
+ with pytest.raises(pa.ArrowInvalid, + match='without all of its fields'): + pq.ParquetFile(filename).read(columns=['structs.left']) + + +@pytest.mark.parquet +def test_parquet_nested_extension(tmpdir): + # Parquet support for extension types nested in struct or list + import pyarrow.parquet as pq + + ext_type = IntegerType() + storage = pa.array([4, 5, 6, 7], type=pa.int64()) + ext_array = pa.ExtensionArray.from_storage(ext_type, storage) + + # Struct of extensions + struct_array = pa.StructArray.from_arrays( + [storage, ext_array], + names=['ints', 'exts']) + + orig_table = pa.table({'structs': struct_array}) + filename = tmpdir / 'struct_of_ext.parquet' + pq.write_table(orig_table, filename) + + table = pq.read_table(filename) + table.validate(full=True) + assert table.column(0).type == pa.struct({'ints': pa.int64(), + 'exts': pa.int64()}) + with registered_extension_type(ext_type): + table = pq.read_table(filename) + table.validate(full=True) + assert table.column(0).type == struct_array.type + assert table == orig_table + + # List of extensions + list_array = pa.ListArray.from_arrays([0, 1, None, 3], ext_array) + + orig_table = pa.table({'lists': list_array}) + filename = tmpdir / 'list_of_ext.parquet' + pq.write_table(orig_table, filename) + + table = pq.read_table(filename) + table.validate(full=True) + assert table.column(0).type == pa.list_(pa.int64()) + with registered_extension_type(ext_type): + table = pq.read_table(filename) + table.validate(full=True) + assert table.column(0).type == list_array.type + assert table == orig_table + + # Large list of extensions + list_array = pa.LargeListArray.from_arrays([0, 1, None, 3], ext_array) + + orig_table = pa.table({'lists': list_array}) + filename = tmpdir / 'list_of_ext.parquet' + pq.write_table(orig_table, filename) + + table = pq.read_table(filename) + table.validate(full=True) + assert table.column(0).type == pa.large_list(pa.int64()) + with registered_extension_type(ext_type): + table = pq.read_table(filename) + table.validate(full=True) + assert table.column(0).type == list_array.type + assert table == orig_table + + +@pytest.mark.parquet +def test_parquet_extension_nested_in_extension(tmpdir): + # Parquet support for extension> + import pyarrow.parquet as pq + + inner_ext_type = IntegerType() + inner_storage = pa.array([4, 5, 6, 7], type=pa.int64()) + inner_ext_array = pa.ExtensionArray.from_storage(inner_ext_type, + inner_storage) + + list_array = pa.ListArray.from_arrays([0, 1, None, 3], inner_ext_array) + mylist_array = pa.ExtensionArray.from_storage( + MyListType(list_array.type), list_array) + + orig_table = pa.table({'lists': mylist_array}) + filename = tmpdir / 'ext_of_list_of_ext.parquet' + pq.write_table(orig_table, filename) + + table = pq.read_table(filename) + assert table.column(0).type == pa.list_(pa.int64()) + with registered_extension_type(mylist_array.type): + with registered_extension_type(inner_ext_array.type): + table = pq.read_table(filename) + assert table.column(0).type == mylist_array.type + assert table == orig_table + + +def test_to_numpy(): + period_type = PeriodType('D') + storage = pa.array([1, 2, 3, 4], pa.int64()) + arr = pa.ExtensionArray.from_storage(period_type, storage) + + expected = storage.to_numpy() + result = arr.to_numpy() + np.testing.assert_array_equal(result, expected) + + result = np.asarray(arr) + np.testing.assert_array_equal(result, expected) + + # chunked array + a1 = pa.chunked_array([arr, arr]) + a2 = pa.chunked_array([arr, arr], type=period_type) + expected = 
np.hstack([expected, expected]) + + for charr in [a1, a2]: + assert charr.type == period_type + for result in [np.asarray(charr), charr.to_numpy()]: + assert result.dtype == np.int64 + np.testing.assert_array_equal(result, expected) + + # zero chunks + charr = pa.chunked_array([], type=period_type) + assert charr.type == period_type + + for result in [np.asarray(charr), charr.to_numpy()]: + assert result.dtype == np.int64 + np.testing.assert_array_equal(result, np.array([], dtype='int64')) + + +def test_empty_take(): + # https://issues.apache.org/jira/browse/ARROW-13474 + ext_type = IntegerType() + storage = pa.array([], type=pa.int64()) + empty_arr = pa.ExtensionArray.from_storage(ext_type, storage) + + result = empty_arr.filter(pa.array([], pa.bool_())) + assert len(result) == 0 + assert result.equals(empty_arr) + + result = empty_arr.take(pa.array([], pa.int32())) + assert len(result) == 0 + assert result.equals(empty_arr) + + +@pytest.mark.parametrize("data,ty", ( + ([1, 2, 3], IntegerType), + (["cat", "dog", "horse"], LabelType) +)) +@pytest.mark.parametrize( + "into", ["to_numpy", pytest.param("to_pandas", marks=pytest.mark.pandas)]) +def test_extension_array_to_numpy_pandas(data, ty, into): + storage = pa.array(data) + ext_arr = pa.ExtensionArray.from_storage(ty(), storage) + offsets = pa.array([0, 1, 2, 3]) + list_arr = pa.ListArray.from_arrays(offsets, ext_arr) + result = getattr(list_arr, into)(zero_copy_only=False) + + list_arr_storage_type = list_arr.cast(pa.list_(ext_arr.type.storage_type)) + expected = getattr(list_arr_storage_type, into)(zero_copy_only=False) + if into == "to_pandas": + assert result.equals(expected) + else: + assert np.array_equal(result, expected) + + +def test_array_constructor(): + ext_type = IntegerType() + storage = pa.array([1, 2, 3], type=pa.int64()) + expected = pa.ExtensionArray.from_storage(ext_type, storage) + + result = pa.array([1, 2, 3], type=IntegerType()) + assert result.equals(expected) + + result = pa.array(np.array([1, 2, 3]), type=IntegerType()) + assert result.equals(expected) + + result = pa.array(np.array([1.0, 2.0, 3.0]), type=IntegerType()) + assert result.equals(expected) + + +@pytest.mark.pandas +def test_array_constructor_from_pandas(): + import pandas as pd + + ext_type = IntegerType() + storage = pa.array([1, 2, 3], type=pa.int64()) + expected = pa.ExtensionArray.from_storage(ext_type, storage) + + result = pa.array(pd.Series([1, 2, 3]), type=IntegerType()) + assert result.equals(expected) + + result = pa.array( + pd.Series([1, 2, 3], dtype="category"), type=IntegerType() + ) + assert result.equals(expected) + + +@pytest.mark.cython +def test_cpp_extension_in_python(tmpdir): + from .test_cython import ( + setup_template, compiler_opts, test_ld_path, test_util, here) + with tmpdir.as_cwd(): + # Set up temporary workspace + pyx_file = 'extensions.pyx' + shutil.copyfile(os.path.join(here, pyx_file), + os.path.join(str(tmpdir), pyx_file)) + # Create setup.py file + setup_code = setup_template.format(pyx_file=pyx_file, + compiler_opts=compiler_opts, + test_ld_path=test_ld_path) + with open('setup.py', 'w') as f: + f.write(setup_code) + + subprocess_env = test_util.get_modified_env_with_pythonpath() + + # Compile extension module + subprocess.check_call([sys.executable, 'setup.py', + 'build_ext', '--inplace'], + env=subprocess_env) + + sys.path.insert(0, str(tmpdir)) + mod = __import__('extensions') + + uuid_type = mod._make_uuid_type() + assert uuid_type.extension_name == "uuid" + assert uuid_type.storage_type == pa.binary(16) + + 
array = mod._make_uuid_array() + assert array.type == uuid_type + assert array.to_pylist() == [b'abcdefghijklmno0', b'0onmlkjihgfedcba'] + assert array[0].as_py() == b'abcdefghijklmno0' + assert array[1].as_py() == b'0onmlkjihgfedcba' + + buf = ipc_write_batch(pa.RecordBatch.from_arrays([array], ["uuid"])) + + batch = ipc_read_batch(buf) + reconstructed_array = batch.column(0) + assert reconstructed_array.type == uuid_type + assert reconstructed_array == array + + +def test_tensor_type(): + tensor_type = pa.fixed_shape_tensor(pa.int8(), [2, 3]) + assert tensor_type.extension_name == "arrow.fixed_shape_tensor" + assert tensor_type.storage_type == pa.list_(pa.int8(), 6) + assert tensor_type.shape == [2, 3] + assert tensor_type.dim_names is None + assert tensor_type.permutation is None + + tensor_type = pa.fixed_shape_tensor(pa.float64(), [2, 2, 3], + permutation=[0, 2, 1]) + assert tensor_type.extension_name == "arrow.fixed_shape_tensor" + assert tensor_type.storage_type == pa.list_(pa.float64(), 12) + assert tensor_type.shape == [2, 2, 3] + assert tensor_type.dim_names is None + assert tensor_type.permutation == [0, 2, 1] + + tensor_type = pa.fixed_shape_tensor(pa.bool_(), [2, 2, 3], + dim_names=['C', 'H', 'W']) + assert tensor_type.extension_name == "arrow.fixed_shape_tensor" + assert tensor_type.storage_type == pa.list_(pa.bool_(), 12) + assert tensor_type.shape == [2, 2, 3] + assert tensor_type.dim_names == ['C', 'H', 'W'] + assert tensor_type.permutation is None + + +@pytest.mark.parametrize("value_type", (np.int8(), np.int64(), np.float32())) +def test_tensor_class_methods(value_type): + from numpy.lib.stride_tricks import as_strided + arrow_type = pa.from_numpy_dtype(value_type) + + tensor_type = pa.fixed_shape_tensor(arrow_type, [2, 3]) + storage = pa.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], + pa.list_(arrow_type, 6)) + arr = pa.ExtensionArray.from_storage(tensor_type, storage) + expected = np.array( + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], dtype=value_type) + np.testing.assert_array_equal(arr.to_tensor(), expected) + np.testing.assert_array_equal(arr.to_numpy_ndarray(), expected) + + expected = np.array([[[7, 8, 9], [10, 11, 12]]], dtype=value_type) + result = arr[1:].to_numpy_ndarray() + np.testing.assert_array_equal(result, expected) + + values = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]] + flat_arr = np.array(values[0], dtype=value_type) + bw = value_type.itemsize + storage = pa.array(values, pa.list_(arrow_type, 12)) + + tensor_type = pa.fixed_shape_tensor(arrow_type, [2, 2, 3], permutation=[0, 1, 2]) + result = pa.ExtensionArray.from_storage(tensor_type, storage) + expected = np.array( + [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]], dtype=value_type) + np.testing.assert_array_equal(result.to_numpy_ndarray(), expected) + + result = flat_arr.reshape(1, 2, 3, 2) + expected = np.array( + [[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]]], dtype=value_type) + np.testing.assert_array_equal(result, expected) + + tensor_type = pa.fixed_shape_tensor(arrow_type, [2, 2, 3], permutation=[0, 2, 1]) + result = pa.ExtensionArray.from_storage(tensor_type, storage) + expected = as_strided(flat_arr, shape=(1, 2, 3, 2), + strides=(bw * 12, bw * 6, bw, bw * 3)) + np.testing.assert_array_equal(result.to_numpy_ndarray(), expected) + + tensor_type = pa.fixed_shape_tensor(arrow_type, [2, 2, 3], permutation=[2, 0, 1]) + result = pa.ExtensionArray.from_storage(tensor_type, storage) + expected = as_strided(flat_arr, shape=(1, 3, 2, 2), + strides=(bw * 12, bw, bw * 
6, bw * 2)) + np.testing.assert_array_equal(result.to_numpy_ndarray(), expected) + + assert result.type.permutation == [2, 0, 1] + assert result.type.shape == [2, 2, 3] + assert result.to_tensor().shape == (1, 3, 2, 2) + assert result.to_tensor().strides == (12 * bw, 1 * bw, 6 * bw, 2 * bw) + + +@pytest.mark.parametrize("value_type", (np.int8(), np.int64(), np.float32())) +def test_tensor_array_from_numpy(value_type): + from numpy.lib.stride_tricks import as_strided + arrow_type = pa.from_numpy_dtype(value_type) + + arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], + dtype=value_type, order="C") + tensor_array_from_numpy = pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + assert isinstance(tensor_array_from_numpy.type, pa.FixedShapeTensorType) + assert tensor_array_from_numpy.type.value_type == arrow_type + assert tensor_array_from_numpy.type.shape == [2, 3] + + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], + dtype=value_type, order="F") + with pytest.raises(ValueError, match="First stride needs to be largest"): + pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + + flat_arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=value_type) + bw = value_type.itemsize + + arr = flat_arr.reshape(1, 3, 4) + tensor_array_from_numpy = pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + assert tensor_array_from_numpy.type.shape == [3, 4] + assert tensor_array_from_numpy.type.permutation == [0, 1] + assert tensor_array_from_numpy.to_tensor() == pa.Tensor.from_numpy(arr) + + arr = as_strided(flat_arr, shape=(1, 2, 3, 2), + strides=(bw * 12, bw * 6, bw, bw * 3)) + tensor_array_from_numpy = pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + assert tensor_array_from_numpy.type.shape == [2, 2, 3] + assert tensor_array_from_numpy.type.permutation == [0, 2, 1] + assert tensor_array_from_numpy.to_tensor() == pa.Tensor.from_numpy(arr) + + arr = flat_arr.reshape(1, 2, 3, 2) + result = pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + expected = np.array( + [[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]]], dtype=value_type) + np.testing.assert_array_equal(result.to_numpy_ndarray(), expected) + + arr = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], dtype=value_type) + expected = arr[1:] + result = pa.FixedShapeTensorArray.from_numpy_ndarray(arr)[1:].to_numpy_ndarray() + np.testing.assert_array_equal(result, expected) + + arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=value_type) + with pytest.raises(ValueError, match="Cannot convert 1D array or scalar to fixed"): + pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + + arr = np.array(1, dtype=value_type) + with pytest.raises(ValueError, match="Cannot convert 1D array or scalar to fixed"): + pa.FixedShapeTensorArray.from_numpy_ndarray(arr) + + arr = np.array([], dtype=value_type) + + with pytest.raises(ValueError, match="Cannot convert 1D array or scalar to fixed"): + pa.FixedShapeTensorArray.from_numpy_ndarray(arr.reshape((0))) + + with pytest.raises(ValueError, match="Expected a non-empty ndarray"): + pa.FixedShapeTensorArray.from_numpy_ndarray(arr.reshape((0, 3, 2))) + + with pytest.raises(ValueError, match="Expected a non-empty ndarray"): + pa.FixedShapeTensorArray.from_numpy_ndarray(arr.reshape((3, 0, 2))) + + +@pytest.mark.parametrize("tensor_type", ( + pa.fixed_shape_tensor(pa.int8(), [2, 2, 3]), + pa.fixed_shape_tensor(pa.int8(), [2, 2, 3], permutation=[0, 2, 1]), + pa.fixed_shape_tensor(pa.int8(), [2, 2, 3], dim_names=['C', 'H', 'W']) +)) +def test_tensor_type_ipc(tensor_type): + 
storage = pa.array([[1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6]], pa.list_(pa.int8(), 12)) + arr = pa.ExtensionArray.from_storage(tensor_type, storage) + batch = pa.RecordBatch.from_arrays([arr], ["ext"]) + + # check the built array has exactly the expected clss + tensor_class = tensor_type.__arrow_ext_class__() + assert isinstance(arr, tensor_class) + + buf = ipc_write_batch(batch) + del batch + batch = ipc_read_batch(buf) + + result = batch.column(0) + # check the deserialized array class is the expected one + assert isinstance(result, tensor_class) + assert result.type.extension_name == "arrow.fixed_shape_tensor" + assert arr.storage.to_pylist() == [[1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6]] + + # we get back an actual TensorType + assert isinstance(result.type, pa.FixedShapeTensorType) + assert result.type.value_type == pa.int8() + assert result.type.shape == [2, 2, 3] + + +def test_tensor_type_equality(): + tensor_type = pa.fixed_shape_tensor(pa.int8(), [2, 2, 3]) + assert tensor_type.extension_name == "arrow.fixed_shape_tensor" + + tensor_type2 = pa.fixed_shape_tensor(pa.int8(), [2, 2, 3]) + tensor_type3 = pa.fixed_shape_tensor(pa.uint8(), [2, 2, 3]) + assert tensor_type == tensor_type2 + assert not tensor_type == tensor_type3 + + +@pytest.mark.pandas +def test_extension_to_pandas_storage_type(registered_period_type): + period_type, _ = registered_period_type + np_arr = np.array([1, 2, 3, 4], dtype='i8') + storage = pa.array([1, 2, 3, 4], pa.int64()) + arr = pa.ExtensionArray.from_storage(period_type, storage) + + if isinstance(period_type, PeriodTypeWithToPandasDtype): + pandas_dtype = period_type.to_pandas_dtype() + else: + pandas_dtype = np_arr.dtype + + # Test arrays + result = arr.to_pandas() + assert result.dtype == pandas_dtype + + # Test chunked arrays + chunked_arr = pa.chunked_array([arr]) + result = chunked_arr.to_numpy() + assert result.dtype == np_arr.dtype + + result = chunked_arr.to_pandas() + assert result.dtype == pandas_dtype + + # Test Table.to_pandas + data = [ + pa.array([1, 2, 3, 4]), + pa.array(['foo', 'bar', None, None]), + pa.array([True, None, True, False]), + arr + ] + my_schema = pa.schema([('f0', pa.int8()), + ('f1', pa.string()), + ('f2', pa.bool_()), + ('ext', period_type)]) + table = pa.Table.from_arrays(data, schema=my_schema) + result = table.to_pandas() + assert result["ext"].dtype == pandas_dtype + + import pandas as pd + # Skip tests for 2.0.x, See: GH-35821 + if ( + Version(pd.__version__) >= Version("2.1.0") + ): + # Check the usage of types_mapper + result = table.to_pandas(types_mapper=pd.ArrowDtype) + assert isinstance(result["ext"].dtype, pd.ArrowDtype) + + +def test_tensor_type_is_picklable(pickle_module): + # GH-35599 + + expected_type = pa.fixed_shape_tensor(pa.int32(), (2, 2)) + result = pickle_module.loads(pickle_module.dumps(expected_type)) + + assert result == expected_type + + arr = [[1, 2, 3, 4], [10, 20, 30, 40], [100, 200, 300, 400]] + storage = pa.array(arr, pa.list_(pa.int32(), 4)) + expected_arr = pa.ExtensionArray.from_storage(expected_type, storage) + result = pickle_module.loads(pickle_module.dumps(expected_arr)) + + assert result == expected_arr + + +@pytest.mark.parametrize(("tensor_type", "text"), [ + ( + pa.fixed_shape_tensor(pa.int8(), [2, 2, 3]), + 'fixed_shape_tensor[value_type=int8, shape=[2,2,3]]' + ), + ( + pa.fixed_shape_tensor(pa.int32(), [2, 2, 3], permutation=[0, 2, 1]), + 'fixed_shape_tensor[value_type=int32, shape=[2,2,3], permutation=[0,2,1]]' + ), + ( + pa.fixed_shape_tensor(pa.int64(), [2, 2, 3], dim_names=['C', 'H', 
'W']), + 'fixed_shape_tensor[value_type=int64, shape=[2,2,3], dim_names=[C,H,W]]' + ) +]) +def test_tensor_type_str(tensor_type, text): + tensor_type_str = tensor_type.__str__() + assert text in tensor_type_str + + +def test_legacy_int_type(): + with pytest.warns(FutureWarning, match="PyExtensionType is deprecated"): + ext_ty = LegacyIntType() + arr = pa.array([1, 2, 3], type=ext_ty.storage_type) + ext_arr = pa.ExtensionArray.from_storage(ext_ty, arr) + batch = pa.RecordBatch.from_arrays([ext_arr], names=['ext']) + buf = ipc_write_batch(batch) + + with pytest.warns((RuntimeWarning, FutureWarning)): + batch = ipc_read_batch(buf) + assert isinstance(batch.column(0).type, pa.UnknownExtensionType) + + with enabled_auto_load(): + with pytest.warns(FutureWarning, match="PyExtensionType is deprecated"): + batch = ipc_read_batch(buf) + assert isinstance(batch.column(0).type, LegacyIntType) + assert batch.column(0) == ext_arr diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_feather.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_feather.py new file mode 100644 index 0000000000000000000000000000000000000000..006400648908818510b1a753f20068047074893d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_feather.py @@ -0,0 +1,863 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import io +import os +import sys +import tempfile +import pytest +import hypothesis as h +import hypothesis.strategies as st + +import numpy as np + +import pyarrow as pa +import pyarrow.tests.strategies as past +from pyarrow.feather import (read_feather, write_feather, read_table, + FeatherDataset) + +try: + from pandas.testing import assert_frame_equal + import pandas as pd + import pyarrow.pandas_compat +except ImportError: + pass + + +@pytest.fixture(scope='module') +def datadir(base_datadir): + return base_datadir / 'feather' + + +def random_path(prefix='feather_'): + return tempfile.mktemp(prefix=prefix) + + +@pytest.fixture(scope="module", params=[1, 2]) +def version(request): + yield request.param + + +@pytest.fixture(scope="module", params=[None, "uncompressed", "lz4", "zstd"]) +def compression(request): + if request.param in ['lz4', 'zstd'] and not pa.Codec.is_available( + request.param): + pytest.skip(f'{request.param} is not available') + yield request.param + + +TEST_FILES = None + + +def setup_module(module): + global TEST_FILES + TEST_FILES = [] + + +def teardown_module(module): + for path in TEST_FILES: + try: + os.remove(path) + except os.error: + pass + + +@pytest.mark.pandas +def test_file_not_exist(): + with pytest.raises(pa.ArrowIOError): + read_feather('test_invalid_file') + + +def _check_pandas_roundtrip(df, expected=None, path=None, + columns=None, use_threads=False, + version=None, compression=None, + compression_level=None): + if path is None: + path = random_path() + + if version is None: + version = 2 + + TEST_FILES.append(path) + write_feather(df, path, compression=compression, + compression_level=compression_level, version=version) + + if not os.path.exists(path): + raise Exception('file not written') + + result = read_feather(path, columns, use_threads=use_threads) + + if expected is None: + expected = df + + assert_frame_equal(result, expected) + + +def _check_arrow_roundtrip(table, path=None, compression=None): + if path is None: + path = random_path() + + TEST_FILES.append(path) + write_feather(table, path, compression=compression) + if not os.path.exists(path): + raise Exception('file not written') + + result = read_table(path) + assert result.equals(table) + + +def _assert_error_on_write(df, exc, path=None, version=2): + # check that we are raising the exception + # on writing + + if path is None: + path = random_path() + + TEST_FILES.append(path) + + def f(): + write_feather(df, path, version=version) + + pytest.raises(exc, f) + + +def test_dataset(version): + num_values = (100, 100) + num_files = 5 + paths = [random_path() for i in range(num_files)] + data = { + "col_" + str(i): np.random.randn(num_values[0]) + for i in range(num_values[1]) + } + table = pa.table(data) + + TEST_FILES.extend(paths) + for index, path in enumerate(paths): + rows = ( + index * (num_values[0] // num_files), + (index + 1) * (num_values[0] // num_files), + ) + + write_feather(table[rows[0]: rows[1]], path, version=version) + + data = FeatherDataset(paths).read_table() + assert data.equals(table) + + +@pytest.mark.pandas +def test_float_no_nulls(version): + data = {} + numpy_dtypes = ['f4', 'f8'] + num_values = 100 + + for dtype in numpy_dtypes: + values = np.random.randn(num_values) + data[dtype] = values.astype(dtype) + + df = pd.DataFrame(data) + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_read_table(version): + num_values = (100, 100) + path = random_path() + + TEST_FILES.append(path) + + values = np.random.randint(0, 100, 
size=num_values) + columns = ['col_' + str(i) for i in range(100)] + table = pa.Table.from_arrays(values, columns) + + write_feather(table, path, version=version) + + result = read_table(path) + assert result.equals(table) + + # Test without memory mapping + result = read_table(path, memory_map=False) + assert result.equals(table) + + result = read_feather(path, memory_map=False) + assert_frame_equal(table.to_pandas(), result) + + +@pytest.mark.pandas +def test_use_threads(version): + # ARROW-14470 + num_values = (10, 10) + path = random_path() + + TEST_FILES.append(path) + + values = np.random.randint(0, 10, size=num_values) + columns = ['col_' + str(i) for i in range(10)] + table = pa.Table.from_arrays(values, columns) + + write_feather(table, path, version=version) + + result = read_feather(path) + assert_frame_equal(table.to_pandas(), result) + + # Test read_feather with use_threads=False + result = read_feather(path, use_threads=False) + assert_frame_equal(table.to_pandas(), result) + + # Test read_table with use_threads=False + result = read_table(path, use_threads=False) + assert result.equals(table) + + +@pytest.mark.pandas +def test_float_nulls(version): + num_values = 100 + + path = random_path() + TEST_FILES.append(path) + + null_mask = np.random.randint(0, 10, size=num_values) < 3 + dtypes = ['f4', 'f8'] + expected_cols = [] + + arrays = [] + for name in dtypes: + values = np.random.randn(num_values).astype(name) + arrays.append(pa.array(values, mask=null_mask)) + + values[null_mask] = np.nan + + expected_cols.append(values) + + table = pa.table(arrays, names=dtypes) + _check_arrow_roundtrip(table) + + df = table.to_pandas() + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_integer_no_nulls(version): + data, arr = {}, [] + + numpy_dtypes = ['i1', 'i2', 'i4', 'i8', + 'u1', 'u2', 'u4', 'u8'] + num_values = 100 + + for dtype in numpy_dtypes: + values = np.random.randint(0, 100, size=num_values) + data[dtype] = values.astype(dtype) + arr.append(values.astype(dtype)) + + df = pd.DataFrame(data) + _check_pandas_roundtrip(df, version=version) + + table = pa.table(arr, names=numpy_dtypes) + _check_arrow_roundtrip(table) + + +@pytest.mark.pandas +def test_platform_numpy_integers(version): + data = {} + + numpy_dtypes = ['longlong'] + num_values = 100 + + for dtype in numpy_dtypes: + values = np.random.randint(0, 100, size=num_values) + data[dtype] = values.astype(dtype) + + df = pd.DataFrame(data) + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_integer_with_nulls(version): + # pandas requires upcast to float dtype + path = random_path() + TEST_FILES.append(path) + + int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'] + num_values = 100 + + arrays = [] + null_mask = np.random.randint(0, 10, size=num_values) < 3 + expected_cols = [] + for name in int_dtypes: + values = np.random.randint(0, 100, size=num_values) + arrays.append(pa.array(values, mask=null_mask)) + + expected = values.astype('f8') + expected[null_mask] = np.nan + + expected_cols.append(expected) + + table = pa.table(arrays, names=int_dtypes) + _check_arrow_roundtrip(table) + + df = table.to_pandas() + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_boolean_no_nulls(version): + num_values = 100 + + np.random.seed(0) + + df = pd.DataFrame({'bools': np.random.randn(num_values) > 0}) + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_boolean_nulls(version): + # pandas requires upcast to 
object dtype + path = random_path() + TEST_FILES.append(path) + + num_values = 100 + np.random.seed(0) + + mask = np.random.randint(0, 10, size=num_values) < 3 + values = np.random.randint(0, 10, size=num_values) < 5 + + table = pa.table([pa.array(values, mask=mask)], names=['bools']) + _check_arrow_roundtrip(table) + + df = table.to_pandas() + _check_pandas_roundtrip(df, version=version) + + +def test_buffer_bounds_error(version): + # ARROW-1676 + path = random_path() + TEST_FILES.append(path) + + for i in range(16, 256): + table = pa.Table.from_arrays( + [pa.array([None] + list(range(i)), type=pa.float64())], + names=["arr"] + ) + _check_arrow_roundtrip(table) + + +def test_boolean_object_nulls(version): + repeats = 100 + table = pa.Table.from_arrays( + [np.array([False, None, True] * repeats, dtype=object)], + names=["arr"] + ) + _check_arrow_roundtrip(table) + + +@pytest.mark.pandas +def test_delete_partial_file_on_error(version): + if sys.platform == 'win32': + pytest.skip('Windows hangs on to file handle for some reason') + + class CustomClass: + pass + + # strings will fail + df = pd.DataFrame( + { + 'numbers': range(5), + 'strings': [b'foo', None, 'bar', CustomClass(), np.nan]}, + columns=['numbers', 'strings']) + + path = random_path() + try: + write_feather(df, path, version=version) + except Exception: + pass + + assert not os.path.exists(path) + + +@pytest.mark.pandas +def test_strings(version): + repeats = 1000 + + # Mixed bytes, unicode, strings coerced to binary + values = [b'foo', None, 'bar', 'qux', np.nan] + df = pd.DataFrame({'strings': values * repeats}) + + ex_values = [b'foo', None, b'bar', b'qux', None] + expected = pd.DataFrame({'strings': ex_values * repeats}) + _check_pandas_roundtrip(df, expected, version=version) + + # embedded nulls are ok + values = ['foo', None, 'bar', 'qux', None] + df = pd.DataFrame({'strings': values * repeats}) + expected = pd.DataFrame({'strings': values * repeats}) + _check_pandas_roundtrip(df, expected, version=version) + + values = ['foo', None, 'bar', 'qux', np.nan] + df = pd.DataFrame({'strings': values * repeats}) + ex_values = ['foo', None, 'bar', 'qux', None] + expected = pd.DataFrame({'strings': ex_values * repeats}) + _check_pandas_roundtrip(df, expected, version=version) + + +@pytest.mark.pandas +def test_empty_strings(version): + df = pd.DataFrame({'strings': [''] * 10}) + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_all_none(version): + df = pd.DataFrame({'all_none': [None] * 10}) + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_all_null_category(version): + # ARROW-1188 + df = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)}) + df = df.assign(B=df.B.astype("category")) + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_multithreaded_read(version): + data = {'c{}'.format(i): [''] * 10 + for i in range(100)} + df = pd.DataFrame(data) + _check_pandas_roundtrip(df, use_threads=True, version=version) + + +@pytest.mark.pandas +def test_nan_as_null(version): + # Create a nan that is not numpy.nan + values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10) + df = pd.DataFrame({'strings': values}) + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_category(version): + repeats = 1000 + values = ['foo', None, 'bar', 'qux', np.nan] + df = pd.DataFrame({'strings': values * repeats}) + df['strings'] = df['strings'].astype('category') + + values = ['foo', None, 'bar', 'qux', None] + expected = 
pd.DataFrame({'strings': pd.Categorical(values * repeats)}) + _check_pandas_roundtrip(df, expected, version=version) + + +@pytest.mark.pandas +def test_timestamp(version): + df = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)}) + df['with_tz'] = (df.naive.dt.tz_localize('utc') + .dt.tz_convert('America/Los_Angeles')) + + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_timestamp_with_nulls(version): + df = pd.DataFrame({'test': [pd.Timestamp(2016, 1, 1), + None, + pd.Timestamp(2016, 1, 3)]}) + df['with_tz'] = df.test.dt.tz_localize('utc') + + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +@pytest.mark.xfail(reason="not supported", raises=TypeError) +def test_timedelta_with_nulls_v1(): + df = pd.DataFrame({'test': [pd.Timedelta('1 day'), + None, + pd.Timedelta('3 day')]}) + _check_pandas_roundtrip(df, version=1) + + +@pytest.mark.pandas +def test_timedelta_with_nulls(): + df = pd.DataFrame({'test': [pd.Timedelta('1 day'), + None, + pd.Timedelta('3 day')]}) + _check_pandas_roundtrip(df, version=2) + + +@pytest.mark.pandas +def test_out_of_float64_timestamp_with_nulls(version): + df = pd.DataFrame( + {'test': pd.DatetimeIndex([1451606400000000001, + None, 14516064000030405])}) + df['with_tz'] = df.test.dt.tz_localize('utc') + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.pandas +def test_non_string_columns(version): + df = pd.DataFrame({0: [1, 2, 3, 4], + 1: [True, False, True, False]}) + expected = df + + if version == 1: + expected = df.rename(columns=str) + _check_pandas_roundtrip(df, expected, version=version) + + +@pytest.mark.pandas +@pytest.mark.skipif(not os.path.supports_unicode_filenames, + reason='unicode filenames not supported') +def test_unicode_filename(version): + # GH #209 + name = (b'Besa_Kavaj\xc3\xab.feather').decode('utf-8') + df = pd.DataFrame({'foo': [1, 2, 3, 4]}) + _check_pandas_roundtrip(df, path=random_path(prefix=name), + version=version) + + +@pytest.mark.pandas +def test_read_columns(version): + df = pd.DataFrame({ + 'foo': [1, 2, 3, 4], + 'boo': [5, 6, 7, 8], + 'woo': [1, 3, 5, 7] + }) + expected = df[['boo', 'woo']] + + _check_pandas_roundtrip(df, expected, version=version, + columns=['boo', 'woo']) + + +def test_overwritten_file(version): + path = random_path() + TEST_FILES.append(path) + + num_values = 100 + np.random.seed(0) + + values = np.random.randint(0, 10, size=num_values) + + table = pa.table({'ints': values}) + write_feather(table, path) + + table = pa.table({'more_ints': values[0:num_values//2]}) + _check_arrow_roundtrip(table, path=path) + + +@pytest.mark.pandas +def test_filelike_objects(version): + buf = io.BytesIO() + + # the copy makes it non-strided + df = pd.DataFrame(np.arange(12).reshape(4, 3), + columns=['a', 'b', 'c']).copy() + write_feather(df, buf, version=version) + + buf.seek(0) + + result = read_feather(buf) + assert_frame_equal(result, df) + + +@pytest.mark.pandas +@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning") +@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning") +def test_sparse_dataframe(version): + if not pa.pandas_compat._pandas_api.has_sparse: + pytest.skip("version of pandas does not support SparseDataFrame") + # GH #221 + data = {'A': [0, 1, 2], + 'B': [1, 0, 1]} + df = pd.DataFrame(data).to_sparse(fill_value=1) + expected = df.to_dense() + _check_pandas_roundtrip(df, expected, version=version) + + +@pytest.mark.pandas +def test_duplicate_columns_pandas(): + + # https://github.com/wesm/feather/issues/53 + # 
not currently able to handle duplicate columns + df = pd.DataFrame(np.arange(12).reshape(4, 3), + columns=list('aaa')).copy() + _assert_error_on_write(df, ValueError) + + +def test_duplicate_columns(): + # only works for version 2 + table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'a', 'b']) + _check_arrow_roundtrip(table) + _assert_error_on_write(table, ValueError, version=1) + + +@pytest.mark.pandas +def test_unsupported(): + # https://github.com/wesm/feather/issues/240 + # serializing actual python objects + + # custom python objects + class A: + pass + + df = pd.DataFrame({'a': [A(), A()]}) + _assert_error_on_write(df, ValueError) + + # non-strings + df = pd.DataFrame({'a': ['a', 1, 2.0]}) + _assert_error_on_write(df, TypeError) + + +@pytest.mark.pandas +def test_v2_set_chunksize(): + df = pd.DataFrame({'A': np.arange(1000)}) + table = pa.table(df) + + buf = io.BytesIO() + write_feather(table, buf, chunksize=250, version=2) + + result = buf.getvalue() + + ipc_file = pa.ipc.open_file(pa.BufferReader(result)) + assert ipc_file.num_record_batches == 4 + assert len(ipc_file.get_batch(0)) == 250 + + +@pytest.mark.pandas +@pytest.mark.lz4 +@pytest.mark.snappy +@pytest.mark.zstd +def test_v2_compression_options(): + df = pd.DataFrame({'A': np.arange(1000)}) + + cases = [ + # compression, compression_level + ('uncompressed', None), + ('lz4', None), + ('lz4', 1), + ('lz4', 12), + ('zstd', 1), + ('zstd', 10) + ] + + for compression, compression_level in cases: + _check_pandas_roundtrip(df, compression=compression, + compression_level=compression_level) + + buf = io.BytesIO() + + # Trying to compress with V1 + with pytest.raises( + ValueError, + match="Feather V1 files do not support compression option"): + write_feather(df, buf, compression='lz4', version=1) + + # Trying to set chunksize with V1 + with pytest.raises( + ValueError, + match="Feather V1 files do not support chunksize option"): + write_feather(df, buf, chunksize=4096, version=1) + + # Unsupported compressor + with pytest.raises(ValueError, + match='compression="snappy" not supported'): + write_feather(df, buf, compression='snappy') + + +def test_v2_lz4_default_compression(): + # ARROW-8750: Make sure that the compression=None option selects lz4 if + # it's available + if not pa.Codec.is_available('lz4_frame'): + pytest.skip("LZ4 compression support is not built in C++") + + # some highly compressible data + t = pa.table([np.repeat(0, 100000)], names=['f0']) + + buf = io.BytesIO() + write_feather(t, buf) + default_result = buf.getvalue() + + buf = io.BytesIO() + write_feather(t, buf, compression='uncompressed') + uncompressed_result = buf.getvalue() + + assert len(default_result) < len(uncompressed_result) + + +def test_v1_unsupported_types(): + table = pa.table([pa.array([[1, 2, 3], [], None])], names=['f0']) + + buf = io.BytesIO() + with pytest.raises(TypeError, + match=("Unsupported Feather V1 type: " + "list. 
" + "Use V2 format to serialize all Arrow types.")): + write_feather(table, buf, version=1) + + +@pytest.mark.slow +@pytest.mark.pandas +def test_large_dataframe(version): + df = pd.DataFrame({'A': np.arange(400000000)}) + _check_pandas_roundtrip(df, version=version) + + +@pytest.mark.large_memory +@pytest.mark.pandas +def test_chunked_binary_error_message(): + # ARROW-3058: As Feather does not yet support chunked columns, we at least + # make sure it's clear to the user what is going on + + # 2^31 + 1 bytes + values = [b'x'] + [ + b'x' * (1 << 20) + ] * 2 * (1 << 10) + df = pd.DataFrame({'byte_col': values}) + + # Works fine with version 2 + buf = io.BytesIO() + write_feather(df, buf, version=2) + result = read_feather(pa.BufferReader(buf.getvalue())) + assert_frame_equal(result, df) + + with pytest.raises(ValueError, match="'byte_col' exceeds 2GB maximum " + "capacity of a Feather binary column. This restriction " + "may be lifted in the future"): + write_feather(df, io.BytesIO(), version=1) + + +def test_feather_without_pandas(tempdir, version): + # ARROW-8345 + table = pa.table([pa.array([1, 2, 3])], names=['f0']) + path = str(tempdir / "data.feather") + _check_arrow_roundtrip(table, path) + + +@pytest.mark.pandas +def test_read_column_selection(version): + # ARROW-8641 + df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=['a', 'b', 'c']) + + # select columns as string names or integer indices + _check_pandas_roundtrip( + df, columns=['a', 'c'], expected=df[['a', 'c']], version=version) + _check_pandas_roundtrip( + df, columns=[0, 2], expected=df[['a', 'c']], version=version) + + # different order is followed + _check_pandas_roundtrip( + df, columns=['b', 'a'], expected=df[['b', 'a']], version=version) + _check_pandas_roundtrip( + df, columns=[1, 0], expected=df[['b', 'a']], version=version) + + +def test_read_column_duplicated_selection(tempdir, version): + # duplicated columns in the column selection + table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'b', 'c']) + path = str(tempdir / "data.feather") + write_feather(table, path, version=version) + + expected = pa.table([[1, 2, 3], [4, 5, 6], [1, 2, 3]], + names=['a', 'b', 'a']) + for col_selection in [['a', 'b', 'a'], [0, 1, 0]]: + result = read_table(path, columns=col_selection) + assert result.equals(expected) + + +def test_read_column_duplicated_in_file(tempdir): + # duplicated columns in feather file (only works for feather v2) + table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'b', 'a']) + path = str(tempdir / "data.feather") + write_feather(table, path, version=2) + + # no selection works fine + result = read_table(path) + assert result.equals(table) + + # selection with indices works + result = read_table(path, columns=[0, 2]) + assert result.column_names == ['a', 'a'] + + # selection with column names errors + with pytest.raises(ValueError): + read_table(path, columns=['a', 'b']) + + +def test_nested_types(compression): + # https://issues.apache.org/jira/browse/ARROW-8860 + table = pa.table({'col': pa.StructArray.from_arrays( + [[0, 1, 2], [1, 2, 3]], names=["f1", "f2"])}) + _check_arrow_roundtrip(table, compression=compression) + + table = pa.table({'col': pa.array([[1, 2], [3, 4]])}) + _check_arrow_roundtrip(table, compression=compression) + + table = pa.table({'col': pa.array([[[1, 2], [3, 4]], [[5, 6], None]])}) + _check_arrow_roundtrip(table, compression=compression) + + +@h.given(past.all_tables, st.sampled_from(["uncompressed", "lz4", "zstd"])) +def test_roundtrip(table, compression): + 
_check_arrow_roundtrip(table, compression=compression) + + +@pytest.mark.lz4 +def test_feather_v017_experimental_compression_backward_compatibility(datadir): + # ARROW-11163 - ensure newer pyarrow versions can read the old feather + # files from version 0.17.0 with experimental compression support (before + # it was officially added to IPC format in 1.0.0) + + # file generated with: + # table = pa.table({'a': range(5)}) + # from pyarrow import feather + # feather.write_feather( + # table, "v0.17.0.version.2-compression.lz4.feather", + # compression="lz4", version=2) + expected = pa.table({'a': range(5)}) + result = read_table(datadir / "v0.17.0.version.2-compression.lz4.feather") + assert result.equals(expected) + + +@pytest.mark.pandas +def test_preserve_index_pandas(version): + df = pd.DataFrame({'a': [1, 2, 3]}, index=['a', 'b', 'c']) + + if version == 1: + expected = df.reset_index(drop=True).rename(columns=str) + else: + expected = df + + _check_pandas_roundtrip(df, expected, version=version) + + +@pytest.mark.pandas +def test_feather_datetime_resolution_arrow_to_pandas(tempdir): + # ARROW-17192 - ensure timestamp_as_object=True (together with other + # **kwargs) can be passed in read_feather to to_pandas. + + from datetime import datetime + df = pd.DataFrame({"date": [ + datetime.fromisoformat("1654-01-01"), + datetime.fromisoformat("1920-01-01"), ], + }) + write_feather(df, tempdir / "test_resolution.feather") + + expected_0 = datetime.fromisoformat("1654-01-01") + expected_1 = datetime.fromisoformat("1920-01-01") + + result = read_feather(tempdir / "test_resolution.feather", + timestamp_as_object=True) + + assert expected_0 == result['date'][0] + assert expected_1 == result['date'][1] diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_flight.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_flight.py new file mode 100644 index 0000000000000000000000000000000000000000..9553dc2507225f3903b376f3c5d3176f4136aeca --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_flight.py @@ -0,0 +1,2367 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import ast +import base64 +import itertools +import os +import pathlib +import signal +import struct +import tempfile +import threading +import time +import traceback +import json + +import numpy as np +import pytest +import pyarrow as pa + +from pyarrow.lib import IpcReadOptions, tobytes +from pyarrow.util import find_free_port +from pyarrow.tests import util + +try: + from pyarrow import flight + from pyarrow.flight import ( + FlightClient, FlightServerBase, + ServerAuthHandler, ClientAuthHandler, + ServerMiddleware, ServerMiddlewareFactory, + ClientMiddleware, ClientMiddlewareFactory, + ) +except ImportError: + flight = None + FlightClient, FlightServerBase = object, object + ServerAuthHandler, ClientAuthHandler = object, object + ServerMiddleware, ServerMiddlewareFactory = object, object + ClientMiddleware, ClientMiddlewareFactory = object, object + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not flight' +pytestmark = pytest.mark.flight + + +def test_import(): + # So we see the ImportError somewhere + import pyarrow.flight # noqa + + +def resource_root(): + """Get the path to the test resources directory.""" + if not os.environ.get("ARROW_TEST_DATA"): + raise RuntimeError("Test resources not found; set " + "ARROW_TEST_DATA to /testing/data") + return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight" + + +def read_flight_resource(path): + """Get the contents of a test resource file.""" + root = resource_root() + if not root: + return None + try: + with (root / path).open("rb") as f: + return f.read() + except FileNotFoundError: + raise RuntimeError( + "Test resource {} not found; did you initialize the " + "test resource submodule?\n{}".format(root / path, + traceback.format_exc())) + + +def example_tls_certs(): + """Get the paths to test TLS certificates.""" + return { + "root_cert": read_flight_resource("root-ca.pem"), + "certificates": [ + flight.CertKeyPair( + cert=read_flight_resource("cert0.pem"), + key=read_flight_resource("cert0.key"), + ), + flight.CertKeyPair( + cert=read_flight_resource("cert1.pem"), + key=read_flight_resource("cert1.key"), + ), + ] + } + + +def simple_ints_table(): + data = [ + pa.array([-10, -5, 0, 5, 10]) + ] + return pa.Table.from_arrays(data, names=['some_ints']) + + +def simple_dicts_table(): + dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8()) + data = [ + pa.chunked_array([ + pa.DictionaryArray.from_arrays([1, 0, None], dict_values), + pa.DictionaryArray.from_arrays([2, 1], dict_values) + ]) + ] + return pa.Table.from_arrays(data, names=['some_dicts']) + + +def multiple_column_table(): + return pa.Table.from_arrays([pa.array(['foo', 'bar', 'baz', 'qux']), + pa.array([1, 2, 3, 4])], + names=['a', 'b']) + + +class ConstantFlightServer(FlightServerBase): + """A Flight server that always returns the same data. + + See ARROW-4796: this server implementation will segfault if Flight + does not properly hold a reference to the Table object. 
+ """ + + CRITERIA = b"the expected criteria" + + def __init__(self, location=None, options=None, **kwargs): + super().__init__(location, **kwargs) + # Ticket -> Table + self.table_factories = { + b'ints': simple_ints_table, + b'dicts': simple_dicts_table, + b'multi': multiple_column_table, + } + self.options = options + + def list_flights(self, context, criteria): + if criteria == self.CRITERIA: + yield flight.FlightInfo( + pa.schema([]), + flight.FlightDescriptor.for_path('/foo'), + [], + -1, -1 + ) + + def do_get(self, context, ticket): + # Return a fresh table, so that Flight is the only one keeping a + # reference. + table = self.table_factories[ticket.ticket]() + return flight.RecordBatchStream(table, options=self.options) + + +class MetadataFlightServer(FlightServerBase): + """A Flight server that numbers incoming/outgoing data.""" + + def __init__(self, options=None, **kwargs): + super().__init__(**kwargs) + self.options = options + + def do_get(self, context, ticket): + data = [ + pa.array([-10, -5, 0, 5, 10]) + ] + table = pa.Table.from_arrays(data, names=['a']) + return flight.GeneratorStream( + table.schema, + self.number_batches(table), + options=self.options) + + def do_put(self, context, descriptor, reader, writer): + counter = 0 + expected_data = [-10, -5, 0, 5, 10] + while True: + try: + batch, buf = reader.read_chunk() + assert batch.equals(pa.RecordBatch.from_arrays( + [pa.array([expected_data[counter]])], + ['a'] + )) + assert buf is not None + client_counter, = struct.unpack(' " + "locations=[]>") + info_repr = ( + " " + "endpoints=[] " + "total_records=-1 " + "total_bytes=-1>") + location_repr = "" + result_repr = "" + schema_result_repr = "" + ticket_repr = "" + + assert repr(flight.Action("foo", b"")) == action_repr + assert repr(flight.ActionType("foo", "bar")) == action_type_repr + assert repr(flight.BasicAuth("user", "pass")) == basic_auth_repr + assert repr(flight.FlightDescriptor.for_command("foo")) == descriptor_repr + assert repr(flight.FlightEndpoint(b"foo", [])) == endpoint_repr + info = flight.FlightInfo( + pa.schema([]), flight.FlightDescriptor.for_path(), [], -1, -1) + assert repr(info) == info_repr + assert repr(flight.Location("grpc+tcp://localhost:1234")) == location_repr + assert repr(flight.Result(b"foo")) == result_repr + assert repr(flight.SchemaResult(pa.schema([]))) == schema_result_repr + assert repr(flight.SchemaResult(pa.schema([("int", "int64")]))) == \ + "" + assert repr(flight.Ticket(b"foo")) == ticket_repr + + with pytest.raises(TypeError): + flight.Action("foo", None) + + +def test_eq(): + items = [ + lambda: (flight.Action("foo", b""), flight.Action("foo", b"bar")), + lambda: (flight.ActionType("foo", "bar"), + flight.ActionType("foo", "baz")), + lambda: (flight.BasicAuth("user", "pass"), + flight.BasicAuth("user2", "pass")), + lambda: (flight.FlightDescriptor.for_command("foo"), + flight.FlightDescriptor.for_path("foo")), + lambda: (flight.FlightEndpoint(b"foo", []), + flight.FlightEndpoint(b"", [])), + lambda: ( + flight.FlightInfo( + pa.schema([]), + flight.FlightDescriptor.for_path(), [], -1, -1), + flight.FlightInfo( + pa.schema([]), + flight.FlightDescriptor.for_command(b"foo"), [], -1, 42)), + lambda: (flight.Location("grpc+tcp://localhost:1234"), + flight.Location("grpc+tls://localhost:1234")), + lambda: (flight.Result(b"foo"), flight.Result(b"bar")), + lambda: (flight.SchemaResult(pa.schema([])), + flight.SchemaResult(pa.schema([("ints", pa.int64())]))), + lambda: (flight.Ticket(b""), flight.Ticket(b"foo")), + ] + + for gen in 
items: + lhs1, rhs1 = gen() + lhs2, rhs2 = gen() + assert lhs1 == lhs2 + assert rhs1 == rhs2 + assert lhs1 != rhs1 + + +def test_flight_server_location_argument(): + locations = [ + None, + 'grpc://localhost:0', + ('localhost', find_free_port()), + ] + for location in locations: + with FlightServerBase(location) as server: + assert isinstance(server, FlightServerBase) + + +def test_server_exit_reraises_exception(): + with pytest.raises(ValueError): + with FlightServerBase(): + raise ValueError() + + +@pytest.mark.slow +def test_client_wait_for_available(): + location = ('localhost', find_free_port()) + server = None + + def serve(): + global server + time.sleep(0.5) + server = FlightServerBase(location) + server.serve() + + with FlightClient(location) as client: + thread = threading.Thread(target=serve, daemon=True) + thread.start() + + started = time.time() + client.wait_for_available(timeout=5) + elapsed = time.time() - started + assert elapsed >= 0.5 + + +def test_flight_list_flights(): + """Try a simple list_flights call.""" + with ConstantFlightServer() as server, \ + flight.connect(('localhost', server.port)) as client: + assert list(client.list_flights()) == [] + flights = client.list_flights(ConstantFlightServer.CRITERIA) + assert len(list(flights)) == 1 + + +def test_flight_client_close(): + with ConstantFlightServer() as server, \ + flight.connect(('localhost', server.port)) as client: + assert list(client.list_flights()) == [] + client.close() + client.close() # Idempotent + with pytest.raises(pa.ArrowInvalid): + list(client.list_flights()) + + +def test_flight_do_get_ints(): + """Try a simple do_get call.""" + table = simple_ints_table() + + with ConstantFlightServer() as server, \ + flight.connect(('localhost', server.port)) as client: + data = client.do_get(flight.Ticket(b'ints')).read_all() + assert data.equals(table) + + options = pa.ipc.IpcWriteOptions( + metadata_version=pa.ipc.MetadataVersion.V4) + with ConstantFlightServer(options=options) as server, \ + flight.connect(('localhost', server.port)) as client: + data = client.do_get(flight.Ticket(b'ints')).read_all() + assert data.equals(table) + + # Also test via RecordBatchReader interface + data = client.do_get(flight.Ticket(b'ints')).to_reader().read_all() + assert data.equals(table) + + with pytest.raises(flight.FlightServerError, + match="expected IpcWriteOptions, got "): + with ConstantFlightServer(options=42) as server, \ + flight.connect(('localhost', server.port)) as client: + data = client.do_get(flight.Ticket(b'ints')).read_all() + + +@pytest.mark.pandas +def test_do_get_ints_pandas(): + """Try a simple do_get call.""" + table = simple_ints_table() + + with ConstantFlightServer() as server, \ + flight.connect(('localhost', server.port)) as client: + data = client.do_get(flight.Ticket(b'ints')).read_pandas() + assert list(data['some_ints']) == table.column(0).to_pylist() + + +def test_flight_do_get_dicts(): + table = simple_dicts_table() + + with ConstantFlightServer() as server, \ + flight.connect(('localhost', server.port)) as client: + data = client.do_get(flight.Ticket(b'dicts')).read_all() + assert data.equals(table) + + +def test_flight_do_get_ticket(): + """Make sure Tickets get passed to the server.""" + data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())] + table = pa.Table.from_arrays(data1, names=['a']) + with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server, \ + flight.connect(('localhost', server.port)) as client: + data = client.do_get(flight.Ticket(b'the-ticket')).read_all() + 
assert data.equals(table) + + +def test_flight_get_info(): + """Make sure FlightEndpoint accepts string and object URIs.""" + with GetInfoFlightServer() as server: + client = FlightClient(('localhost', server.port)) + info = client.get_flight_info(flight.FlightDescriptor.for_command(b'')) + assert info.total_records == -1 + assert info.total_bytes == -1 + assert info.schema == pa.schema([('a', pa.int32())]) + assert len(info.endpoints) == 2 + assert len(info.endpoints[0].locations) == 1 + assert info.endpoints[0].locations[0] == flight.Location('grpc://test') + assert info.endpoints[1].locations[0] == \ + flight.Location.for_grpc_tcp('localhost', 5005) + + +def test_flight_get_schema(): + """Make sure GetSchema returns correct schema.""" + with GetInfoFlightServer() as server, \ + FlightClient(('localhost', server.port)) as client: + info = client.get_schema(flight.FlightDescriptor.for_command(b'')) + assert info.schema == pa.schema([('a', pa.int32())]) + + +def test_list_actions(): + """Make sure the return type of ListActions is validated.""" + # ARROW-6392 + with ListActionsErrorFlightServer() as server, \ + FlightClient(('localhost', server.port)) as client: + with pytest.raises( + flight.FlightServerError, + match=("Results of list_actions must be " + "ActionType or tuple") + ): + list(client.list_actions()) + + with ListActionsFlightServer() as server, \ + FlightClient(('localhost', server.port)) as client: + assert list(client.list_actions()) == \ + ListActionsFlightServer.expected_actions() + + +class ConvenienceServer(FlightServerBase): + """ + Server for testing various implementation conveniences (auto-boxing, etc.) + """ + + @property + def simple_action_results(self): + return [b'foo', b'bar', b'baz'] + + def do_action(self, context, action): + if action.type == 'simple-action': + return self.simple_action_results + elif action.type == 'echo': + return [action.body] + elif action.type == 'bad-action': + return ['foo'] + elif action.type == 'arrow-exception': + raise pa.ArrowMemoryError() + elif action.type == 'forever': + def gen(): + while not context.is_cancelled(): + yield b'foo' + return gen() + + +def test_do_action_result_convenience(): + with ConvenienceServer() as server, \ + FlightClient(('localhost', server.port)) as client: + + # do_action as action type without body + results = [x.body for x in client.do_action('simple-action')] + assert results == server.simple_action_results + + # do_action with tuple of type and body + body = b'the-body' + results = [x.body for x in client.do_action(('echo', body))] + assert results == [body] + + +def test_nicer_server_exceptions(): + with ConvenienceServer() as server, \ + FlightClient(('localhost', server.port)) as client: + with pytest.raises(flight.FlightServerError, + match="a bytes-like object is required"): + list(client.do_action('bad-action')) + # While Flight/C++ sends across the original status code, it + # doesn't get mapped to the equivalent code here, since we + # want to be able to distinguish between client- and server- + # side errors. 
+ with pytest.raises(flight.FlightServerError, + match="ArrowMemoryError"): + list(client.do_action('arrow-exception')) + + +def test_get_port(): + """Make sure port() works.""" + server = GetInfoFlightServer("grpc://localhost:0") + try: + assert server.port > 0 + finally: + server.shutdown() + + +@pytest.mark.skipif(os.name == 'nt', + reason="Unix sockets can't be tested on Windows") +def test_flight_domain_socket(): + """Try a simple do_get call over a Unix domain socket.""" + with tempfile.NamedTemporaryFile() as sock: + sock.close() + location = flight.Location.for_grpc_unix(sock.name) + with ConstantFlightServer(location=location), \ + FlightClient(location) as client: + + reader = client.do_get(flight.Ticket(b'ints')) + table = simple_ints_table() + assert reader.schema.equals(table.schema) + data = reader.read_all() + assert data.equals(table) + + reader = client.do_get(flight.Ticket(b'dicts')) + table = simple_dicts_table() + assert reader.schema.equals(table.schema) + data = reader.read_all() + assert data.equals(table) + + +@pytest.mark.slow +def test_flight_large_message(): + """Try sending/receiving a large message via Flight. + + See ARROW-4421: by default, gRPC won't allow us to send messages > + 4MiB in size. + """ + data = pa.Table.from_arrays([ + pa.array(range(0, 10 * 1024 * 1024)) + ], names=['a']) + + with EchoFlightServer(expected_schema=data.schema) as server, \ + FlightClient(('localhost', server.port)) as client: + writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'), + data.schema) + # Write a single giant chunk + writer.write_table(data, 10 * 1024 * 1024) + writer.close() + result = client.do_get(flight.Ticket(b'')).read_all() + assert result.equals(data) + + +def test_flight_generator_stream(): + """Try downloading a flight of RecordBatches in a GeneratorStream.""" + data = pa.Table.from_arrays([ + pa.array(range(0, 10 * 1024)) + ], names=['a']) + + with EchoStreamFlightServer() as server, \ + FlightClient(('localhost', server.port)) as client: + writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'), + data.schema) + writer.write_table(data) + writer.close() + result = client.do_get(flight.Ticket(b'')).read_all() + assert result.equals(data) + + +def test_flight_invalid_generator_stream(): + """Try streaming data with mismatched schemas.""" + with InvalidStreamFlightServer() as server, \ + FlightClient(('localhost', server.port)) as client: + with pytest.raises(pa.ArrowException): + client.do_get(flight.Ticket(b'')).read_all() + + +def test_timeout_fires(): + """Make sure timeouts fire on slow requests.""" + # Do this in a separate thread so that if it fails, we don't hang + # the entire test process + with SlowFlightServer() as server, \ + FlightClient(('localhost', server.port)) as client: + action = flight.Action("", b"") + options = flight.FlightCallOptions(timeout=0.2) + # gRPC error messages change based on version, so don't look + # for a particular error + with pytest.raises(flight.FlightTimedOutError): + list(client.do_action(action, options=options)) + + +def test_timeout_passes(): + """Make sure timeouts do not fire on fast requests.""" + with ConstantFlightServer() as server, \ + FlightClient(('localhost', server.port)) as client: + options = flight.FlightCallOptions(timeout=5.0) + client.do_get(flight.Ticket(b'ints'), options=options).read_all() + + +def test_read_options(): + """Make sure ReadOptions can be used.""" + expected = pa.Table.from_arrays([pa.array([1, 2, 3, 4])], names=["b"]) + with ConstantFlightServer() as 
server, \ + FlightClient(('localhost', server.port)) as client: + options = flight.FlightCallOptions( + read_options=IpcReadOptions(included_fields=[1])) + response1 = client.do_get(flight.Ticket( + b'multi'), options=options).read_all() + response2 = client.do_get(flight.Ticket(b'multi')).read_all() + + assert response2.num_columns == 2 + assert response1.num_columns == 1 + assert response1 == expected + assert response2 == multiple_column_table() + + +basic_auth_handler = HttpBasicServerAuthHandler(creds={ + b"test": b"p4ssw0rd", +}) + +token_auth_handler = TokenServerAuthHandler(creds={ + b"test": b"p4ssw0rd", +}) + + +@pytest.mark.slow +def test_http_basic_unauth(): + """Test that auth fails when not authenticated.""" + with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server, \ + FlightClient(('localhost', server.port)) as client: + action = flight.Action("who-am-i", b"") + with pytest.raises(flight.FlightUnauthenticatedError, + match=".*unauthenticated.*"): + list(client.do_action(action)) + + +@pytest.mark.skipif(os.name == 'nt', + reason="ARROW-10013: gRPC on Windows corrupts peer()") +def test_http_basic_auth(): + """Test a Python implementation of HTTP basic authentication.""" + with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server, \ + FlightClient(('localhost', server.port)) as client: + action = flight.Action("who-am-i", b"") + client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd')) + results = client.do_action(action) + identity = next(results) + assert identity.body.to_pybytes() == b'test' + peer_address = next(results) + assert peer_address.body.to_pybytes() != b'' + + +def test_http_basic_auth_invalid_password(): + """Test that auth fails with the wrong password.""" + with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server, \ + FlightClient(('localhost', server.port)) as client: + action = flight.Action("who-am-i", b"") + with pytest.raises(flight.FlightUnauthenticatedError, + match=".*wrong password.*"): + client.authenticate(HttpBasicClientAuthHandler('test', 'wrong')) + next(client.do_action(action)) + + +def test_token_auth(): + """Test an auth mechanism that uses a handshake.""" + with EchoStreamFlightServer(auth_handler=token_auth_handler) as server, \ + FlightClient(('localhost', server.port)) as client: + action = flight.Action("who-am-i", b"") + client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd')) + identity = next(client.do_action(action)) + assert identity.body.to_pybytes() == b'test' + + +def test_token_auth_invalid(): + """Test an auth mechanism that uses a handshake.""" + with EchoStreamFlightServer(auth_handler=token_auth_handler) as server, \ + FlightClient(('localhost', server.port)) as client: + with pytest.raises(flight.FlightUnauthenticatedError): + client.authenticate(TokenClientAuthHandler('test', 'wrong')) + + +header_auth_server_middleware_factory = HeaderAuthServerMiddlewareFactory() +no_op_auth_handler = NoopAuthHandler() + + +def test_authenticate_basic_token(): + """Test authenticate_basic_token with bearer token and auth headers.""" + with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={ + "auth": HeaderAuthServerMiddlewareFactory() + }) as server, \ + FlightClient(('localhost', server.port)) as client: + token_pair = client.authenticate_basic_token(b'test', b'password') + assert token_pair[0] == b'authorization' + assert token_pair[1] == b'Bearer token1234' + + +def test_authenticate_basic_token_invalid_password(): + """Test authenticate_basic_token with 
an invalid password.""" + with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={ + "auth": HeaderAuthServerMiddlewareFactory() + }) as server, \ + FlightClient(('localhost', server.port)) as client: + with pytest.raises(flight.FlightUnauthenticatedError): + client.authenticate_basic_token(b'test', b'badpassword') + + +def test_authenticate_basic_token_and_action(): + """Test authenticate_basic_token and doAction after authentication.""" + with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={ + "auth": HeaderAuthServerMiddlewareFactory() + }) as server, \ + FlightClient(('localhost', server.port)) as client: + token_pair = client.authenticate_basic_token(b'test', b'password') + assert token_pair[0] == b'authorization' + assert token_pair[1] == b'Bearer token1234' + options = flight.FlightCallOptions(headers=[token_pair]) + result = list(client.do_action( + action=flight.Action('test-action', b''), options=options)) + assert result[0].body.to_pybytes() == b'token1234' + + +def test_authenticate_basic_token_with_client_middleware(): + """Test authenticate_basic_token with client middleware + to intercept authorization header returned by the + HTTP header auth enabled server. + """ + with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={ + "auth": HeaderAuthServerMiddlewareFactory() + }) as server: + client_auth_middleware = ClientHeaderAuthMiddlewareFactory() + client = FlightClient( + ('localhost', server.port), + middleware=[client_auth_middleware] + ) + encoded_credentials = base64.b64encode(b'test:password') + options = flight.FlightCallOptions(headers=[ + (b'authorization', b'Basic ' + encoded_credentials) + ]) + result = list(client.do_action( + action=flight.Action('test-action', b''), options=options)) + assert result[0].body.to_pybytes() == b'token1234' + assert client_auth_middleware.call_credential[0] == b'authorization' + assert client_auth_middleware.call_credential[1] == \ + b'Bearer ' + b'token1234' + result2 = list(client.do_action( + action=flight.Action('test-action', b''), options=options)) + assert result2[0].body.to_pybytes() == b'token1234' + assert client_auth_middleware.call_credential[0] == b'authorization' + assert client_auth_middleware.call_credential[1] == \ + b'Bearer ' + b'token1234' + client.close() + + +def test_arbitrary_headers_in_flight_call_options(): + """Test passing multiple arbitrary headers to the middleware.""" + with ArbitraryHeadersFlightServer( + auth_handler=no_op_auth_handler, + middleware={ + "auth": HeaderAuthServerMiddlewareFactory(), + "arbitrary-headers": ArbitraryHeadersServerMiddlewareFactory() + }) as server, \ + FlightClient(('localhost', server.port)) as client: + token_pair = client.authenticate_basic_token(b'test', b'password') + assert token_pair[0] == b'authorization' + assert token_pair[1] == b'Bearer token1234' + options = flight.FlightCallOptions(headers=[ + token_pair, + (b'test-header-1', b'value1'), + (b'test-header-2', b'value2') + ]) + result = list(client.do_action(flight.Action( + "test-action", b""), options=options)) + assert result[0].body.to_pybytes() == b'value1' + assert result[1].body.to_pybytes() == b'value2' + + +def test_location_invalid(): + """Test constructing invalid URIs.""" + with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"): + flight.connect("%") + + with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"): + ConstantFlightServer("%") + + +def test_location_unknown_scheme(): + """Test creating locations for unknown 
schemes.""" + assert flight.Location("s3://foo").uri == b"s3://foo" + assert flight.Location("https://example.com/bar.parquet").uri == \ + b"https://example.com/bar.parquet" + + +@pytest.mark.slow +@pytest.mark.requires_testing_data +def test_tls_fails(): + """Make sure clients cannot connect when cert verification fails.""" + certs = example_tls_certs() + + # Ensure client doesn't connect when certificate verification + # fails (this is a slow test since gRPC does retry a few times) + with ConstantFlightServer(tls_certificates=certs["certificates"]) as s, \ + FlightClient("grpc+tls://localhost:" + str(s.port)) as client: + # gRPC error messages change based on version, so don't look + # for a particular error + with pytest.raises(flight.FlightUnavailableError): + client.do_get(flight.Ticket(b'ints')).read_all() + + +@pytest.mark.requires_testing_data +def test_tls_do_get(): + """Try a simple do_get call over TLS.""" + table = simple_ints_table() + certs = example_tls_certs() + + with ConstantFlightServer(tls_certificates=certs["certificates"]) as s, \ + FlightClient(('localhost', s.port), + tls_root_certs=certs["root_cert"]) as client: + data = client.do_get(flight.Ticket(b'ints')).read_all() + assert data.equals(table) + + +@pytest.mark.requires_testing_data +def test_tls_disable_server_verification(): + """Try a simple do_get call over TLS with server verification disabled.""" + table = simple_ints_table() + certs = example_tls_certs() + + with ConstantFlightServer(tls_certificates=certs["certificates"]) as s: + try: + client = FlightClient(('localhost', s.port), + disable_server_verification=True) + except NotImplementedError: + pytest.skip('disable_server_verification feature is not available') + data = client.do_get(flight.Ticket(b'ints')).read_all() + assert data.equals(table) + client.close() + + +@pytest.mark.requires_testing_data +def test_tls_override_hostname(): + """Check that incorrectly overriding the hostname fails.""" + certs = example_tls_certs() + + with ConstantFlightServer(tls_certificates=certs["certificates"]) as s, \ + flight.connect(('localhost', s.port), + tls_root_certs=certs["root_cert"], + override_hostname="fakehostname") as client: + with pytest.raises(flight.FlightUnavailableError): + client.do_get(flight.Ticket(b'ints')) + + +def test_flight_do_get_metadata(): + """Try a simple do_get call with metadata.""" + data = [ + pa.array([-10, -5, 0, 5, 10]) + ] + table = pa.Table.from_arrays(data, names=['a']) + + batches = [] + with MetadataFlightServer() as server, \ + FlightClient(('localhost', server.port)) as client: + reader = client.do_get(flight.Ticket(b'')) + idx = 0 + while True: + try: + batch, metadata = reader.read_chunk() + batches.append(batch) + server_idx, = struct.unpack('= 0 + assert file_info.mtime_ns == pytest.approx( + file_info.mtime.timestamp() * 1e9) + # It's an aware UTC datetime + tzinfo = file_info.mtime.tzinfo + assert tzinfo is not None + assert tzinfo.utcoffset(None) == timedelta(0) + + +def check_mtime_absent(file_info): + assert file_info.mtime is None + assert file_info.mtime_ns is None + + +def check_mtime_or_absent(file_info): + if file_info.mtime is None: + check_mtime_absent(file_info) + else: + check_mtime(file_info) + + +def skip_fsspec_s3fs(fs): + if fs.type_name == "py::fsspec+('s3', 's3a')": + pytest.xfail(reason="Not working with fsspec's s3fs") + + +def skip_azure(fs, reason): + if fs.type_name == "abfs": + pytest.skip(reason=reason) + + +@pytest.mark.s3 +def test_s3fs_limited_permissions_create_bucket(s3_server): + from 
pyarrow.fs import S3FileSystem + _configure_s3_limited_user(s3_server, _minio_limited_policy) + host, port, _, _ = s3_server['connection'] + + fs = S3FileSystem( + access_key='limited', + secret_key='limited123', + endpoint_override='{}:{}'.format(host, port), + scheme='http' + ) + fs.create_dir('existing-bucket/test') + + with pytest.raises(pa.ArrowIOError, match="Bucket 'new-bucket' not found"): + fs.create_dir('new-bucket') + + with pytest.raises(pa.ArrowIOError, match="Would delete bucket"): + fs.delete_dir('existing-bucket') + + +def test_file_info_constructor(): + dt = datetime.fromtimestamp(1568799826, timezone.utc) + + info = FileInfo("foo/bar") + assert info.path == "foo/bar" + assert info.base_name == "bar" + assert info.type == FileType.Unknown + assert info.size is None + check_mtime_absent(info) + + info = FileInfo("foo/baz.txt", type=FileType.File, size=123, + mtime=1568799826.5) + assert info.path == "foo/baz.txt" + assert info.base_name == "baz.txt" + assert info.type == FileType.File + assert info.size == 123 + assert info.mtime_ns == 1568799826500000000 + check_mtime(info) + + info = FileInfo("foo", type=FileType.Directory, mtime=dt) + assert info.path == "foo" + assert info.base_name == "foo" + assert info.type == FileType.Directory + assert info.size is None + assert info.mtime == dt + assert info.mtime_ns == 1568799826000000000 + check_mtime(info) + + +def test_cannot_instantiate_base_filesystem(): + with pytest.raises(TypeError): + FileSystem() + + +def test_filesystem_equals(): + fs0 = LocalFileSystem() + fs1 = LocalFileSystem() + fs2 = _MockFileSystem() + + assert fs0.equals(fs0) + assert fs0.equals(fs1) + with pytest.raises(TypeError): + fs0.equals('string') + assert fs0 == fs0 == fs1 + assert fs0 != 4 + + assert fs2 == fs2 + assert fs2 != _MockFileSystem() + + assert SubTreeFileSystem('/base', fs0) == SubTreeFileSystem('/base', fs0) + assert SubTreeFileSystem('/base', fs0) != SubTreeFileSystem('/base', fs2) + assert SubTreeFileSystem('/base', fs0) != SubTreeFileSystem('/other', fs0) + + +def test_filesystem_equals_none(fs): + with pytest.raises(TypeError, match="got NoneType"): + fs.equals(None) + + assert fs is not None + + +def test_subtree_filesystem(): + localfs = LocalFileSystem() + + subfs = SubTreeFileSystem('/base', localfs) + assert subfs.base_path == '/base/' + assert subfs.base_fs == localfs + assert repr(subfs).startswith('SubTreeFileSystem(base_path=/base/, ' + 'base_fs= not selecting the nested file_c + selector = FileSelector(base_dir, recursive=False) + + infos = fs.get_file_info(selector) + if fs.type_name == "py::fsspec+('s3', 's3a')": + # s3fs only lists directories if they are not empty + assert len(infos) == 3 + else: + assert len(infos) == 4 + + finally: + fs.delete_dir(base_dir) + + +def test_create_dir(fs, pathfn): + # s3fs fails deleting dir fails if it is empty + # (https://github.com/dask/s3fs/issues/317) + skip_fsspec_s3fs(fs) + d = pathfn('test-directory/') + + with pytest.raises(pa.ArrowIOError): + fs.delete_dir(d) + + fs.create_dir(d) + fs.delete_dir(d) + + d = pathfn('deeply/nested/test-directory/') + fs.create_dir(d, recursive=True) + fs.delete_dir(d) + + +def test_delete_dir(fs, pathfn): + skip_fsspec_s3fs(fs) + + d = pathfn('directory/') + nd = pathfn('directory/nested/') + + fs.create_dir(nd) + fs.delete_dir(d) + with pytest.raises(pa.ArrowIOError): + fs.delete_dir(nd) + with pytest.raises(pa.ArrowIOError): + fs.delete_dir(d) + + +def test_delete_dir_with_explicit_subdir(fs, pathfn): + # GH-38618: regression with AWS failing to 
delete directories, + depending on whether they were created explicitly. Note that + # Minio doesn't reproduce the issue, so this test is not a regression + # test in itself. + skip_fsspec_s3fs(fs) + + d = pathfn('directory/') + nd = pathfn('directory/nested/') + + # deleting dir with explicit subdir + fs.create_dir(d) + fs.create_dir(nd) + fs.delete_dir(d) + dir_info = fs.get_file_info(d) + assert dir_info.type == FileType.NotFound + + # deleting dir with blob in explicit subdir + d = pathfn('directory2') + nd = pathfn('directory2/nested') + f = pathfn('directory2/nested/target-file') + + fs.create_dir(d) + fs.create_dir(nd) + with fs.open_output_stream(f) as s: + s.write(b'data') + + fs.delete_dir(d) + dir_info = fs.get_file_info(d) + assert dir_info.type == FileType.NotFound + + +def test_delete_dir_contents(fs, pathfn): + skip_fsspec_s3fs(fs) + + d = pathfn('directory/') + nd = pathfn('directory/nested/') + + fs.create_dir(nd) + fs.delete_dir_contents(d) + with pytest.raises(pa.ArrowIOError): + fs.delete_dir(nd) + fs.delete_dir_contents(nd, missing_dir_ok=True) + with pytest.raises(pa.ArrowIOError): + fs.delete_dir_contents(nd) + fs.delete_dir(d) + with pytest.raises(pa.ArrowIOError): + fs.delete_dir(d) + + +def _check_root_dir_contents(config): + fs = config['fs'] + pathfn = config['pathfn'] + + d = pathfn('directory/') + nd = pathfn('directory/nested/') + + fs.create_dir(nd) + with pytest.raises(pa.ArrowInvalid): + fs.delete_dir_contents("") + with pytest.raises(pa.ArrowInvalid): + fs.delete_dir_contents("/") + with pytest.raises(pa.ArrowInvalid): + fs.delete_dir_contents("//") + + fs.delete_dir_contents("", accept_root_dir=True) + fs.delete_dir_contents("/", accept_root_dir=True) + fs.delete_dir_contents("//", accept_root_dir=True) + with pytest.raises(pa.ArrowIOError): + fs.delete_dir(d) + + +def test_delete_root_dir_contents(mockfs, py_mockfs): + _check_root_dir_contents(mockfs) + _check_root_dir_contents(py_mockfs) + + +def test_copy_file(fs, pathfn): + s = pathfn('test-copy-source-file') + t = pathfn('test-copy-target-file') + + with fs.open_output_stream(s): + pass + + fs.copy_file(s, t) + fs.delete_file(s) + fs.delete_file(t) + + +def test_move_directory(fs, pathfn, allow_move_dir): + # TODO(GH-40025): Stop skipping this test + skip_azure(fs, "Not implemented yet for Azure. See GH-40025") + + # move directory (doesn't work with S3) + s = pathfn('source-dir/') + t = pathfn('target-dir/') + + fs.create_dir(s) + + if allow_move_dir: + fs.move(s, t) + with pytest.raises(pa.ArrowIOError): + fs.delete_dir(s) + fs.delete_dir(t) + else: + with pytest.raises(pa.ArrowIOError): + fs.move(s, t) + + +def test_move_file(fs, pathfn): + # s3fs moving a file with recursive=True on latest 0.5 version + # (https://github.com/dask/s3fs/issues/394) + skip_fsspec_s3fs(fs) + + # TODO(GH-40025): Stop skipping this test + skip_azure(fs, "Not implemented yet for Azure. 
See GH-40025") + + s = pathfn('test-move-source-file') + t = pathfn('test-move-target-file') + + with fs.open_output_stream(s): + pass + + fs.move(s, t) + with pytest.raises(pa.ArrowIOError): + fs.delete_file(s) + fs.delete_file(t) + + +def test_delete_file(fs, pathfn): + p = pathfn('test-delete-target-file') + with fs.open_output_stream(p): + pass + + fs.delete_file(p) + with pytest.raises(pa.ArrowIOError): + fs.delete_file(p) + + d = pathfn('test-delete-nested') + fs.create_dir(d) + f = pathfn('test-delete-nested/target-file') + with fs.open_output_stream(f) as s: + s.write(b'data') + + fs.delete_dir(d) + + +def identity(v): + return v + + +@pytest.mark.gzip +@pytest.mark.parametrize( + ('compression', 'buffer_size', 'compressor'), + [ + (None, None, identity), + (None, 64, identity), + ('gzip', None, gzip.compress), + ('gzip', 256, gzip.compress), + ] +) +def test_open_input_stream(fs, pathfn, compression, buffer_size, compressor): + p = pathfn('open-input-stream') + + data = b'some data for reading\n' * 512 + with fs.open_output_stream(p) as s: + s.write(compressor(data)) + + with fs.open_input_stream(p, compression, buffer_size) as s: + result = s.read() + + assert result == data + + +def test_open_input_file(fs, pathfn): + p = pathfn('open-input-file') + + data = b'some data' * 1024 + with fs.open_output_stream(p) as s: + s.write(data) + + read_from = len(b'some data') * 512 + with fs.open_input_file(p) as f: + result = f.read() + assert result == data + + with fs.open_input_file(p) as f: + f.seek(read_from) + result = f.read() + + assert result == data[read_from:] + + +def test_open_input_stream_not_found(fs, pathfn): + # The proper exception should be raised for this common case (ARROW-15896) + p = pathfn('open-input-stream-not-found') + with pytest.raises(FileNotFoundError): + fs.open_input_stream(p) + + +@pytest.mark.gzip +@pytest.mark.parametrize( + ('compression', 'buffer_size', 'decompressor'), + [ + (None, None, identity), + (None, 64, identity), + ('gzip', None, gzip.decompress), + ('gzip', 256, gzip.decompress), + ] +) +def test_open_output_stream(fs, pathfn, compression, buffer_size, + decompressor): + p = pathfn('open-output-stream') + + data = b'some data for writing' * 1024 + with fs.open_output_stream(p, compression, buffer_size) as f: + f.write(data) + + with fs.open_input_stream(p, compression, buffer_size) as f: + assert f.read(len(data)) == data + + +@pytest.mark.gzip +@pytest.mark.parametrize( + ('compression', 'buffer_size', 'compressor', 'decompressor'), + [ + (None, None, identity, identity), + (None, 64, identity, identity), + ('gzip', None, gzip.compress, gzip.decompress), + ('gzip', 256, gzip.compress, gzip.decompress), + ] +) +def test_open_append_stream(fs, pathfn, compression, buffer_size, compressor, + decompressor, allow_append_to_file): + p = pathfn('open-append-stream') + + initial = compressor(b'already existing') + with fs.open_output_stream(p) as s: + s.write(initial) + + if allow_append_to_file: + with fs.open_append_stream(p, compression=compression, + buffer_size=buffer_size) as f: + f.write(b'\nnewly added') + + with fs.open_input_stream(p) as f: + result = f.read() + + result = decompressor(result) + assert result == b'already existing\nnewly added' + else: + with pytest.raises(pa.ArrowNotImplementedError): + fs.open_append_stream(p, compression=compression, + buffer_size=buffer_size) + + +def test_open_output_stream_metadata(fs, pathfn): + p = pathfn('open-output-stream-metadata') + metadata = {'Content-Type': 'x-pyarrow/test'} + + data = 
b'some data' + with fs.open_output_stream(p, metadata=metadata) as f: + f.write(data) + + with fs.open_input_stream(p) as f: + assert f.read() == data + got_metadata = f.metadata() + + if fs.type_name in ['s3', 'gcs', 'abfs'] or 'mock' in fs.type_name: + # TODO(GH-40026): Stop skipping this test + skip_azure( + fs, "Azure filesystem currently only returns system metadata not user " + "metadata. See GH-40026") + for k, v in metadata.items(): + assert got_metadata[k] == v.encode() + else: + assert got_metadata == {} + + +def test_localfs_options(): + # LocalFileSystem instantiation + LocalFileSystem(use_mmap=False) + + with pytest.raises(TypeError): + LocalFileSystem(xxx=False) + + +def test_localfs_errors(localfs): + # Local filesystem errors should raise the right Python exceptions + # (e.g. FileNotFoundError) + fs = localfs['fs'] + with assert_file_not_found(): + fs.open_input_stream('/non/existent/file') + with assert_file_not_found(): + fs.open_output_stream('/non/existent/file') + with assert_file_not_found(): + fs.create_dir('/non/existent/dir', recursive=False) + with assert_file_not_found(): + fs.delete_dir('/non/existent/dir') + with assert_file_not_found(): + fs.delete_file('/non/existent/dir') + with assert_file_not_found(): + fs.move('/non/existent', '/xxx') + with assert_file_not_found(): + fs.copy_file('/non/existent', '/xxx') + + +def test_localfs_file_info(localfs): + fs = localfs['fs'] + + file_path = pathlib.Path(__file__) + dir_path = file_path.parent + [file_info, dir_info] = fs.get_file_info([file_path.as_posix(), + dir_path.as_posix()]) + assert file_info.size == file_path.stat().st_size + assert file_info.mtime_ns == file_path.stat().st_mtime_ns + check_mtime(file_info) + assert dir_info.mtime_ns == dir_path.stat().st_mtime_ns + check_mtime(dir_info) + + +def test_mockfs_mtime_roundtrip(mockfs): + dt = datetime.fromtimestamp(1568799826, timezone.utc) + fs = _MockFileSystem(dt) + + with fs.open_output_stream('foo'): + pass + [info] = fs.get_file_info(['foo']) + assert info.mtime == dt + + +@pytest.mark.gcs +def test_gcs_options(pickle_module): + from pyarrow.fs import GcsFileSystem + dt = datetime.now() + fs = GcsFileSystem(access_token='abc', + target_service_account='service_account@apache', + credential_token_expiration=dt, + default_bucket_location='us-west2', + scheme='https', endpoint_override='localhost:8999', + project_id='test-project-id') + assert isinstance(fs, GcsFileSystem) + assert fs.default_bucket_location == 'us-west2' + assert fs.project_id == 'test-project-id' + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs = GcsFileSystem() + assert isinstance(fs, GcsFileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs = GcsFileSystem(anonymous=True) + assert isinstance(fs, GcsFileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs = GcsFileSystem(default_metadata={"ACL": "authenticated-read", + "Content-Type": "text/plain"}) + assert isinstance(fs, GcsFileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + with pytest.raises(ValueError): + GcsFileSystem(access_token='access') + with pytest.raises(ValueError): + GcsFileSystem(anonymous=True, access_token='secret') + with pytest.raises(ValueError): + GcsFileSystem(anonymous=True, target_service_account='acct') + with pytest.raises(ValueError): + GcsFileSystem(credential_token_expiration=datetime.now()) + + +@pytest.mark.s3 +def test_s3_options(pickle_module): + from pyarrow.fs import (AwsDefaultS3RetryStrategy, + 
AwsStandardS3RetryStrategy, S3FileSystem, + S3RetryStrategy) + + fs = S3FileSystem(access_key='access', secret_key='secret', + session_token='token', region='us-east-2', + scheme='https', endpoint_override='localhost:8999') + assert isinstance(fs, S3FileSystem) + assert fs.region == 'us-east-2' + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs = S3FileSystem(role_arn='role', session_name='session', + external_id='id', load_frequency=100) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + # Note that the retry strategy won't survive pickling for now + fs = S3FileSystem( + retry_strategy=AwsStandardS3RetryStrategy(max_attempts=5)) + assert isinstance(fs, S3FileSystem) + + fs = S3FileSystem( + retry_strategy=AwsDefaultS3RetryStrategy(max_attempts=5)) + assert isinstance(fs, S3FileSystem) + + fs2 = S3FileSystem(role_arn='role') + assert isinstance(fs2, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs2 + assert fs2 != fs + + fs = S3FileSystem(anonymous=True) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs = S3FileSystem(background_writes=True) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs2 = S3FileSystem(background_writes=True, + default_metadata={"ACL": "authenticated-read", + "Content-Type": "text/plain"}) + assert isinstance(fs2, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs2 + assert fs2 != fs + + fs = S3FileSystem(allow_bucket_creation=True, allow_bucket_deletion=True) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs = S3FileSystem(request_timeout=0.5, connect_timeout=0.25) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs2 = S3FileSystem(request_timeout=0.25, connect_timeout=0.5) + assert isinstance(fs2, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs2 + assert fs2 != fs + + fs = S3FileSystem(endpoint_override='localhost:8999', force_virtual_addressing=True) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + with pytest.raises(ValueError): + S3FileSystem(access_key='access') + with pytest.raises(ValueError): + S3FileSystem(secret_key='secret') + with pytest.raises(ValueError): + S3FileSystem(access_key='access', session_token='token') + with pytest.raises(ValueError): + S3FileSystem(secret_key='secret', session_token='token') + with pytest.raises(ValueError): + S3FileSystem( + access_key='access', secret_key='secret', role_arn='arn' + ) + with pytest.raises(ValueError): + S3FileSystem( + access_key='access', secret_key='secret', anonymous=True + ) + with pytest.raises(ValueError): + S3FileSystem(role_arn="arn", anonymous=True) + with pytest.raises(ValueError): + S3FileSystem(default_metadata=["foo", "bar"]) + with pytest.raises(ValueError): + S3FileSystem(retry_strategy=S3RetryStrategy()) + + +@pytest.mark.s3 +def test_s3_proxy_options(monkeypatch, pickle_module): + from pyarrow.fs import S3FileSystem + + # The following two are equivalent: + proxy_opts_1_dict = {'scheme': 'http', 'host': 'localhost', 'port': 8999} + proxy_opts_1_str = 'http://localhost:8999' + # The following two are equivalent: + proxy_opts_2_dict = {'scheme': 'https', 'host': 'localhost', 'port': 8080} + proxy_opts_2_str = 'https://localhost:8080' + + # Check dict case for 'proxy_options' + fs = 
S3FileSystem(proxy_options=proxy_opts_1_dict) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs = S3FileSystem(proxy_options=proxy_opts_2_dict) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + # Check str case for 'proxy_options' + fs = S3FileSystem(proxy_options=proxy_opts_1_str) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + fs = S3FileSystem(proxy_options=proxy_opts_2_str) + assert isinstance(fs, S3FileSystem) + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + # Check that two FSs using the same proxy_options dict are equal + fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict) + fs2 = S3FileSystem(proxy_options=proxy_opts_1_dict) + assert fs1 == fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) == fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict) + fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict) + assert fs1 == fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) == fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs1 + + # Check that two FSs using the same proxy_options str are equal + fs1 = S3FileSystem(proxy_options=proxy_opts_1_str) + fs2 = S3FileSystem(proxy_options=proxy_opts_1_str) + assert fs1 == fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) == fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_2_str) + fs2 = S3FileSystem(proxy_options=proxy_opts_2_str) + assert fs1 == fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) == fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs1 + + # Check that two FSs using equivalent proxy_options + # (one dict, one str) are equal + fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict) + fs2 = S3FileSystem(proxy_options=proxy_opts_1_str) + assert fs1 == fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) == fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict) + fs2 = S3FileSystem(proxy_options=proxy_opts_2_str) + assert fs1 == fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) == fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs1 + + # Check that two FSs using nonequivalent proxy_options are not equal + fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict) + fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict) + assert fs1 != fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) != fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) != fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_1_dict) + fs2 = S3FileSystem(proxy_options=proxy_opts_2_str) + assert fs1 != fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) != fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) != fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_1_str) + fs2 = S3FileSystem(proxy_options=proxy_opts_2_dict) + assert fs1 != fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) != fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) != fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_1_str) + fs2 = S3FileSystem(proxy_options=proxy_opts_2_str) + assert fs1 != fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) != fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) != fs1 + + # Check that two FSs (one using proxy_options and the other not) + # are not equal + fs1 
= S3FileSystem(proxy_options=proxy_opts_1_dict) + fs2 = S3FileSystem() + assert fs1 != fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) != fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) != fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_1_str) + fs2 = S3FileSystem() + assert fs1 != fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) != fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) != fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_2_dict) + fs2 = S3FileSystem() + assert fs1 != fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) != fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) != fs1 + + fs1 = S3FileSystem(proxy_options=proxy_opts_2_str) + fs2 = S3FileSystem() + assert fs1 != fs2 + assert pickle_module.loads(pickle_module.dumps(fs1)) != fs2 + assert pickle_module.loads(pickle_module.dumps(fs2)) != fs1 + + # Only dict and str are supported + with pytest.raises(TypeError): + S3FileSystem(proxy_options=('http', 'localhost', 9090)) + # Missing scheme + with pytest.raises(KeyError): + S3FileSystem(proxy_options={'host': 'localhost', 'port': 9090}) + # Missing host + with pytest.raises(KeyError): + S3FileSystem(proxy_options={'scheme': 'https', 'port': 9090}) + # Missing port + with pytest.raises(KeyError): + S3FileSystem(proxy_options={'scheme': 'http', 'host': 'localhost'}) + # Invalid proxy URI (invalid scheme httpsB) + with pytest.raises(pa.ArrowInvalid): + S3FileSystem(proxy_options='httpsB://localhost:9000') + # Invalid proxy_options dict (invalid scheme httpA) + with pytest.raises(pa.ArrowInvalid): + S3FileSystem(proxy_options={'scheme': 'httpA', 'host': 'localhost', + 'port': 8999}) + + +@pytest.mark.s3 +def test_s3fs_wrong_region(): + from pyarrow.fs import S3FileSystem + + # wrong region for bucket + # anonymous=True incase CI/etc has invalid credentials + fs = S3FileSystem(region='eu-north-1', anonymous=True) + + msg = ("When getting information for bucket 'voltrondata-labs-datasets': " + r"AWS Error UNKNOWN \(HTTP status 301\) during HeadBucket " + "operation: No response body. Looks like the configured region is " + "'eu-north-1' while the bucket is located in 'us-east-2'." + "|NETWORK_CONNECTION") + with pytest.raises(OSError, match=msg) as exc: + fs.get_file_info("voltrondata-labs-datasets") + + # Sometimes fails on unrelated network error, so next call would also fail. 
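+ # In that case, bail out early rather than retrying with the correct
+ # region below, since the follow-up request would most likely hit the
+ # same transient failure.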
+ if 'NETWORK_CONNECTION' in str(exc.value): + return + + fs = S3FileSystem(region='us-east-2', anonymous=True) + fs.get_file_info("voltrondata-labs-datasets") + + +@pytest.mark.azure +def test_azurefs_options(pickle_module): + from pyarrow.fs import AzureFileSystem + + fs1 = AzureFileSystem(account_name='fake-account-name') + assert isinstance(fs1, AzureFileSystem) + assert pickle_module.loads(pickle_module.dumps(fs1)) == fs1 + + fs2 = AzureFileSystem(account_name='fake-account-name', + account_key='fakeaccountkey') + assert isinstance(fs2, AzureFileSystem) + assert pickle_module.loads(pickle_module.dumps(fs2)) == fs2 + assert fs2 != fs1 + + fs3 = AzureFileSystem(account_name='fake-account', account_key='fakeaccount', + blob_storage_authority='fake-blob-authority', + dfs_storage_authority='fake-dfs-authority', + blob_storage_scheme='fake-blob-scheme', + dfs_storage_scheme='fake-dfs-scheme') + assert isinstance(fs3, AzureFileSystem) + assert pickle_module.loads(pickle_module.dumps(fs3)) == fs3 + assert fs3 != fs2 + + with pytest.raises(TypeError): + AzureFileSystem() + + +@pytest.mark.hdfs +def test_hdfs_options(hdfs_connection, pickle_module): + from pyarrow.fs import HadoopFileSystem + if not pa.have_libhdfs(): + pytest.skip('Cannot locate libhdfs') + + host, port, user = hdfs_connection + + replication = 2 + buffer_size = 64*1024 + default_block_size = 128*1024**2 + uri = ('hdfs://{}:{}/?user={}&replication={}&buffer_size={}' + '&default_block_size={}') + + hdfs1 = HadoopFileSystem(host, port, user='libhdfs', + replication=replication, buffer_size=buffer_size, + default_block_size=default_block_size) + hdfs2 = HadoopFileSystem.from_uri(uri.format( + host, port, 'libhdfs', replication, buffer_size, default_block_size + )) + hdfs3 = HadoopFileSystem.from_uri(uri.format( + host, port, 'me', replication, buffer_size, default_block_size + )) + hdfs4 = HadoopFileSystem.from_uri(uri.format( + host, port, 'me', replication + 1, buffer_size, default_block_size + )) + hdfs5 = HadoopFileSystem(host, port) + hdfs6 = HadoopFileSystem.from_uri('hdfs://{}:{}'.format(host, port)) + hdfs7 = HadoopFileSystem(host, port, user='localuser') + hdfs8 = HadoopFileSystem(host, port, user='localuser', + kerb_ticket="cache_path") + hdfs9 = HadoopFileSystem(host, port, user='localuser', + kerb_ticket=pathlib.Path("cache_path")) + hdfs10 = HadoopFileSystem(host, port, user='localuser', + kerb_ticket="cache_path2") + hdfs11 = HadoopFileSystem(host, port, user='localuser', + kerb_ticket="cache_path", + extra_conf={'hdfs_token': 'abcd'}) + + assert hdfs1 == hdfs2 + assert hdfs5 == hdfs6 + assert hdfs6 != hdfs7 + assert hdfs2 != hdfs3 + assert hdfs3 != hdfs4 + assert hdfs7 != hdfs5 + assert hdfs2 != hdfs3 + assert hdfs3 != hdfs4 + assert hdfs7 != hdfs8 + assert hdfs8 == hdfs9 + assert hdfs10 != hdfs9 + assert hdfs11 != hdfs8 + + with pytest.raises(TypeError): + HadoopFileSystem() + with pytest.raises(TypeError): + HadoopFileSystem.from_uri(3) + + for fs in [hdfs1, hdfs2, hdfs3, hdfs4, hdfs5, hdfs6, hdfs7, hdfs8, + hdfs9, hdfs10, hdfs11]: + assert pickle_module.loads(pickle_module.dumps(fs)) == fs + + host, port, user = hdfs_connection + + hdfs = HadoopFileSystem(host, port, user=user) + assert hdfs.get_file_info(FileSelector('/')) + + hdfs = HadoopFileSystem.from_uri( + "hdfs://{}:{}/?user={}".format(host, port, user) + ) + assert hdfs.get_file_info(FileSelector('/')) + + +@pytest.mark.parametrize(('uri', 'expected_klass', 'expected_path'), [ + # leading slashes are removed intentionally, because MockFileSystem doesn't 
+ # have a distinction between relative and absolute paths + ('mock:', _MockFileSystem, ''), + ('mock:foo/bar', _MockFileSystem, 'foo/bar'), + ('mock:/foo/bar', _MockFileSystem, 'foo/bar'), + ('mock:///foo/bar', _MockFileSystem, 'foo/bar'), + ('mock:///some%20path/%C3%A9', _MockFileSystem, 'some path/é'), + ('file:/', LocalFileSystem, '/'), + ('file:///', LocalFileSystem, '/'), + ('file:/foo/bar', LocalFileSystem, '/foo/bar'), + ('file:///foo/bar', LocalFileSystem, '/foo/bar'), + ('file:///some%20path/%C3%A9', LocalFileSystem, '/some path/é'), + # no %-decoding for non-URI inputs + ('/', LocalFileSystem, '/'), + ('/foo/bar', LocalFileSystem, '/foo/bar'), + ('/some path/%20é', LocalFileSystem, '/some path/%20é'), +]) +def test_filesystem_from_uri(uri, expected_klass, expected_path): + fs, path = FileSystem.from_uri(uri) + assert isinstance(fs, expected_klass) + assert path == expected_path + + +@pytest.mark.parametrize( + 'path', + ['', '/', 'foo/bar', '/foo/bar', __file__] +) +def test_filesystem_from_path_object(path): + p = pathlib.Path(path) + fs, path = FileSystem.from_uri(p) + assert isinstance(fs, LocalFileSystem) + assert path == p.resolve().absolute().as_posix() + + +@pytest.mark.s3 +def test_filesystem_from_uri_s3(s3_server): + from pyarrow.fs import S3FileSystem + + host, port, access_key, secret_key = s3_server['connection'] + + uri = "s3://{}:{}@mybucket/foo/bar?scheme=http&endpoint_override={}:{}"\ + "&allow_bucket_creation=True" \ + .format(access_key, secret_key, host, port) + + fs, path = FileSystem.from_uri(uri) + assert isinstance(fs, S3FileSystem) + assert path == "mybucket/foo/bar" + + fs.create_dir(path) + [info] = fs.get_file_info([path]) + assert info.path == path + assert info.type == FileType.Directory + + +@pytest.mark.gcs +def test_filesystem_from_uri_gcs(gcs_server): + from pyarrow.fs import GcsFileSystem + + host, port = gcs_server['connection'] + + uri = ("gs://anonymous@" + + f"mybucket/foo/bar?scheme=http&endpoint_override={host}:{port}&" + + "retry_limit_seconds=5&project_id=test-project-id") + + fs, path = FileSystem.from_uri(uri) + assert isinstance(fs, GcsFileSystem) + assert path == "mybucket/foo/bar" + + fs.create_dir(path) + [info] = fs.get_file_info([path]) + assert info.path == path + assert info.type == FileType.Directory + + +def test_py_filesystem(): + handler = DummyHandler() + fs = PyFileSystem(handler) + assert isinstance(fs, PyFileSystem) + assert fs.type_name == "py::dummy" + assert fs.handler is handler + + with pytest.raises(TypeError): + PyFileSystem(None) + + +def test_py_filesystem_equality(): + handler1 = DummyHandler(1) + handler2 = DummyHandler(2) + handler3 = DummyHandler(2) + fs1 = PyFileSystem(handler1) + fs2 = PyFileSystem(handler1) + fs3 = PyFileSystem(handler2) + fs4 = PyFileSystem(handler3) + + assert fs2 is not fs1 + assert fs3 is not fs2 + assert fs4 is not fs3 + assert fs2 == fs1 # Same handler + assert fs3 != fs2 # Unequal handlers + assert fs4 == fs3 # Equal handlers + + assert fs1 != LocalFileSystem() + assert fs1 != object() + + +def test_py_filesystem_pickling(pickle_module): + handler = DummyHandler() + fs = PyFileSystem(handler) + + serialized = pickle_module.dumps(fs) + restored = pickle_module.loads(serialized) + assert isinstance(restored, FileSystem) + assert restored == fs + assert restored.handler == handler + assert restored.type_name == "py::dummy" + + +def test_py_filesystem_lifetime(): + handler = DummyHandler() + fs = PyFileSystem(handler) + assert isinstance(fs, PyFileSystem) + wr = weakref.ref(handler) + 
handler = None + assert wr() is not None + fs = None + assert wr() is None + + # Taking the .handler attribute doesn't wreck reference counts + handler = DummyHandler() + fs = PyFileSystem(handler) + wr = weakref.ref(handler) + handler = None + assert wr() is fs.handler + assert wr() is not None + fs = None + assert wr() is None + + +def test_py_filesystem_get_file_info(): + handler = DummyHandler() + fs = PyFileSystem(handler) + + [info] = fs.get_file_info(['some/dir']) + assert info.path == 'some/dir' + assert info.type == FileType.Directory + + [info] = fs.get_file_info(['some/file']) + assert info.path == 'some/file' + assert info.type == FileType.File + + [info] = fs.get_file_info(['notfound']) + assert info.path == 'notfound' + assert info.type == FileType.NotFound + + with pytest.raises(TypeError): + fs.get_file_info(['badtype']) + + with pytest.raises(IOError): + fs.get_file_info(['xxx']) + + +def test_py_filesystem_get_file_info_selector(): + handler = DummyHandler() + fs = PyFileSystem(handler) + + selector = FileSelector(base_dir="somedir") + infos = fs.get_file_info(selector) + assert len(infos) == 2 + assert infos[0].path == "somedir/file1" + assert infos[0].type == FileType.File + assert infos[0].size == 123 + assert infos[1].path == "somedir/subdir1" + assert infos[1].type == FileType.Directory + assert infos[1].size is None + + selector = FileSelector(base_dir="somedir", recursive=True) + infos = fs.get_file_info(selector) + assert len(infos) == 3 + assert infos[0].path == "somedir/file1" + assert infos[1].path == "somedir/subdir1" + assert infos[2].path == "somedir/subdir1/file2" + + selector = FileSelector(base_dir="notfound") + with pytest.raises(FileNotFoundError): + fs.get_file_info(selector) + + selector = FileSelector(base_dir="notfound", allow_not_found=True) + assert fs.get_file_info(selector) == [] + + +def test_py_filesystem_ops(): + handler = DummyHandler() + fs = PyFileSystem(handler) + + fs.create_dir("recursive", recursive=True) + fs.create_dir("non-recursive", recursive=False) + with pytest.raises(IOError): + fs.create_dir("foobar") + + fs.delete_dir("delete_dir") + fs.delete_dir_contents("delete_dir_contents") + for path in ("", "/", "//"): + with pytest.raises(ValueError): + fs.delete_dir_contents(path) + fs.delete_dir_contents(path, accept_root_dir=True) + fs.delete_file("delete_file") + fs.move("move_from", "move_to") + fs.copy_file("copy_file_from", "copy_file_to") + + +def test_py_open_input_stream(): + fs = PyFileSystem(DummyHandler()) + + with fs.open_input_stream("somefile") as f: + assert f.read() == b"somefile:input_stream" + with pytest.raises(FileNotFoundError): + fs.open_input_stream("notfound") + + +def test_py_open_input_file(): + fs = PyFileSystem(DummyHandler()) + + with fs.open_input_file("somefile") as f: + assert f.read() == b"somefile:input_file" + with pytest.raises(FileNotFoundError): + fs.open_input_file("notfound") + + +def test_py_open_output_stream(): + fs = PyFileSystem(DummyHandler()) + + with fs.open_output_stream("somefile") as f: + f.write(b"data") + + +def test_py_open_append_stream(): + fs = PyFileSystem(DummyHandler()) + + with fs.open_append_stream("somefile") as f: + f.write(b"data") + + +@pytest.mark.s3 +def test_s3_real_aws(): + # Exercise connection code with an AWS-backed S3 bucket. + # This is a minimal integration check for ARROW-9261 and similar issues. 
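+ # It needs outbound network access to real AWS S3; anonymous access is
+ # used so no credentials are required, and the expected default region
+ # can be overridden via the PYARROW_TEST_S3_REGION environment variable.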
+ from pyarrow.fs import S3FileSystem + default_region = (os.environ.get('PYARROW_TEST_S3_REGION') or + 'us-east-1') + fs = S3FileSystem(anonymous=True) + assert fs.region == default_region + + fs = S3FileSystem(anonymous=True, region='us-east-2') + entries = fs.get_file_info(FileSelector( + 'voltrondata-labs-datasets/nyc-taxi')) + assert len(entries) > 0 + key = 'voltrondata-labs-datasets/nyc-taxi/year=2019/month=6/part-0.parquet' + with fs.open_input_stream(key) as f: + md = f.metadata() + assert 'Content-Type' in md + assert md['Last-Modified'] == b'2022-07-12T23:32:00Z' + # For some reason, the header value is quoted + # (both with AWS and Minio) + assert md['ETag'] == b'"4c6a76826a695c6ac61592bc30cda3df-16"' + + +@pytest.mark.s3 +def test_s3_real_aws_region_selection(): + # Taken from a registry of open S3-hosted datasets + # at https://github.com/awslabs/open-data-registry + fs, path = FileSystem.from_uri('s3://mf-nwp-models/README.txt') + assert fs.region == 'eu-west-1' + with fs.open_input_stream(path) as f: + assert b"Meteo-France Atmospheric models on AWS" in f.read(50) + + # Passing an explicit region disables auto-selection + fs, path = FileSystem.from_uri( + 's3://mf-nwp-models/README.txt?region=us-east-2') + assert fs.region == 'us-east-2' + # Reading from the wrong region may still work for public buckets... + + # Nonexistent bucket (hopefully, otherwise need to fix this test) + with pytest.raises(IOError, match="Bucket '.*' not found"): + FileSystem.from_uri('s3://x-arrow-nonexistent-bucket') + fs, path = FileSystem.from_uri( + 's3://x-arrow-nonexistent-bucket?region=us-east-3') + assert fs.region == 'us-east-3' + + +@pytest.mark.s3 +def test_resolve_s3_region(): + from pyarrow.fs import resolve_s3_region + assert resolve_s3_region('voltrondata-labs-datasets') == 'us-east-2' + assert resolve_s3_region('mf-nwp-models') == 'eu-west-1' + + with pytest.raises(ValueError, match="Not a valid bucket name"): + resolve_s3_region('foo/bar') + with pytest.raises(ValueError, match="Not a valid bucket name"): + resolve_s3_region('s3:bucket') + + +@pytest.mark.s3 +def test_copy_files(s3_connection, s3fs, tempdir): + fs = s3fs["fs"] + pathfn = s3fs["pathfn"] + + # create test file on S3 filesystem + path = pathfn('c.txt') + with fs.open_output_stream(path) as f: + f.write(b'test') + + # create URI for created file + host, port, access_key, secret_key = s3_connection + source_uri = ( + f"s3://{access_key}:{secret_key}@{path}" + f"?scheme=http&endpoint_override={host}:{port}" + ) + # copy from S3 URI to local file + local_path1 = str(tempdir / "c_copied1.txt") + copy_files(source_uri, local_path1) + + localfs = LocalFileSystem() + with localfs.open_input_stream(local_path1) as f: + assert f.read() == b"test" + + # copy from S3 path+filesystem to local file + local_path2 = str(tempdir / "c_copied2.txt") + copy_files(path, local_path2, source_filesystem=fs) + with localfs.open_input_stream(local_path2) as f: + assert f.read() == b"test" + + # copy to local file with URI + local_path3 = str(tempdir / "c_copied3.txt") + destination_uri = _filesystem_uri(local_path3) # file:// + copy_files(source_uri, destination_uri) + + with localfs.open_input_stream(local_path3) as f: + assert f.read() == b"test" + + # copy to local file with path+filesystem + local_path4 = str(tempdir / "c_copied4.txt") + copy_files(source_uri, local_path4, destination_filesystem=localfs) + + with localfs.open_input_stream(local_path4) as f: + assert f.read() == b"test" + + # copy with additional options + local_path5 = 
str(tempdir / "c_copied5.txt") + copy_files(source_uri, local_path5, chunk_size=1, use_threads=False) + + with localfs.open_input_stream(local_path5) as f: + assert f.read() == b"test" + + +def test_copy_files_directory(tempdir): + localfs = LocalFileSystem() + + # create source directory with 2 files + source_dir = tempdir / "source" + source_dir.mkdir() + with localfs.open_output_stream(str(source_dir / "file1")) as f: + f.write(b'test1') + with localfs.open_output_stream(str(source_dir / "file2")) as f: + f.write(b'test2') + + def check_copied_files(destination_dir): + with localfs.open_input_stream(str(destination_dir / "file1")) as f: + assert f.read() == b"test1" + with localfs.open_input_stream(str(destination_dir / "file2")) as f: + assert f.read() == b"test2" + + # Copy directory with local file paths + destination_dir1 = tempdir / "destination1" + # TODO need to create? + destination_dir1.mkdir() + copy_files(str(source_dir), str(destination_dir1)) + check_copied_files(destination_dir1) + + # Copy directory with path+filesystem + destination_dir2 = tempdir / "destination2" + destination_dir2.mkdir() + copy_files(str(source_dir), str(destination_dir2), + source_filesystem=localfs, destination_filesystem=localfs) + check_copied_files(destination_dir2) + + # Copy directory with URI + destination_dir3 = tempdir / "destination3" + destination_dir3.mkdir() + source_uri = _filesystem_uri(str(source_dir)) # file:// + destination_uri = _filesystem_uri(str(destination_dir3)) + copy_files(source_uri, destination_uri) + check_copied_files(destination_dir3) + + # Copy directory with Path objects + destination_dir4 = tempdir / "destination4" + destination_dir4.mkdir() + copy_files(source_dir, destination_dir4) + check_copied_files(destination_dir4) + + # copy with additional non-default options + destination_dir5 = tempdir / "destination5" + destination_dir5.mkdir() + copy_files(source_dir, destination_dir5, chunk_size=1, use_threads=False) + check_copied_files(destination_dir5) + + +@pytest.mark.s3 +def test_s3_finalize(): + # Once finalize_s3() was called, most/all operations on S3 filesystems + # should raise. 
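+ # The scenario runs in a fresh Python subprocess so that finalizing S3
+ # does not affect other tests running in this process.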
+ code = """if 1: + import pytest + from pyarrow.fs import (FileSystem, S3FileSystem, + ensure_s3_initialized, finalize_s3) + + fs, path = FileSystem.from_uri('s3://mf-nwp-models/README.txt') + assert fs.region == 'eu-west-1' + f = fs.open_input_stream(path) + f.read(50) + + finalize_s3() + + with pytest.raises(ValueError, match="S3 .* finalized"): + f.read(50) + with pytest.raises(ValueError, match="S3 .* finalized"): + fs.open_input_stream(path) + with pytest.raises(ValueError, match="S3 .* finalized"): + S3FileSystem(anonymous=True) + with pytest.raises(ValueError, match="S3 .* finalized"): + FileSystem.from_uri('s3://mf-nwp-models/README.txt') + """ + subprocess.check_call([sys.executable, "-c", code]) + + +@pytest.mark.s3 +def test_s3_finalize_region_resolver(): + # Same as test_s3_finalize(), but exercising region resolution + code = """if 1: + import pytest + from pyarrow.fs import resolve_s3_region, ensure_s3_initialized, finalize_s3 + + resolve_s3_region('mf-nwp-models') + + finalize_s3() + + # Testing both cached and uncached accesses + with pytest.raises(ValueError, match="S3 .* finalized"): + resolve_s3_region('mf-nwp-models') + with pytest.raises(ValueError, match="S3 .* finalized"): + resolve_s3_region('voltrondata-labs-datasets') + """ + subprocess.check_call([sys.executable, "-c", code]) + + +@pytest.mark.s3 +def test_concurrent_s3fs_init(): + # GH-39897: lazy concurrent initialization of S3 subsystem should not crash + code = """if 1: + import threading + import pytest + from pyarrow.fs import (FileSystem, S3FileSystem, + ensure_s3_initialized, finalize_s3) + threads = [] + fn = lambda: FileSystem.from_uri('s3://mf-nwp-models/README.txt') + for i in range(4): + thread = threading.Thread(target = fn) + threads.append(thread) + thread.start() + + for thread in threads: + thread.join() + + finalize_s3() + """ + subprocess.check_call([sys.executable, "-c", code]) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_gandiva.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_gandiva.py new file mode 100644 index 0000000000000000000000000000000000000000..80d119a48530d4285e0009cb71e40b3cc0feb33b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_gandiva.py @@ -0,0 +1,434 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
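+# The tests below exercise pyarrow.gandiva, the Python bindings for the
+# Gandiva LLVM-based expression compiler.  They all follow the same basic
+# pattern, sketched here for orientation (the names are illustrative only):
+#
+#     builder = gandiva.TreeExprBuilder()
+#     node_a = builder.make_field(schema.field("a"))
+#     expr = builder.make_expression(node_a, pa.field("res", pa.int32()))
+#     projector = gandiva.make_projector(schema, [expr],
+#                                        pa.default_memory_pool())
+#     result, = projector.evaluate(record_batch)
+#
+# Most of the tests carry the 'gandiva' pytest mark so they can be
+# deselected when pyarrow is built without Gandiva support.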
+ +import datetime +import pytest + +import pyarrow as pa + + +@pytest.mark.gandiva +def test_tree_exp_builder(): + import pyarrow.gandiva as gandiva + + builder = gandiva.TreeExprBuilder() + + field_a = pa.field('a', pa.int32()) + field_b = pa.field('b', pa.int32()) + + schema = pa.schema([field_a, field_b]) + + field_result = pa.field('res', pa.int32()) + + node_a = builder.make_field(field_a) + node_b = builder.make_field(field_b) + + assert node_a.return_type() == field_a.type + + condition = builder.make_function("greater_than", [node_a, node_b], + pa.bool_()) + if_node = builder.make_if(condition, node_a, node_b, pa.int32()) + + expr = builder.make_expression(if_node, field_result) + + assert expr.result().type == pa.int32() + + config = gandiva.Configuration(dump_ir=True) + projector = gandiva.make_projector( + schema, [expr], pa.default_memory_pool(), "NONE", config) + + # Gandiva generates compute kernel function named `@expr_X` + assert projector.llvm_ir.find("@expr_") != -1 + + a = pa.array([10, 12, -20, 5], type=pa.int32()) + b = pa.array([5, 15, 15, 17], type=pa.int32()) + e = pa.array([10, 15, 15, 17], type=pa.int32()) + input_batch = pa.RecordBatch.from_arrays([a, b], names=['a', 'b']) + + r, = projector.evaluate(input_batch) + assert r.equals(e) + + +@pytest.mark.gandiva +def test_table(): + import pyarrow.gandiva as gandiva + + table = pa.Table.from_arrays([pa.array([1.0, 2.0]), pa.array([3.0, 4.0])], + ['a', 'b']) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + node_b = builder.make_field(table.schema.field("b")) + + sum = builder.make_function("add", [node_a, node_b], pa.float64()) + + field_result = pa.field("c", pa.float64()) + expr = builder.make_expression(sum, field_result) + + projector = gandiva.make_projector( + table.schema, [expr], pa.default_memory_pool()) + + # TODO: Add .evaluate function which can take Tables instead of + # RecordBatches + r, = projector.evaluate(table.to_batches()[0]) + + e = pa.array([4.0, 6.0]) + assert r.equals(e) + + +@pytest.mark.gandiva +def test_filter(): + import pyarrow.gandiva as gandiva + + table = pa.Table.from_arrays([pa.array([1.0 * i for i in range(10000)])], + ['a']) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + thousand = builder.make_literal(1000.0, pa.float64()) + cond = builder.make_function("less_than", [node_a, thousand], pa.bool_()) + condition = builder.make_condition(cond) + + assert condition.result().type == pa.bool_() + + config = gandiva.Configuration(dump_ir=True) + filter = gandiva.make_filter(table.schema, condition, config) + # Gandiva generates compute kernel function named `@expr_X` + assert filter.llvm_ir.find("@expr_") != -1 + + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert result.to_array().equals(pa.array(range(1000), type=pa.uint32())) + + +@pytest.mark.gandiva +def test_in_expr(): + import pyarrow.gandiva as gandiva + + arr = pa.array(["ga", "an", "nd", "di", "iv", "va"]) + table = pa.Table.from_arrays([arr], ["a"]) + + # string + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + cond = builder.make_in_expression(node_a, ["an", "nd"], pa.string()) + condition = builder.make_condition(cond) + filter = gandiva.make_filter(table.schema, condition) + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert result.to_array().equals(pa.array([1, 2], type=pa.uint32())) + + # int32 + arr = pa.array([3, 1, 
4, 1, 5, 9, 2, 6, 5, 4]) + table = pa.Table.from_arrays([arr.cast(pa.int32())], ["a"]) + node_a = builder.make_field(table.schema.field("a")) + cond = builder.make_in_expression(node_a, [1, 5], pa.int32()) + condition = builder.make_condition(cond) + filter = gandiva.make_filter(table.schema, condition) + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert result.to_array().equals(pa.array([1, 3, 4, 8], type=pa.uint32())) + + # int64 + arr = pa.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 4]) + table = pa.Table.from_arrays([arr], ["a"]) + node_a = builder.make_field(table.schema.field("a")) + cond = builder.make_in_expression(node_a, [1, 5], pa.int64()) + condition = builder.make_condition(cond) + filter = gandiva.make_filter(table.schema, condition) + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert result.to_array().equals(pa.array([1, 3, 4, 8], type=pa.uint32())) + + +@pytest.mark.skip(reason="Gandiva C++ did not have *real* binary, " + "time and date support.") +def test_in_expr_todo(): + import pyarrow.gandiva as gandiva + # TODO: Implement reasonable support for timestamp, time & date. + # Current exceptions: + # pyarrow.lib.ArrowException: ExpressionValidationError: + # Evaluation expression for IN clause returns XXXX values are of typeXXXX + + # binary + arr = pa.array([b"ga", b"an", b"nd", b"di", b"iv", b"va"]) + table = pa.Table.from_arrays([arr], ["a"]) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + cond = builder.make_in_expression(node_a, [b'an', b'nd'], pa.binary()) + condition = builder.make_condition(cond) + + filter = gandiva.make_filter(table.schema, condition) + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert result.to_array().equals(pa.array([1, 2], type=pa.uint32())) + + # timestamp + datetime_1 = datetime.datetime.utcfromtimestamp(1542238951.621877) + datetime_2 = datetime.datetime.utcfromtimestamp(1542238911.621877) + datetime_3 = datetime.datetime.utcfromtimestamp(1542238051.621877) + + arr = pa.array([datetime_1, datetime_2, datetime_3]) + table = pa.Table.from_arrays([arr], ["a"]) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + cond = builder.make_in_expression(node_a, [datetime_2], pa.timestamp('ms')) + condition = builder.make_condition(cond) + + filter = gandiva.make_filter(table.schema, condition) + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert list(result.to_array()) == [1] + + # time + time_1 = datetime_1.time() + time_2 = datetime_2.time() + time_3 = datetime_3.time() + + arr = pa.array([time_1, time_2, time_3]) + table = pa.Table.from_arrays([arr], ["a"]) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + cond = builder.make_in_expression(node_a, [time_2], pa.time64('ms')) + condition = builder.make_condition(cond) + + filter = gandiva.make_filter(table.schema, condition) + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert list(result.to_array()) == [1] + + # date + date_1 = datetime_1.date() + date_2 = datetime_2.date() + date_3 = datetime_3.date() + + arr = pa.array([date_1, date_2, date_3]) + table = pa.Table.from_arrays([arr], ["a"]) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + cond = builder.make_in_expression(node_a, [date_2], pa.date32()) + condition = builder.make_condition(cond) + + filter = 
gandiva.make_filter(table.schema, condition) + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert list(result.to_array()) == [1] + + +@pytest.mark.gandiva +def test_boolean(): + import pyarrow.gandiva as gandiva + + table = pa.Table.from_arrays([ + pa.array([1., 31., 46., 3., 57., 44., 22.]), + pa.array([5., 45., 36., 73., 83., 23., 76.])], + ['a', 'b']) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + node_b = builder.make_field(table.schema.field("b")) + fifty = builder.make_literal(50.0, pa.float64()) + eleven = builder.make_literal(11.0, pa.float64()) + + cond_1 = builder.make_function("less_than", [node_a, fifty], pa.bool_()) + cond_2 = builder.make_function("greater_than", [node_a, node_b], + pa.bool_()) + cond_3 = builder.make_function("less_than", [node_b, eleven], pa.bool_()) + cond = builder.make_or([builder.make_and([cond_1, cond_2]), cond_3]) + condition = builder.make_condition(cond) + + filter = gandiva.make_filter(table.schema, condition) + result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool()) + assert result.to_array().equals(pa.array([0, 2, 5], type=pa.uint32())) + + +@pytest.mark.gandiva +def test_literals(): + import pyarrow.gandiva as gandiva + + builder = gandiva.TreeExprBuilder() + + builder.make_literal(True, pa.bool_()) + builder.make_literal(0, pa.uint8()) + builder.make_literal(1, pa.uint16()) + builder.make_literal(2, pa.uint32()) + builder.make_literal(3, pa.uint64()) + builder.make_literal(4, pa.int8()) + builder.make_literal(5, pa.int16()) + builder.make_literal(6, pa.int32()) + builder.make_literal(7, pa.int64()) + builder.make_literal(8.0, pa.float32()) + builder.make_literal(9.0, pa.float64()) + builder.make_literal("hello", pa.string()) + builder.make_literal(b"world", pa.binary()) + + builder.make_literal(True, "bool") + builder.make_literal(0, "uint8") + builder.make_literal(1, "uint16") + builder.make_literal(2, "uint32") + builder.make_literal(3, "uint64") + builder.make_literal(4, "int8") + builder.make_literal(5, "int16") + builder.make_literal(6, "int32") + builder.make_literal(7, "int64") + builder.make_literal(8.0, "float32") + builder.make_literal(9.0, "float64") + builder.make_literal("hello", "string") + builder.make_literal(b"world", "binary") + + with pytest.raises(TypeError): + builder.make_literal("hello", pa.int64()) + with pytest.raises(TypeError): + builder.make_literal(True, None) + + +@pytest.mark.gandiva +def test_regex(): + import pyarrow.gandiva as gandiva + + elements = ["park", "sparkle", "bright spark and fire", "spark"] + data = pa.array(elements, type=pa.string()) + table = pa.Table.from_arrays([data], names=['a']) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + regex = builder.make_literal("%spark%", pa.string()) + like = builder.make_function("like", [node_a, regex], pa.bool_()) + + field_result = pa.field("b", pa.bool_()) + expr = builder.make_expression(like, field_result) + + projector = gandiva.make_projector( + table.schema, [expr], pa.default_memory_pool()) + + r, = projector.evaluate(table.to_batches()[0]) + b = pa.array([False, True, True, True], type=pa.bool_()) + assert r.equals(b) + + +@pytest.mark.gandiva +def test_get_registered_function_signatures(): + import pyarrow.gandiva as gandiva + signatures = gandiva.get_registered_function_signatures() + + assert type(signatures[0].return_type()) is pa.DataType + assert type(signatures[0].param_types()) is list + assert 
hasattr(signatures[0], "name") + + +@pytest.mark.gandiva +def test_filter_project(): + import pyarrow.gandiva as gandiva + mpool = pa.default_memory_pool() + # Create a table with some sample data + array0 = pa.array([10, 12, -20, 5, 21, 29], pa.int32()) + array1 = pa.array([5, 15, 15, 17, 12, 3], pa.int32()) + array2 = pa.array([1, 25, 11, 30, -21, None], pa.int32()) + + table = pa.Table.from_arrays([array0, array1, array2], ['a', 'b', 'c']) + + field_result = pa.field("res", pa.int32()) + + builder = gandiva.TreeExprBuilder() + node_a = builder.make_field(table.schema.field("a")) + node_b = builder.make_field(table.schema.field("b")) + node_c = builder.make_field(table.schema.field("c")) + + greater_than_function = builder.make_function("greater_than", + [node_a, node_b], pa.bool_()) + filter_condition = builder.make_condition( + greater_than_function) + + project_condition = builder.make_function("less_than", + [node_b, node_c], pa.bool_()) + if_node = builder.make_if(project_condition, + node_b, node_c, pa.int32()) + expr = builder.make_expression(if_node, field_result) + + # Build a filter for the expressions. + filter = gandiva.make_filter(table.schema, filter_condition) + + # Build a projector for the expressions. + projector = gandiva.make_projector( + table.schema, [expr], mpool, "UINT32") + + # Evaluate filter + selection_vector = filter.evaluate(table.to_batches()[0], mpool) + + # Evaluate project + r, = projector.evaluate( + table.to_batches()[0], selection_vector) + + exp = pa.array([1, -21, None], pa.int32()) + assert r.equals(exp) + + +@pytest.mark.gandiva +def test_to_string(): + import pyarrow.gandiva as gandiva + builder = gandiva.TreeExprBuilder() + + assert str(builder.make_literal(2.0, pa.float64()) + ).startswith('(const double) 2 raw(') + assert str(builder.make_literal(2, pa.int64())) == '(const int64) 2' + assert str(builder.make_field(pa.field('x', pa.float64()))) == '(double) x' + assert str(builder.make_field(pa.field('y', pa.string()))) == '(string) y' + + field_z = builder.make_field(pa.field('z', pa.bool_())) + func_node = builder.make_function('not', [field_z], pa.bool_()) + assert str(func_node) == 'bool not((bool) z)' + + field_y = builder.make_field(pa.field('y', pa.bool_())) + and_node = builder.make_and([func_node, field_y]) + assert str(and_node) == 'bool not((bool) z) && (bool) y' + + +@pytest.mark.gandiva +def test_rejects_none(): + import pyarrow.gandiva as gandiva + + builder = gandiva.TreeExprBuilder() + + field_x = pa.field('x', pa.int32()) + schema = pa.schema([field_x]) + literal_true = builder.make_literal(True, pa.bool_()) + + with pytest.raises(TypeError): + builder.make_field(None) + + with pytest.raises(TypeError): + builder.make_if(literal_true, None, None, None) + + with pytest.raises(TypeError): + builder.make_and([literal_true, None]) + + with pytest.raises(TypeError): + builder.make_or([None, literal_true]) + + with pytest.raises(TypeError): + builder.make_in_expression(None, [1, 2, 3], pa.int32()) + + with pytest.raises(TypeError): + builder.make_expression(None, field_x) + + with pytest.raises(TypeError): + builder.make_condition(None) + + with pytest.raises(TypeError): + builder.make_function('less_than', [literal_true, None], pa.bool_()) + + with pytest.raises(TypeError): + gandiva.make_projector(schema, [None]) + + with pytest.raises(TypeError): + gandiva.make_filter(schema, None) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_gdb.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_gdb.py new file mode 
100644 index 0000000000000000000000000000000000000000..0d12d710dcf649cbc7e4d6fc2abc8563f5f362bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_gdb.py @@ -0,0 +1,1082 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from functools import lru_cache +import os +import re +import shutil +import subprocess +import sys + +import pytest + +import pyarrow as pa + + +pytestmark = pytest.mark.gdb + +here = os.path.dirname(os.path.abspath(__file__)) + +# The GDB script may be found in the source tree (if available) +# or in another location given by the ARROW_GDB_SCRIPT environment variable. +gdb_script = (os.environ.get('ARROW_GDB_SCRIPT') or + os.path.join(here, "../../../cpp/gdb_arrow.py")) + +gdb_command = ["gdb", "--nx"] + + +def environment_for_gdb(): + env = {} + for var in ['PATH', 'LD_LIBRARY_PATH']: + try: + env[var] = os.environ[var] + except KeyError: + pass + return env + + +@lru_cache() +def is_gdb_available(): + try: + # Try to use the same arguments as in GdbSession so that the + # same error return gets propagated. + proc = subprocess.run(gdb_command + ["--version"], + env=environment_for_gdb(), bufsize=0, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + except FileNotFoundError: + return False + return proc.returncode == 0 + + +@lru_cache() +def python_executable(): + path = shutil.which("python3") + assert path is not None, "Couldn't find python3 executable" + return path + + +def skip_if_gdb_unavailable(): + if not is_gdb_available(): + pytest.skip("gdb command unavailable") + + +def skip_if_gdb_script_unavailable(): + if not os.path.exists(gdb_script): + pytest.skip("gdb script not found") + + +class GdbSession: + proc = None + verbose = True + + def __init__(self, *args, **env): + # Let stderr through to let pytest display it separately on errors + gdb_env = environment_for_gdb() + gdb_env.update(env) + self.proc = subprocess.Popen(gdb_command + list(args), + env=gdb_env, bufsize=0, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + self.last_stdout = [] + self.last_stdout_line = b"" + + def wait_until_ready(self): + """ + Record output until the gdb prompt displays. Return recorded output. + """ + # TODO: add timeout? 
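+ # Read stdout in 4096-byte blocks.  Everything up to the last newline is
+ # accumulated in self.last_stdout, while the trailing partial line is
+ # kept in self.last_stdout_line so the loop can detect the "(gdb) "
+ # prompt that marks the end of a command's output.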
+ while (not self.last_stdout_line.startswith(b"(gdb) ") and + self.proc.poll() is None): + block = self.proc.stdout.read(4096) + if self.verbose: + sys.stdout.buffer.write(block) + sys.stdout.buffer.flush() + block, sep, last_line = block.rpartition(b"\n") + if sep: + self.last_stdout.append(self.last_stdout_line) + self.last_stdout.append(block + sep) + self.last_stdout_line = last_line + else: + assert block == b"" + self.last_stdout_line += last_line + + if self.proc.poll() is not None: + raise IOError("gdb session terminated unexpectedly") + + out = b"".join(self.last_stdout).decode('utf-8') + self.last_stdout = [] + self.last_stdout_line = b"" + return out + + def issue_command(self, line): + line = line.encode('utf-8') + b"\n" + if self.verbose: + sys.stdout.buffer.write(line) + sys.stdout.buffer.flush() + self.proc.stdin.write(line) + self.proc.stdin.flush() + + def run_command(self, line): + self.issue_command(line) + return self.wait_until_ready() + + def print_value(self, expr): + """ + Ask gdb to print the value of an expression and return the result. + """ + out = self.run_command(f"p {expr}") + out, n = re.subn(r"^\$\d+ = ", "", out) + assert n == 1, out + # gdb may add whitespace depending on result width, remove it + return out.strip() + + def select_frame(self, func_name): + """ + Select the innermost frame with the given function name. + """ + # Ideally, we would use the "frame function" command, + # but it's not available on old GDB versions (such as 8.1.1), + # so instead parse the stack trace for a matching frame number. + out = self.run_command("info stack") + pat = r"(?mi)^#(\d+)\s+.* in " + re.escape(func_name) + r"\b" + m = re.search(pat, out) + if m is None: + pytest.fail(f"Could not select frame for function {func_name}") + + frame_num = int(m[1]) + out = self.run_command(f"frame {frame_num}") + assert f"in {func_name}" in out + + def join(self): + if self.proc is not None: + self.proc.stdin.close() + self.proc.stdout.close() # avoid ResourceWarning + self.proc.kill() + self.proc.wait() + self.proc = None + + def __del__(self): + self.join() + + +@pytest.fixture(scope='session') +def gdb(): + skip_if_gdb_unavailable() + gdb = GdbSession("-q", python_executable()) + try: + gdb.wait_until_ready() + gdb.run_command("set confirm off") + gdb.run_command("set print array-indexes on") + # Make sure gdb formatting is not terminal-dependent + gdb.run_command("set width unlimited") + gdb.run_command("set charset UTF-8") + yield gdb + finally: + gdb.join() + + +@pytest.fixture(scope='session') +def gdb_arrow(gdb): + if 'deb' not in pa.cpp_build_info.build_type: + pytest.skip("Arrow C++ debug symbols not available") + + skip_if_gdb_script_unavailable() + gdb.run_command(f"source {gdb_script}") + + lib_path_var = 'PATH' if sys.platform == 'win32' else 'LD_LIBRARY_PATH' + lib_path = os.environ.get(lib_path_var) + if lib_path: + # GDB starts the inferior process in a pristine shell, need + # to propagate the library search path to find the Arrow DLL + gdb.run_command(f"set env {lib_path_var} {lib_path}") + + code = "from pyarrow.lib import _gdb_test_session; _gdb_test_session()" + out = gdb.run_command(f"run -c '{code}'") + assert ("Trace/breakpoint trap" in out or + "received signal" in out), out + gdb.select_frame("arrow::gdb::TestSession") + return gdb + + +def test_gdb_session(gdb): + out = gdb.run_command("show version") + assert out.startswith("GNU gdb ("), out + + +def test_gdb_arrow(gdb_arrow): + s = gdb_arrow.print_value("42 + 1") + assert s == "43" + + +def 
check_stack_repr(gdb, expr, expected): + """ + Check printing a stack-located value. + """ + s = gdb.print_value(expr) + if isinstance(expected, re.Pattern): + assert expected.match(s), s + else: + assert s == expected + + +def check_heap_repr(gdb, expr, expected): + """ + Check printing a heap-located value, given its address. + """ + s = gdb.print_value(f"*{expr}") + # GDB may prefix the value with an address or type specification + if s != expected: + assert s.endswith(f" {expected}") + + +def test_status(gdb_arrow): + check_stack_repr(gdb_arrow, "ok_status", "arrow::Status::OK()") + check_stack_repr(gdb_arrow, "error_status", + 'arrow::Status::IOError("This is an error")') + check_stack_repr( + gdb_arrow, "error_detail_status", + 'arrow::Status::IOError("This is an error", ' + 'detail=[custom-detail-id] "This is a detail")') + + check_stack_repr(gdb_arrow, "ok_result", "arrow::Result(42)") + check_stack_repr( + gdb_arrow, "error_result", + 'arrow::Result(arrow::Status::IOError("This is an error"))') + check_stack_repr( + gdb_arrow, "error_detail_result", + 'arrow::Result(arrow::Status::IOError("This is an error", ' + 'detail=[custom-detail-id] "This is a detail"))') + + +def test_buffer_stack(gdb_arrow): + check_stack_repr(gdb_arrow, "buffer_null", + "arrow::Buffer of size 0, read-only") + check_stack_repr(gdb_arrow, "buffer_abc", + 'arrow::Buffer of size 3, read-only, "abc"') + check_stack_repr( + gdb_arrow, "buffer_special_chars", + r'arrow::Buffer of size 12, read-only, "foo\"bar\000\r\n\t\037"') + check_stack_repr(gdb_arrow, "buffer_mutable", + 'arrow::MutableBuffer of size 3, mutable, "abc"') + + +def test_buffer_heap(gdb_arrow): + check_heap_repr(gdb_arrow, "heap_buffer", + 'arrow::Buffer of size 3, read-only, "abc"') + check_heap_repr(gdb_arrow, "heap_buffer_mutable.get()", + 'arrow::Buffer of size 3, mutable, "abc"') + + +def test_decimals(gdb_arrow): + v128 = "98765432109876543210987654321098765432" + check_stack_repr(gdb_arrow, "decimal128_zero", "arrow::Decimal128(0)") + check_stack_repr(gdb_arrow, "decimal128_pos", + f"arrow::Decimal128({v128})") + check_stack_repr(gdb_arrow, "decimal128_neg", + f"arrow::Decimal128(-{v128})") + check_stack_repr(gdb_arrow, "basic_decimal128_zero", + "arrow::BasicDecimal128(0)") + check_stack_repr(gdb_arrow, "basic_decimal128_pos", + f"arrow::BasicDecimal128({v128})") + check_stack_repr(gdb_arrow, "basic_decimal128_neg", + f"arrow::BasicDecimal128(-{v128})") + + v256 = ("9876543210987654321098765432109876543210" + "987654321098765432109876543210987654") + check_stack_repr(gdb_arrow, "decimal256_zero", "arrow::Decimal256(0)") + check_stack_repr(gdb_arrow, "decimal256_pos", + f"arrow::Decimal256({v256})") + check_stack_repr(gdb_arrow, "decimal256_neg", + f"arrow::Decimal256(-{v256})") + check_stack_repr(gdb_arrow, "basic_decimal256_zero", + "arrow::BasicDecimal256(0)") + check_stack_repr(gdb_arrow, "basic_decimal256_pos", + f"arrow::BasicDecimal256({v256})") + check_stack_repr(gdb_arrow, "basic_decimal256_neg", + f"arrow::BasicDecimal256(-{v256})") + + +def test_metadata(gdb_arrow): + check_heap_repr(gdb_arrow, "empty_metadata.get()", + "arrow::KeyValueMetadata of size 0") + check_heap_repr( + gdb_arrow, "metadata.get()", + ('arrow::KeyValueMetadata of size 2 = {' + '["key_text"] = "some value", ["key_binary"] = "z\\000\\037\\377"}')) + + +def test_types_stack(gdb_arrow): + check_stack_repr(gdb_arrow, "null_type", "arrow::null()") + check_stack_repr(gdb_arrow, "bool_type", "arrow::boolean()") + + check_stack_repr(gdb_arrow, "date32_type", 
"arrow::date32()") + check_stack_repr(gdb_arrow, "date64_type", "arrow::date64()") + check_stack_repr(gdb_arrow, "time_type_s", + "arrow::time32(arrow::TimeUnit::SECOND)") + check_stack_repr(gdb_arrow, "time_type_ms", + "arrow::time32(arrow::TimeUnit::MILLI)") + check_stack_repr(gdb_arrow, "time_type_us", + "arrow::time64(arrow::TimeUnit::MICRO)") + check_stack_repr(gdb_arrow, "time_type_ns", + "arrow::time64(arrow::TimeUnit::NANO)") + check_stack_repr(gdb_arrow, "timestamp_type_s", + "arrow::timestamp(arrow::TimeUnit::SECOND)") + check_stack_repr( + gdb_arrow, "timestamp_type_ms_timezone", + 'arrow::timestamp(arrow::TimeUnit::MILLI, "Europe/Paris")') + check_stack_repr(gdb_arrow, "timestamp_type_us", + "arrow::timestamp(arrow::TimeUnit::MICRO)") + check_stack_repr( + gdb_arrow, "timestamp_type_ns_timezone", + 'arrow::timestamp(arrow::TimeUnit::NANO, "Europe/Paris")') + + check_stack_repr(gdb_arrow, "day_time_interval_type", + "arrow::day_time_interval()") + check_stack_repr(gdb_arrow, "month_interval_type", + "arrow::month_interval()") + check_stack_repr(gdb_arrow, "month_day_nano_interval_type", + "arrow::month_day_nano_interval()") + check_stack_repr(gdb_arrow, "duration_type_s", + "arrow::duration(arrow::TimeUnit::SECOND)") + check_stack_repr(gdb_arrow, "duration_type_ns", + "arrow::duration(arrow::TimeUnit::NANO)") + + check_stack_repr(gdb_arrow, "decimal128_type", + "arrow::decimal128(16, 5)") + check_stack_repr(gdb_arrow, "decimal256_type", + "arrow::decimal256(42, 12)") + + check_stack_repr(gdb_arrow, "binary_type", "arrow::binary()") + check_stack_repr(gdb_arrow, "string_type", "arrow::utf8()") + check_stack_repr(gdb_arrow, "large_binary_type", "arrow::large_binary()") + check_stack_repr(gdb_arrow, "large_string_type", "arrow::large_utf8()") + check_stack_repr(gdb_arrow, "fixed_size_binary_type", + "arrow::fixed_size_binary(10)") + + check_stack_repr(gdb_arrow, "list_type", + "arrow::list(arrow::uint8())") + check_stack_repr(gdb_arrow, "large_list_type", + "arrow::large_list(arrow::large_utf8())") + check_stack_repr(gdb_arrow, "fixed_size_list_type", + "arrow::fixed_size_list(arrow::float64(), 3)") + check_stack_repr( + gdb_arrow, "map_type_unsorted", + "arrow::map(arrow::utf8(), arrow::binary(), keys_sorted=false)") + check_stack_repr( + gdb_arrow, "map_type_sorted", + "arrow::map(arrow::utf8(), arrow::binary(), keys_sorted=true)") + + check_stack_repr(gdb_arrow, "struct_type_empty", + "arrow::struct_({})") + check_stack_repr( + gdb_arrow, "struct_type", + ('arrow::struct_({arrow::field("ints", arrow::int8()), ' + 'arrow::field("strs", arrow::utf8(), nullable=false)})')) + + check_stack_repr( + gdb_arrow, "sparse_union_type", + ('arrow::sparse_union(fields={arrow::field("ints", arrow::int8()), ' + 'arrow::field("strs", arrow::utf8(), nullable=false)}, ' + 'type_codes={7, 42})')) + check_stack_repr( + gdb_arrow, "dense_union_type", + ('arrow::dense_union(fields={arrow::field("ints", arrow::int8()), ' + 'arrow::field("strs", arrow::utf8(), nullable=false)}, ' + 'type_codes={7, 42})')) + + check_stack_repr( + gdb_arrow, "dict_type_unordered", + "arrow::dictionary(arrow::int16(), arrow::utf8(), ordered=false)") + check_stack_repr( + gdb_arrow, "dict_type_ordered", + "arrow::dictionary(arrow::int16(), arrow::utf8(), ordered=true)") + + check_stack_repr( + gdb_arrow, "uuid_type", + ('arrow::ExtensionType "extension" ' + 'with storage type arrow::fixed_size_binary(16)')) + + +def test_types_heap(gdb_arrow): + check_heap_repr(gdb_arrow, "heap_null_type", "arrow::null()") + 
check_heap_repr(gdb_arrow, "heap_bool_type", "arrow::boolean()") + + check_heap_repr(gdb_arrow, "heap_time_type_ns", + "arrow::time64(arrow::TimeUnit::NANO)") + check_heap_repr( + gdb_arrow, "heap_timestamp_type_ns_timezone", + 'arrow::timestamp(arrow::TimeUnit::NANO, "Europe/Paris")') + + check_heap_repr(gdb_arrow, "heap_decimal128_type", + "arrow::decimal128(16, 5)") + + check_heap_repr(gdb_arrow, "heap_list_type", + "arrow::list(arrow::uint8())") + check_heap_repr(gdb_arrow, "heap_large_list_type", + "arrow::large_list(arrow::large_utf8())") + check_heap_repr(gdb_arrow, "heap_fixed_size_list_type", + "arrow::fixed_size_list(arrow::float64(), 3)") + check_heap_repr( + gdb_arrow, "heap_map_type", + "arrow::map(arrow::utf8(), arrow::binary(), keys_sorted=false)") + + check_heap_repr( + gdb_arrow, "heap_struct_type", + ('arrow::struct_({arrow::field("ints", arrow::int8()), ' + 'arrow::field("strs", arrow::utf8(), nullable=false)})')) + + check_heap_repr( + gdb_arrow, "heap_dict_type", + "arrow::dictionary(arrow::int16(), arrow::utf8(), ordered=false)") + + check_heap_repr( + gdb_arrow, "heap_uuid_type", + ('arrow::ExtensionType "extension" ' + 'with storage type arrow::fixed_size_binary(16)')) + + +def test_fields_stack(gdb_arrow): + check_stack_repr(gdb_arrow, "int_field", + 'arrow::field("ints", arrow::int64())') + check_stack_repr( + gdb_arrow, "float_field", + 'arrow::field("floats", arrow::float32(), nullable=false)') + + +def test_fields_heap(gdb_arrow): + check_heap_repr(gdb_arrow, "heap_int_field", + 'arrow::field("ints", arrow::int64())') + + +def test_scalars_stack(gdb_arrow): + check_stack_repr(gdb_arrow, "null_scalar", "arrow::NullScalar") + check_stack_repr(gdb_arrow, "bool_scalar", + "arrow::BooleanScalar of value true") + check_stack_repr(gdb_arrow, "bool_scalar_null", + "arrow::BooleanScalar of null value") + check_stack_repr(gdb_arrow, "int8_scalar", + "arrow::Int8Scalar of value -42") + check_stack_repr(gdb_arrow, "uint8_scalar", + "arrow::UInt8Scalar of value 234") + check_stack_repr(gdb_arrow, "int64_scalar", + "arrow::Int64Scalar of value -9223372036854775808") + check_stack_repr(gdb_arrow, "uint64_scalar", + "arrow::UInt64Scalar of value 18446744073709551615") + check_stack_repr(gdb_arrow, "half_float_scalar", + "arrow::HalfFloatScalar of value -1.5 [48640]") + check_stack_repr(gdb_arrow, "float_scalar", + "arrow::FloatScalar of value 1.25") + check_stack_repr(gdb_arrow, "double_scalar", + "arrow::DoubleScalar of value 2.5") + + check_stack_repr(gdb_arrow, "time_scalar_s", + "arrow::Time32Scalar of value 100s") + check_stack_repr(gdb_arrow, "time_scalar_ms", + "arrow::Time32Scalar of value 1000ms") + check_stack_repr(gdb_arrow, "time_scalar_us", + "arrow::Time64Scalar of value 10000us") + check_stack_repr(gdb_arrow, "time_scalar_ns", + "arrow::Time64Scalar of value 100000ns") + check_stack_repr(gdb_arrow, "time_scalar_null", + "arrow::Time64Scalar of null value [ns]") + + check_stack_repr(gdb_arrow, "duration_scalar_s", + "arrow::DurationScalar of value -100s") + check_stack_repr(gdb_arrow, "duration_scalar_ms", + "arrow::DurationScalar of value -1000ms") + check_stack_repr(gdb_arrow, "duration_scalar_us", + "arrow::DurationScalar of value -10000us") + check_stack_repr(gdb_arrow, "duration_scalar_ns", + "arrow::DurationScalar of value -100000ns") + check_stack_repr(gdb_arrow, "duration_scalar_null", + "arrow::DurationScalar of null value [ns]") + + check_stack_repr( + gdb_arrow, "timestamp_scalar_s", + "arrow::TimestampScalar of value 12345s [no timezone]") + 
check_stack_repr( + gdb_arrow, "timestamp_scalar_ms", + "arrow::TimestampScalar of value -123456ms [no timezone]") + check_stack_repr( + gdb_arrow, "timestamp_scalar_us", + "arrow::TimestampScalar of value 1234567us [no timezone]") + check_stack_repr( + gdb_arrow, "timestamp_scalar_ns", + "arrow::TimestampScalar of value -12345678ns [no timezone]") + check_stack_repr( + gdb_arrow, "timestamp_scalar_null", + "arrow::TimestampScalar of null value [ns, no timezone]") + + check_stack_repr( + gdb_arrow, "timestamp_scalar_s_tz", + 'arrow::TimestampScalar of value 12345s ["Europe/Paris"]') + check_stack_repr( + gdb_arrow, "timestamp_scalar_ms_tz", + 'arrow::TimestampScalar of value -123456ms ["Europe/Paris"]') + check_stack_repr( + gdb_arrow, "timestamp_scalar_us_tz", + 'arrow::TimestampScalar of value 1234567us ["Europe/Paris"]') + check_stack_repr( + gdb_arrow, "timestamp_scalar_ns_tz", + 'arrow::TimestampScalar of value -12345678ns ["Europe/Paris"]') + check_stack_repr( + gdb_arrow, "timestamp_scalar_null_tz", + 'arrow::TimestampScalar of null value [ns, "Europe/Paris"]') + + check_stack_repr(gdb_arrow, "month_interval_scalar", + "arrow::MonthIntervalScalar of value 23M") + check_stack_repr(gdb_arrow, "month_interval_scalar_null", + "arrow::MonthIntervalScalar of null value") + check_stack_repr(gdb_arrow, "day_time_interval_scalar", + "arrow::DayTimeIntervalScalar of value 23d-456ms") + check_stack_repr(gdb_arrow, "day_time_interval_scalar_null", + "arrow::DayTimeIntervalScalar of null value") + check_stack_repr( + gdb_arrow, "month_day_nano_interval_scalar", + "arrow::MonthDayNanoIntervalScalar of value 1M23d-456ns") + check_stack_repr( + gdb_arrow, "month_day_nano_interval_scalar_null", + "arrow::MonthDayNanoIntervalScalar of null value") + + check_stack_repr(gdb_arrow, "date32_scalar", + "arrow::Date32Scalar of value 23d [1970-01-24]") + check_stack_repr(gdb_arrow, "date32_scalar_null", + "arrow::Date32Scalar of null value") + check_stack_repr(gdb_arrow, "date64_scalar", + "arrow::Date64Scalar of value 3888000000ms [1970-02-15]") + check_stack_repr(gdb_arrow, "date64_scalar_null", + "arrow::Date64Scalar of null value") + + check_stack_repr( + gdb_arrow, "decimal128_scalar_null", + "arrow::Decimal128Scalar of null value [precision=10, scale=4]") + check_stack_repr( + gdb_arrow, "decimal128_scalar_pos_scale_pos", + "arrow::Decimal128Scalar of value 123.4567 [precision=10, scale=4]") + check_stack_repr( + gdb_arrow, "decimal128_scalar_pos_scale_neg", + "arrow::Decimal128Scalar of value -123.4567 [precision=10, scale=4]") + check_stack_repr( + gdb_arrow, "decimal128_scalar_neg_scale_pos", + ("arrow::Decimal128Scalar of value 1.234567e+10 " + "[precision=10, scale=-4]")) + check_stack_repr( + gdb_arrow, "decimal128_scalar_neg_scale_neg", + ("arrow::Decimal128Scalar of value -1.234567e+10 " + "[precision=10, scale=-4]")) + + check_stack_repr( + gdb_arrow, "decimal256_scalar_null", + "arrow::Decimal256Scalar of null value [precision=50, scale=4]") + check_stack_repr( + gdb_arrow, "decimal256_scalar_pos_scale_pos", + ("arrow::Decimal256Scalar of value " + "123456789012345678901234567890123456789012.3456 " + "[precision=50, scale=4]")) + check_stack_repr( + gdb_arrow, "decimal256_scalar_pos_scale_neg", + ("arrow::Decimal256Scalar of value " + "-123456789012345678901234567890123456789012.3456 " + "[precision=50, scale=4]")) + check_stack_repr( + gdb_arrow, "decimal256_scalar_neg_scale_pos", + ("arrow::Decimal256Scalar of value " + "1.234567890123456789012345678901234567890123456e+49 " + 
"[precision=50, scale=-4]")) + check_stack_repr( + gdb_arrow, "decimal256_scalar_neg_scale_neg", + ("arrow::Decimal256Scalar of value " + "-1.234567890123456789012345678901234567890123456e+49 " + "[precision=50, scale=-4]")) + + check_stack_repr( + gdb_arrow, "binary_scalar_null", + "arrow::BinaryScalar of null value") + check_stack_repr( + gdb_arrow, "binary_scalar_unallocated", + "arrow::BinaryScalar of value ") + check_stack_repr( + gdb_arrow, "binary_scalar_empty", + 'arrow::BinaryScalar of size 0, value ""') + check_stack_repr( + gdb_arrow, "binary_scalar_abc", + 'arrow::BinaryScalar of size 3, value "abc"') + check_stack_repr( + gdb_arrow, "binary_scalar_bytes", + r'arrow::BinaryScalar of size 3, value "\000\037\377"') + check_stack_repr( + gdb_arrow, "large_binary_scalar_abc", + 'arrow::LargeBinaryScalar of size 3, value "abc"') + + check_stack_repr( + gdb_arrow, "string_scalar_null", + "arrow::StringScalar of null value") + check_stack_repr( + gdb_arrow, "string_scalar_unallocated", + "arrow::StringScalar of value ") + check_stack_repr( + gdb_arrow, "string_scalar_empty", + 'arrow::StringScalar of size 0, value ""') + check_stack_repr( + gdb_arrow, "string_scalar_hehe", + 'arrow::StringScalar of size 6, value "héhé"') + # FIXME: excessive escaping ('\\xff' vs. '\x00') + check_stack_repr( + gdb_arrow, "string_scalar_invalid_chars", + r'arrow::StringScalar of size 11, value "abc\x00def\\xffghi"') + check_stack_repr( + gdb_arrow, "large_string_scalar_hehe", + 'arrow::LargeStringScalar of size 6, value "héhé"') + + check_stack_repr( + gdb_arrow, "fixed_size_binary_scalar", + 'arrow::FixedSizeBinaryScalar of size 3, value "abc"') + check_stack_repr( + gdb_arrow, "fixed_size_binary_scalar_null", + 'arrow::FixedSizeBinaryScalar of size 3, null with value " "') + + check_stack_repr( + gdb_arrow, "dict_scalar", + re.compile( + (r'^arrow::DictionaryScalar of index ' + r'arrow::Int8Scalar of value 42, ' + r'dictionary arrow::StringArray '))) + check_stack_repr( + gdb_arrow, "dict_scalar_null", + ('arrow::DictionaryScalar of type ' + 'arrow::dictionary(arrow::int8(), arrow::utf8(), ordered=false), ' + 'null value')) + + check_stack_repr( + gdb_arrow, "list_scalar", + ('arrow::ListScalar of value arrow::Int32Array of ' + 'length 3, offset 0, null count 0 = {[0] = 4, [1] = 5, [2] = 6}')) + check_stack_repr( + gdb_arrow, "list_scalar_null", + 'arrow::ListScalar of type arrow::list(arrow::int32()), null value') + check_stack_repr( + gdb_arrow, "large_list_scalar", + ('arrow::LargeListScalar of value arrow::Int32Array of ' + 'length 3, offset 0, null count 0 = {[0] = 4, [1] = 5, [2] = 6}')) + check_stack_repr( + gdb_arrow, "large_list_scalar_null", + ('arrow::LargeListScalar of type arrow::large_list(arrow::int32()), ' + 'null value')) + check_stack_repr( + gdb_arrow, "fixed_size_list_scalar", + ('arrow::FixedSizeListScalar of value arrow::Int32Array of ' + 'length 3, offset 0, null count 0 = {[0] = 4, [1] = 5, [2] = 6}')) + check_stack_repr( + gdb_arrow, "fixed_size_list_scalar_null", + ('arrow::FixedSizeListScalar of type ' + 'arrow::fixed_size_list(arrow::int32(), 3), null value')) + + check_stack_repr( + gdb_arrow, "struct_scalar", + ('arrow::StructScalar = {["ints"] = arrow::Int32Scalar of value 42, ' + '["strs"] = arrow::StringScalar of size 9, value "some text"}')) + check_stack_repr( + gdb_arrow, "struct_scalar_null", + ('arrow::StructScalar of type arrow::struct_(' + '{arrow::field("ints", arrow::int32()), ' + 'arrow::field("strs", arrow::utf8())}), null value')) + + check_stack_repr( + 
gdb_arrow, "sparse_union_scalar", + ('arrow::SparseUnionScalar of type code 7, ' + 'value arrow::Int32Scalar of value 43')) + check_stack_repr( + gdb_arrow, "sparse_union_scalar_null", re.compile( + r'^arrow::SparseUnionScalar of type arrow::sparse_union\(.*\), ' + r'type code 7, null value$')) + check_stack_repr( + gdb_arrow, "dense_union_scalar", + ('arrow::DenseUnionScalar of type code 7, ' + 'value arrow::Int32Scalar of value 43')) + check_stack_repr( + gdb_arrow, "dense_union_scalar_null", re.compile( + r'^arrow::DenseUnionScalar of type arrow::dense_union\(.*\), ' + r'type code 7, null value$')) + + check_stack_repr( + gdb_arrow, "extension_scalar", + ('arrow::ExtensionScalar of type "extension", ' + 'value arrow::FixedSizeBinaryScalar of size 16, ' + 'value "0123456789abcdef"')) + check_stack_repr( + gdb_arrow, "extension_scalar_null", + 'arrow::ExtensionScalar of type "extension", null value') + + +def test_scalars_heap(gdb_arrow): + check_heap_repr(gdb_arrow, "heap_null_scalar", "arrow::NullScalar") + check_heap_repr(gdb_arrow, "heap_bool_scalar", + "arrow::BooleanScalar of value true") + check_heap_repr( + gdb_arrow, "heap_decimal128_scalar", + "arrow::Decimal128Scalar of value 123.4567 [precision=10, scale=4]") + check_heap_repr( + gdb_arrow, "heap_decimal256_scalar", + ("arrow::Decimal256Scalar of value " + "123456789012345678901234567890123456789012.3456 " + "[precision=50, scale=4]")) + + check_heap_repr( + gdb_arrow, "heap_map_scalar", + ('arrow::MapScalar of type arrow::map(arrow::utf8(), arrow::int32(), ' + 'keys_sorted=false), value length 2, offset 0, null count 0')) + check_heap_repr( + gdb_arrow, "heap_map_scalar_null", + ('arrow::MapScalar of type arrow::map(arrow::utf8(), arrow::int32(), ' + 'keys_sorted=false), null value')) + + +def test_array_data(gdb_arrow): + check_stack_repr( + gdb_arrow, "int32_array_data", + ("arrow::ArrayData of type arrow::int32(), length 4, offset 0, " + "null count 1 = {[0] = -5, [1] = 6, [2] = null, [3] = 42}")) + + +def test_arrays_stack(gdb_arrow): + check_stack_repr( + gdb_arrow, "int32_array", + ("arrow::Int32Array of length 4, offset 0, null count 1 = " + "{[0] = -5, [1] = 6, [2] = null, [3] = 42}")) + check_stack_repr( + gdb_arrow, "list_array", + ("arrow::ListArray of type arrow::list(arrow::int64()), " + "length 3, offset 0, null count 1")) + + +def test_arrays_heap(gdb_arrow): + # Null + check_heap_repr( + gdb_arrow, "heap_null_array", + "arrow::NullArray of length 2, offset 0, null count 2") + + # Primitive + check_heap_repr( + gdb_arrow, "heap_int32_array", + ("arrow::Int32Array of length 4, offset 0, null count 1 = {" + "[0] = -5, [1] = 6, [2] = null, [3] = 42}")) + check_heap_repr( + gdb_arrow, "heap_int32_array_no_nulls", + ("arrow::Int32Array of length 4, offset 0, null count 0 = {" + "[0] = -5, [1] = 6, [2] = 3, [3] = 42}")) + check_heap_repr( + gdb_arrow, "heap_int32_array_sliced_1_9", + ("arrow::Int32Array of length 9, offset 1, unknown null count = {" + "[0] = 2, [1] = -3, [2] = 4, [3] = null, [4] = -5, [5] = 6, " + "[6] = -7, [7] = 8, [8] = null}")) + check_heap_repr( + gdb_arrow, "heap_int32_array_sliced_2_6", + ("arrow::Int32Array of length 6, offset 2, unknown null count = {" + "[0] = -3, [1] = 4, [2] = null, [3] = -5, [4] = 6, [5] = -7}")) + check_heap_repr( + gdb_arrow, "heap_int32_array_sliced_8_4", + ("arrow::Int32Array of length 4, offset 8, unknown null count = {" + "[0] = 8, [1] = null, [2] = -9, [3] = -10}")) + check_heap_repr( + gdb_arrow, "heap_int32_array_sliced_empty", + "arrow::Int32Array of length 0, 
offset 6, unknown null count") + + check_heap_repr( + gdb_arrow, "heap_double_array", + ("arrow::DoubleArray of length 2, offset 0, null count 1 = {" + "[0] = -1.5, [1] = null}")) + check_heap_repr( + gdb_arrow, "heap_float16_array", + ("arrow::HalfFloatArray of length 2, offset 0, null count 0 = {" + "[0] = 0.0, [1] = -1.5}")) + + # Boolean + check_heap_repr( + gdb_arrow, "heap_bool_array", + ("arrow::BooleanArray of length 18, offset 0, null count 6 = {" + "[0] = false, [1] = false, [2] = true, [3] = true, [4] = null, " + "[5] = null, [6] = false, [7] = false, [8] = true, [9] = true, " + "[10] = null, [11] = null, [12] = false, [13] = false, " + "[14] = true, [15] = true, [16] = null, [17] = null}")) + check_heap_repr( + gdb_arrow, "heap_bool_array_sliced_1_9", + ("arrow::BooleanArray of length 9, offset 1, unknown null count = {" + "[0] = false, [1] = true, [2] = true, [3] = null, [4] = null, " + "[5] = false, [6] = false, [7] = true, [8] = true}")) + check_heap_repr( + gdb_arrow, "heap_bool_array_sliced_2_6", + ("arrow::BooleanArray of length 6, offset 2, unknown null count = {" + "[0] = true, [1] = true, [2] = null, [3] = null, [4] = false, " + "[5] = false}")) + check_heap_repr( + gdb_arrow, "heap_bool_array_sliced_empty", + "arrow::BooleanArray of length 0, offset 6, unknown null count") + + # Temporal + check_heap_repr( + gdb_arrow, "heap_date32_array", + ("arrow::Date32Array of length 6, offset 0, null count 1 = {" + "[0] = 0d [1970-01-01], [1] = null, [2] = 18336d [2020-03-15], " + "[3] = -9004d [1945-05-08], [4] = -719162d [0001-01-01], " + "[5] = -719163d [year <= 0]}")) + check_heap_repr( + gdb_arrow, "heap_date64_array", + ("arrow::Date64Array of length 5, offset 0, null count 0 = {" + "[0] = 1584230400000ms [2020-03-15], " + "[1] = -777945600000ms [1945-05-08], " + "[2] = -62135596800000ms [0001-01-01], " + "[3] = -62135683200000ms [year <= 0], " + "[4] = 123ms [non-multiple of 86400000]}")) + check_heap_repr( + gdb_arrow, "heap_time32_array_s", + ("arrow::Time32Array of type arrow::time32(arrow::TimeUnit::SECOND), " + "length 3, offset 0, null count 1 = {" + "[0] = null, [1] = -123s, [2] = 456s}")) + check_heap_repr( + gdb_arrow, "heap_time32_array_ms", + ("arrow::Time32Array of type arrow::time32(arrow::TimeUnit::MILLI), " + "length 3, offset 0, null count 1 = {" + "[0] = null, [1] = -123ms, [2] = 456ms}")) + check_heap_repr( + gdb_arrow, "heap_time64_array_us", + ("arrow::Time64Array of type arrow::time64(arrow::TimeUnit::MICRO), " + "length 3, offset 0, null count 1 = {" + "[0] = null, [1] = -123us, [2] = 456us}")) + check_heap_repr( + gdb_arrow, "heap_time64_array_ns", + ("arrow::Time64Array of type arrow::time64(arrow::TimeUnit::NANO), " + "length 3, offset 0, null count 1 = {" + "[0] = null, [1] = -123ns, [2] = 456ns}")) + check_heap_repr( + gdb_arrow, "heap_month_interval_array", + ("arrow::MonthIntervalArray of length 3, offset 0, null count 1 = {" + "[0] = 123M, [1] = -456M, [2] = null}")) + check_heap_repr( + gdb_arrow, "heap_day_time_interval_array", + ("arrow::DayTimeIntervalArray of length 2, offset 0, null count 1 = {" + "[0] = 1d-600ms, [1] = null}")) + check_heap_repr( + gdb_arrow, "heap_month_day_nano_interval_array", + ("arrow::MonthDayNanoIntervalArray of length 2, offset 0, " + "null count 1 = {[0] = 1M-600d5000ns, [1] = null}")) + check_heap_repr( + gdb_arrow, "heap_duration_array_s", + ("arrow::DurationArray of type arrow::duration" + "(arrow::TimeUnit::SECOND), length 2, offset 0, null count 1 = {" + "[0] = null, [1] = -1234567890123456789s}")) + 
check_heap_repr( + gdb_arrow, "heap_duration_array_ns", + ("arrow::DurationArray of type arrow::duration" + "(arrow::TimeUnit::NANO), length 2, offset 0, null count 1 = {" + "[0] = null, [1] = -1234567890123456789ns}")) + if sys.maxsize > 2**32: + check_heap_repr( + gdb_arrow, "heap_timestamp_array_s", + ("arrow::TimestampArray of type arrow::timestamp" + "(arrow::TimeUnit::SECOND), length 4, offset 0, null count 1 = {" + "[0] = null, [1] = 0s [1970-01-01 00:00:00], " + "[2] = -2203932304s [1900-02-28 12:34:56], " + "[3] = 63730281600s [3989-07-14 00:00:00]}")) + check_heap_repr( + gdb_arrow, "heap_timestamp_array_ms", + ("arrow::TimestampArray of type arrow::timestamp" + "(arrow::TimeUnit::MILLI), length 3, offset 0, null count 1 = {" + "[0] = null, [1] = -2203932303877ms [1900-02-28 12:34:56.123], " + "[2] = 63730281600789ms [3989-07-14 00:00:00.789]}")) + check_heap_repr( + gdb_arrow, "heap_timestamp_array_us", + ("arrow::TimestampArray of type arrow::timestamp" + "(arrow::TimeUnit::MICRO), length 3, offset 0, null count 1 = {" + "[0] = null, " + "[1] = -2203932303345679us [1900-02-28 12:34:56.654321], " + "[2] = 63730281600456789us [3989-07-14 00:00:00.456789]}")) + check_heap_repr( + gdb_arrow, "heap_timestamp_array_ns", + ("arrow::TimestampArray of type arrow::timestamp" + "(arrow::TimeUnit::NANO), length 2, offset 0, null count 1 = {" + "[0] = null, " + "[1] = -2203932303012345679ns [1900-02-28 12:34:56.987654321]}")) + else: + # Python's datetime is limited to smaller timestamps on 32-bit platforms + check_heap_repr( + gdb_arrow, "heap_timestamp_array_s", + ("arrow::TimestampArray of type arrow::timestamp" + "(arrow::TimeUnit::SECOND), length 4, offset 0, null count 1 = {" + "[0] = null, [1] = 0s [1970-01-01 00:00:00], " + "[2] = -2203932304s [too large to represent], " + "[3] = 63730281600s [too large to represent]}")) + check_heap_repr( + gdb_arrow, "heap_timestamp_array_ms", + ("arrow::TimestampArray of type arrow::timestamp" + "(arrow::TimeUnit::MILLI), length 3, offset 0, null count 1 = {" + "[0] = null, [1] = -2203932303877ms [too large to represent], " + "[2] = 63730281600789ms [too large to represent]}")) + check_heap_repr( + gdb_arrow, "heap_timestamp_array_us", + ("arrow::TimestampArray of type arrow::timestamp" + "(arrow::TimeUnit::MICRO), length 3, offset 0, null count 1 = {" + "[0] = null, " + "[1] = -2203932303345679us [too large to represent], " + "[2] = 63730281600456789us [too large to represent]}")) + check_heap_repr( + gdb_arrow, "heap_timestamp_array_ns", + ("arrow::TimestampArray of type arrow::timestamp" + "(arrow::TimeUnit::NANO), length 2, offset 0, null count 1 = {" + "[0] = null, " + "[1] = -2203932303012345679ns [too large to represent]}")) + + # Decimal + check_heap_repr( + gdb_arrow, "heap_decimal128_array", + ("arrow::Decimal128Array of type arrow::decimal128(30, 6), " + "length 3, offset 0, null count 1 = {" + "[0] = null, [1] = -1234567890123456789.012345, " + "[2] = 1234567890123456789.012345}")) + check_heap_repr( + gdb_arrow, "heap_decimal256_array", + ("arrow::Decimal256Array of type arrow::decimal256(50, 6), " + "length 2, offset 0, null count 1 = {" + "[0] = null, " + "[1] = -123456789012345678901234567890123456789.012345}")) + check_heap_repr( + gdb_arrow, "heap_decimal128_array_sliced", + ("arrow::Decimal128Array of type arrow::decimal128(30, 6), " + "length 1, offset 1, unknown null count = {" + "[0] = -1234567890123456789.012345}")) + + # Binary-like + check_heap_repr( + gdb_arrow, "heap_fixed_size_binary_array", + 
(r'arrow::FixedSizeBinaryArray of type arrow::fixed_size_binary(3), ' + r'length 3, offset 0, null count 1 = {' + r'[0] = null, [1] = "abc", [2] = "\000\037\377"}')) + check_heap_repr( + gdb_arrow, "heap_fixed_size_binary_array_zero_width", + (r'arrow::FixedSizeBinaryArray of type arrow::fixed_size_binary(0), ' + r'length 2, offset 0, null count 1 = {[0] = null, [1] = ""}')) + check_heap_repr( + gdb_arrow, "heap_fixed_size_binary_array_sliced", + (r'arrow::FixedSizeBinaryArray of type arrow::fixed_size_binary(3), ' + r'length 1, offset 1, unknown null count = {[0] = "abc"}')) + check_heap_repr( + gdb_arrow, "heap_binary_array", + (r'arrow::BinaryArray of length 3, offset 0, null count 1 = {' + r'[0] = null, [1] = "abcd", [2] = "\000\037\377"}')) + check_heap_repr( + gdb_arrow, "heap_large_binary_array", + (r'arrow::LargeBinaryArray of length 3, offset 0, null count 1 = {' + r'[0] = null, [1] = "abcd", [2] = "\000\037\377"}')) + check_heap_repr( + gdb_arrow, "heap_string_array", + (r'arrow::StringArray of length 3, offset 0, null count 1 = {' + r'[0] = null, [1] = "héhé", [2] = "invalid \\xff char"}')) + check_heap_repr( + gdb_arrow, "heap_large_string_array", + (r'arrow::LargeStringArray of length 3, offset 0, null count 1 = {' + r'[0] = null, [1] = "héhé", [2] = "invalid \\xff char"}')) + check_heap_repr( + gdb_arrow, "heap_binary_array_sliced", + (r'arrow::BinaryArray of length 1, offset 1, unknown null count = ' + r'{[0] = "abcd"}')) + + # Nested + check_heap_repr( + gdb_arrow, "heap_list_array", + ("arrow::ListArray of type arrow::list(arrow::int64()), " + "length 3, offset 0, null count 1")) + + +def test_schema(gdb_arrow): + check_heap_repr(gdb_arrow, "schema_empty", + "arrow::Schema with 0 fields") + check_heap_repr( + gdb_arrow, "schema_non_empty", + ('arrow::Schema with 2 fields = {["ints"] = arrow::int8(), ' + '["strs"] = arrow::utf8()}')) + check_heap_repr( + gdb_arrow, "schema_with_metadata", + ('arrow::Schema with 2 fields and 2 metadata items = ' + '{["ints"] = arrow::int8(), ["strs"] = arrow::utf8()}')) + + +def test_chunked_array(gdb_arrow): + check_stack_repr( + gdb_arrow, "chunked_array", + ("arrow::ChunkedArray of type arrow::int32(), length 5, null count 1 " + "with 2 chunks = {[0] = length 2, offset 0, null count 0, " + "[1] = length 3, offset 0, null count 1}")) + + +def test_record_batch(gdb_arrow): + expected_prefix = 'arrow::RecordBatch with 2 columns, 3 rows' + expected_suffix = ( + '{["ints"] = arrow::ArrayData of type arrow::int32(), ' + 'length 3, offset 0, null count 0 = ' + '{[0] = 1, [1] = 2, [2] = 3}, ' + '["strs"] = arrow::ArrayData of type arrow::utf8(), ' + 'length 3, offset 0, null count 1 = ' + '{[0] = "abc", [1] = null, [2] = "def"}}') + + expected = f"{expected_prefix} = {expected_suffix}" + # Representations may differ between those two because of + # RecordBatch (base class) vs. SimpleRecordBatch (concrete class). 
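    # Checking both the shared_ptr ("batch") and the raw pointer
    # ("batch.get()") below exercises both the base-class and the
    # concrete-class code paths.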
+ check_heap_repr(gdb_arrow, "batch", expected) + check_heap_repr(gdb_arrow, "batch.get()", expected) + + expected = f"{expected_prefix}, 3 metadata items = {expected_suffix}" + check_heap_repr(gdb_arrow, "batch_with_metadata", expected) + + +def test_table(gdb_arrow): + expected_table = ( + 'arrow::Table with 2 columns, 5 rows = {' + '["ints"] = arrow::ChunkedArray of type arrow::int32(), ' + 'length 5, null count 0 with 2 chunks = ' + '{[0] = length 3, offset 0, null count 0, ' + '[1] = length 2, offset 0, null count 0}, ' + '["strs"] = arrow::ChunkedArray of type arrow::utf8(), ' + 'length 5, null count 1 with 3 chunks = ' + '{[0] = length 2, offset 0, null count 1, ' + '[1] = length 1, offset 0, null count 0, ' + '[2] = length 2, offset 0, null count 0}}') + + # Same as RecordBatch above (Table vs. SimpleTable) + check_heap_repr(gdb_arrow, "table", expected_table) + check_heap_repr(gdb_arrow, "table.get()", expected_table) + + +def test_datum(gdb_arrow): + check_stack_repr(gdb_arrow, "empty_datum", "arrow::Datum (empty)") + check_stack_repr( + gdb_arrow, "scalar_datum", + "arrow::Datum of value arrow::BooleanScalar of null value") + check_stack_repr( + gdb_arrow, "array_datum", + re.compile(r"^arrow::Datum of value arrow::ArrayData of type ")) + check_stack_repr( + gdb_arrow, "chunked_array_datum", + re.compile(r"^arrow::Datum of value arrow::ChunkedArray of type ")) + check_stack_repr( + gdb_arrow, "batch_datum", + re.compile(r"^arrow::Datum of value arrow::RecordBatch " + r"with 2 columns, 3 rows ")) + check_stack_repr( + gdb_arrow, "table_datum", + re.compile(r"^arrow::Datum of value arrow::Table " + r"with 2 columns, 5 rows ")) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_io.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_io.py new file mode 100644 index 0000000000000000000000000000000000000000..17eab871a25754c78f908373785cbce5843075ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_io.py @@ -0,0 +1,2121 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import bz2 +from contextlib import contextmanager +from io import (BytesIO, StringIO, TextIOWrapper, BufferedIOBase, IOBase) +import itertools +import gc +import gzip +import math +import os +import pathlib +import pytest +import sys +import tempfile +import weakref + +import numpy as np + +from pyarrow.util import guid +from pyarrow import Codec +import pyarrow as pa + + +def check_large_seeks(file_factory, expected_error=None): + if sys.platform in ('win32', 'darwin'): + pytest.skip("need sparse file support") + try: + filename = tempfile.mktemp(prefix='test_io') + with open(filename, 'wb') as f: + f.truncate(2 ** 32 + 10) + f.seek(2 ** 32 + 5) + f.write(b'mark\n') + if expected_error: + with expected_error: + file_factory(filename) + else: + with file_factory(filename) as f: + assert f.size() == 2 ** 32 + 10 + assert f.seek(2 ** 32 + 5) == 2 ** 32 + 5 + assert f.tell() == 2 ** 32 + 5 + assert f.read(5) == b'mark\n' + assert f.tell() == 2 ** 32 + 10 + finally: + os.unlink(filename) + + +@contextmanager +def assert_file_not_found(): + with pytest.raises(FileNotFoundError): + yield + + +# ---------------------------------------------------------------------- +# Python file-like objects + + +def test_python_file_write(): + buf = BytesIO() + + f = pa.PythonFile(buf) + + assert f.tell() == 0 + + s1 = b'enga\xc3\xb1ado' + s2 = b'foobar' + + f.write(s1) + assert f.tell() == len(s1) + + f.write(s2) + + expected = s1 + s2 + + result = buf.getvalue() + assert result == expected + + assert not f.closed + f.close() + assert f.closed + + with pytest.raises(TypeError, match="binary file expected"): + pa.PythonFile(StringIO()) + + +def test_python_file_read(): + data = b'some sample data' + + buf = BytesIO(data) + f = pa.PythonFile(buf, mode='r') + + assert f.size() == len(data) + + assert f.tell() == 0 + + assert f.read(4) == b'some' + assert f.tell() == 4 + + f.seek(0) + assert f.tell() == 0 + + f.seek(5) + assert f.tell() == 5 + + v = f.read(50) + assert v == b'sample data' + assert len(v) == 11 + + assert f.size() == len(data) + + assert not f.closed + f.close() + assert f.closed + + with pytest.raises(TypeError, match="binary file expected"): + pa.PythonFile(StringIO(), mode='r') + + +@pytest.mark.parametrize("nbytes", (-1, 0, 1, 5, 100)) +@pytest.mark.parametrize("file_offset", (-1, 0, 5, 100)) +def test_python_file_get_stream(nbytes, file_offset): + + data = b'data1data2data3data4data5' + + f = pa.PythonFile(BytesIO(data), mode='r') + + # negative nbytes or offsets don't make sense here, raise ValueError + if nbytes < 0 or file_offset < 0: + with pytest.raises(pa.ArrowInvalid, + match="should be a positive value"): + f.get_stream(file_offset=file_offset, nbytes=nbytes) + f.close() + return + else: + stream = f.get_stream(file_offset=file_offset, nbytes=nbytes) + + # Subsequent calls to 'read' should match behavior if same + # data passed to BytesIO where get_stream should handle if + # nbytes/file_offset results in no bytes b/c out of bounds. 
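    # Clamp the [file_offset, file_offset + nbytes) window to len(data) so the
    # reference BytesIO below holds exactly the bytes the stream can serve.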
+ start = min(file_offset, len(data)) + end = min(file_offset + nbytes, len(data)) + buf = BytesIO(data[start:end]) + + # read some chunks + assert stream.read(nbytes=4) == buf.read(4) + assert stream.read(nbytes=6) == buf.read(6) + + # Read to end of each stream + assert stream.read() == buf.read() + + # Try reading past the stream + n = len(data) * 2 + assert stream.read(n) == buf.read(n) + + # NativeFile[CInputStream] is not seekable + with pytest.raises(OSError, match="seekable"): + stream.seek(0) + + stream.close() + assert stream.closed + + +def test_python_file_read_at(): + data = b'some sample data' + + buf = BytesIO(data) + f = pa.PythonFile(buf, mode='r') + + # test simple read at + v = f.read_at(nbytes=5, offset=3) + assert v == b'e sam' + assert len(v) == 5 + + # test reading entire file when nbytes > len(file) + w = f.read_at(nbytes=50, offset=0) + assert w == data + assert len(w) == 16 + + +def test_python_file_readall(): + data = b'some sample data' + + buf = BytesIO(data) + with pa.PythonFile(buf, mode='r') as f: + assert f.readall() == data + + +def test_python_file_readinto(): + length = 10 + data = b'some sample data longer than 10' + dst_buf = bytearray(length) + src_buf = BytesIO(data) + + with pa.PythonFile(src_buf, mode='r') as f: + assert f.readinto(dst_buf) == 10 + + assert dst_buf[:length] == data[:length] + assert len(dst_buf) == length + + +def test_python_file_read_buffer(): + length = 10 + data = b'0123456798' + dst_buf = bytearray(data) + + class DuckReader: + def close(self): + pass + + @property + def closed(self): + return False + + def read_buffer(self, nbytes): + assert nbytes == length + return memoryview(dst_buf)[:nbytes] + + duck_reader = DuckReader() + with pa.PythonFile(duck_reader, mode='r') as f: + buf = f.read_buffer(length) + assert len(buf) == length + assert memoryview(buf).tobytes() == dst_buf[:length] + # buf should point to the same memory, so modifying it + memoryview(buf)[0] = ord(b'x') + # should modify the original + assert dst_buf[0] == ord(b'x') + + +def test_python_file_correct_abc(): + with pa.PythonFile(BytesIO(b''), mode='r') as f: + assert isinstance(f, BufferedIOBase) + assert isinstance(f, IOBase) + + +def test_python_file_iterable(): + data = b'''line1 + line2 + line3 + ''' + + buf = BytesIO(data) + buf2 = BytesIO(data) + + with pa.PythonFile(buf, mode='r') as f: + for read, expected in zip(f, buf2): + assert read == expected + + +def test_python_file_large_seeks(): + def factory(filename): + return pa.PythonFile(open(filename, 'rb')) + + check_large_seeks(factory) + + +def test_bytes_reader(): + # Like a BytesIO, but zero-copy underneath for C++ consumers + data = b'some sample data' + f = pa.BufferReader(data) + assert f.tell() == 0 + + assert f.size() == len(data) + + assert f.read(4) == b'some' + assert f.tell() == 4 + + f.seek(0) + assert f.tell() == 0 + + f.seek(0, 2) + assert f.tell() == len(data) + + f.seek(5) + assert f.tell() == 5 + + assert f.read(50) == b'sample data' + + assert not f.closed + f.close() + assert f.closed + + +def test_bytes_reader_non_bytes(): + with pytest.raises(TypeError): + pa.BufferReader('some sample data') + + +def test_bytes_reader_retains_parent_reference(): + import gc + + # ARROW-421 + def get_buffer(): + data = b'some sample data' * 1000 + reader = pa.BufferReader(data) + reader.seek(5) + return reader.read_buffer(6) + + buf = get_buffer() + gc.collect() + assert buf.to_pybytes() == b'sample' + assert buf.parent is not None + + +def test_python_file_implicit_mode(tmpdir): + path = 
os.path.join(str(tmpdir), 'foo.txt') + with open(path, 'wb') as f: + pf = pa.PythonFile(f) + assert pf.writable() + assert not pf.readable() + assert not pf.seekable() # PyOutputStream isn't seekable + f.write(b'foobar\n') + + with open(path, 'rb') as f: + pf = pa.PythonFile(f) + assert pf.readable() + assert not pf.writable() + assert pf.seekable() + assert pf.read() == b'foobar\n' + + bio = BytesIO() + pf = pa.PythonFile(bio) + assert pf.writable() + assert not pf.readable() + assert not pf.seekable() + pf.write(b'foobar\n') + assert bio.getvalue() == b'foobar\n' + + +def test_python_file_writelines(tmpdir): + lines = [b'line1\n', b'line2\n' b'line3'] + path = os.path.join(str(tmpdir), 'foo.txt') + with open(path, 'wb') as f: + try: + f = pa.PythonFile(f, mode='w') + assert f.writable() + f.writelines(lines) + finally: + f.close() + + with open(path, 'rb') as f: + try: + f = pa.PythonFile(f, mode='r') + assert f.readable() + assert f.read() == b''.join(lines) + finally: + f.close() + + +def test_python_file_closing(): + bio = BytesIO() + pf = pa.PythonFile(bio) + wr = weakref.ref(pf) + del pf + assert wr() is None # object was destroyed + assert not bio.closed + pf = pa.PythonFile(bio) + pf.close() + assert bio.closed + + +# ---------------------------------------------------------------------- +# Buffers + + +def check_buffer_pickling(buf, pickler): + # Check that buffer survives a pickle roundtrip + for protocol in range(0, pickler.HIGHEST_PROTOCOL + 1): + result = pickler.loads(pickler.dumps(buf, protocol=protocol)) + assert len(result) == len(buf) + assert memoryview(result) == memoryview(buf) + assert result.to_pybytes() == buf.to_pybytes() + assert result.is_mutable == buf.is_mutable + + +def test_buffer_bytes(pickle_module): + val = b'some data' + + buf = pa.py_buffer(val) + assert isinstance(buf, pa.Buffer) + assert not buf.is_mutable + assert buf.is_cpu + + result = buf.to_pybytes() + assert result == val + + check_buffer_pickling(buf, pickle_module) + + +def test_buffer_null_data(pickle_module): + null_buff = pa.foreign_buffer(address=0, size=0) + assert null_buff.to_pybytes() == b"" + assert null_buff.address == 0 + # ARROW-16048: we shouldn't expose a NULL address through the Python + # buffer protocol. 
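    # The exported memoryview must therefore be backed by a valid (non-NULL)
    # pointer, which is what the pa.py_buffer(m).address check below verifies.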
+ m = memoryview(null_buff) + assert m.tobytes() == b"" + assert pa.py_buffer(m).address != 0 + + check_buffer_pickling(null_buff, pickle_module) + + +def test_buffer_memoryview(pickle_module): + val = b'some data' + + buf = pa.py_buffer(val) + assert isinstance(buf, pa.Buffer) + assert not buf.is_mutable + assert buf.is_cpu + + result = memoryview(buf) + assert result == val + + check_buffer_pickling(buf, pickle_module) + + +def test_buffer_bytearray(pickle_module): + val = bytearray(b'some data') + + buf = pa.py_buffer(val) + assert isinstance(buf, pa.Buffer) + assert buf.is_mutable + assert buf.is_cpu + + result = bytearray(buf) + assert result == val + + check_buffer_pickling(buf, pickle_module) + + +def test_buffer_invalid(): + with pytest.raises(TypeError, + match="(bytes-like object|buffer interface)"): + pa.py_buffer(None) + + +def test_buffer_weakref(): + buf = pa.py_buffer(b'some data') + wr = weakref.ref(buf) + assert wr() is not None + del buf + assert wr() is None + + +@pytest.mark.parametrize('val, expected_hex_buffer', + [(b'check', b'636865636B'), + (b'\a0', b'0730'), + (b'', b'')]) +def test_buffer_hex(val, expected_hex_buffer): + buf = pa.py_buffer(val) + assert buf.hex() == expected_hex_buffer + + +def test_buffer_to_numpy(): + # Make sure creating a numpy array from an arrow buffer works + byte_array = bytearray(20) + byte_array[0] = 42 + buf = pa.py_buffer(byte_array) + array = np.frombuffer(buf, dtype="uint8") + assert array[0] == byte_array[0] + byte_array[0] += 1 + assert array[0] == byte_array[0] + assert array.base == buf + + +def test_buffer_from_numpy(): + # C-contiguous + arr = np.arange(12, dtype=np.int8).reshape((3, 4)) + buf = pa.py_buffer(arr) + assert buf.is_cpu + assert buf.is_mutable + assert buf.to_pybytes() == arr.tobytes() + # F-contiguous; note strides information is lost + buf = pa.py_buffer(arr.T) + assert buf.is_cpu + assert buf.is_mutable + assert buf.to_pybytes() == arr.tobytes() + # Non-contiguous + with pytest.raises(ValueError, match="not contiguous"): + buf = pa.py_buffer(arr.T[::2]) + + +def test_buffer_address(): + b1 = b'some data!' + b2 = bytearray(b1) + b3 = bytearray(b1) + + buf1 = pa.py_buffer(b1) + buf2 = pa.py_buffer(b1) + buf3 = pa.py_buffer(b2) + buf4 = pa.py_buffer(b3) + + assert buf1.address > 0 + assert buf1.address == buf2.address + assert buf3.address != buf2.address + assert buf4.address != buf3.address + + arr = np.arange(5) + buf = pa.py_buffer(arr) + assert buf.address == arr.ctypes.data + + +def test_buffer_equals(): + # Buffer.equals() returns true iff the buffers have the same contents + def eq(a, b): + assert a.equals(b) + assert a == b + assert not (a != b) + + def ne(a, b): + assert not a.equals(b) + assert not (a == b) + assert a != b + + b1 = b'some data!' 
+ b2 = bytearray(b1) + b3 = bytearray(b1) + b3[0] = 42 + buf1 = pa.py_buffer(b1) + buf2 = pa.py_buffer(b2) + buf3 = pa.py_buffer(b2) + buf4 = pa.py_buffer(b3) + buf5 = pa.py_buffer(np.frombuffer(b2, dtype=np.int16)) + eq(buf1, buf1) + eq(buf1, buf2) + eq(buf2, buf3) + ne(buf2, buf4) + # Data type is indifferent + eq(buf2, buf5) + + +def test_buffer_eq_bytes(): + buf = pa.py_buffer(b'some data') + assert buf == b'some data' + assert buf == bytearray(b'some data') + assert buf != b'some dat1' + + with pytest.raises(TypeError): + buf == 'some data' + + +def test_buffer_getitem(): + data = bytearray(b'some data!') + buf = pa.py_buffer(data) + + n = len(data) + for ix in range(-n, n - 1): + assert buf[ix] == data[ix] + + with pytest.raises(IndexError): + buf[n] + + with pytest.raises(IndexError): + buf[-n - 1] + + +def test_buffer_slicing(): + data = b'some data!' + buf = pa.py_buffer(data) + + sliced = buf.slice(2) + expected = pa.py_buffer(b'me data!') + assert sliced.equals(expected) + + sliced2 = buf.slice(2, 4) + expected2 = pa.py_buffer(b'me d') + assert sliced2.equals(expected2) + + # 0 offset + assert buf.slice(0).equals(buf) + + # Slice past end of buffer + assert len(buf.slice(len(buf))) == 0 + + with pytest.raises(IndexError): + buf.slice(-1) + + with pytest.raises(IndexError): + buf.slice(len(buf) + 1) + assert buf[11:].to_pybytes() == b"" + + # Slice stop exceeds buffer length + with pytest.raises(IndexError): + buf.slice(1, len(buf)) + assert buf[1:11].to_pybytes() == buf.to_pybytes()[1:] + + # Negative length + with pytest.raises(IndexError): + buf.slice(1, -1) + + # Test slice notation + assert buf[2:].equals(buf.slice(2)) + assert buf[2:5].equals(buf.slice(2, 3)) + assert buf[-5:].equals(buf.slice(len(buf) - 5)) + assert buf[-5:-2].equals(buf.slice(len(buf) - 5, 3)) + + with pytest.raises(IndexError): + buf[::-1] + with pytest.raises(IndexError): + buf[::2] + + n = len(buf) + for start in range(-n * 2, n * 2): + for stop in range(-n * 2, n * 2): + assert buf[start:stop].to_pybytes() == buf.to_pybytes()[start:stop] + + +def test_buffer_hashing(): + # Buffers are unhashable + with pytest.raises(TypeError, match="unhashable"): + hash(pa.py_buffer(b'123')) + + +def test_buffer_protocol_respects_immutability(): + # ARROW-3228; NumPy's frombuffer ctor determines whether a buffer-like + # object is mutable by first attempting to get a mutable buffer using + # PyObject_FromBuffer. 
If that fails, it assumes that the object is + # immutable + a = b'12345' + arrow_ref = pa.py_buffer(a) + numpy_ref = np.frombuffer(arrow_ref, dtype=np.uint8) + assert not numpy_ref.flags.writeable + + +def test_foreign_buffer(): + obj = np.array([1, 2], dtype=np.int32) + addr = obj.__array_interface__["data"][0] + size = obj.nbytes + buf = pa.foreign_buffer(addr, size, obj) + wr = weakref.ref(obj) + del obj + assert np.frombuffer(buf, dtype=np.int32).tolist() == [1, 2] + assert wr() is not None + del buf + assert wr() is None + + +def test_allocate_buffer(): + buf = pa.allocate_buffer(100) + assert buf.size == 100 + assert buf.is_mutable + assert buf.parent is None + + bit = b'abcde' + writer = pa.FixedSizeBufferWriter(buf) + writer.write(bit) + + assert buf.to_pybytes()[:5] == bit + + +def test_allocate_buffer_resizable(): + buf = pa.allocate_buffer(100, resizable=True) + assert isinstance(buf, pa.ResizableBuffer) + + buf.resize(200) + assert buf.size == 200 + + +def test_cache_options(): + opts1 = pa.CacheOptions() + opts2 = pa.CacheOptions(hole_size_limit=1024) + opts3 = pa.CacheOptions(hole_size_limit=4096, range_size_limit=8192) + opts4 = pa.CacheOptions(hole_size_limit=4096, + range_size_limit=8192, prefetch_limit=5) + opts5 = pa.CacheOptions(hole_size_limit=4096, + range_size_limit=8192, lazy=False) + opts6 = pa.CacheOptions.from_network_metrics(time_to_first_byte_millis=100, + transfer_bandwidth_mib_per_sec=200, + ideal_bandwidth_utilization_frac=0.9, + max_ideal_request_size_mib=64) + + assert opts1.hole_size_limit == 8192 + assert opts1.range_size_limit == 32 * 1024 * 1024 + assert opts1.lazy is True + assert opts1.prefetch_limit == 0 + + assert opts2.hole_size_limit == 1024 + assert opts2.range_size_limit == 32 * 1024 * 1024 + assert opts2.lazy is True + assert opts2.prefetch_limit == 0 + + assert opts3.hole_size_limit == 4096 + assert opts3.range_size_limit == 8192 + assert opts3.lazy is True + assert opts3.prefetch_limit == 0 + + assert opts4.hole_size_limit == 4096 + assert opts4.range_size_limit == 8192 + assert opts4.lazy is True + assert opts4.prefetch_limit == 5 + + assert opts5.hole_size_limit == 4096 + assert opts5.range_size_limit == 8192 + assert opts5.lazy is False + assert opts5.prefetch_limit == 0 + + assert opts6.lazy is False + + assert opts1 == opts1 + assert opts1 != opts2 + assert opts2 != opts3 + assert opts3 != opts4 + assert opts4 != opts5 + assert opts6 != opts1 + + +def test_cache_options_pickling(pickle_module): + options = [ + pa.CacheOptions(), + pa.CacheOptions(hole_size_limit=4096, range_size_limit=8192, + lazy=True, prefetch_limit=5), + ] + + for option in options: + assert pickle_module.loads(pickle_module.dumps(option)) == option + + +@pytest.mark.parametrize("compression", [ + pytest.param( + "bz2", marks=pytest.mark.xfail(raises=pa.lib.ArrowNotImplementedError) + ), + "brotli", + "gzip", + "lz4", + "zstd", + "snappy" +]) +def test_compress_decompress(compression): + if not Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + + INPUT_SIZE = 10000 + test_data = (np.random.randint(0, 255, size=INPUT_SIZE) + .astype(np.uint8) + .tobytes()) + test_buf = pa.py_buffer(test_data) + + compressed_buf = pa.compress(test_buf, codec=compression) + compressed_bytes = pa.compress(test_data, codec=compression, + asbytes=True) + + assert isinstance(compressed_bytes, bytes) + + decompressed_buf = pa.decompress(compressed_buf, INPUT_SIZE, + codec=compression) + decompressed_bytes = pa.decompress(compressed_bytes, 
INPUT_SIZE, + codec=compression, asbytes=True) + + assert isinstance(decompressed_bytes, bytes) + + assert decompressed_buf.equals(test_buf) + assert decompressed_bytes == test_data + + with pytest.raises(ValueError): + pa.decompress(compressed_bytes, codec=compression) + + +@pytest.mark.parametrize("compression", [ + pytest.param( + "bz2", marks=pytest.mark.xfail(raises=pa.lib.ArrowNotImplementedError) + ), + "brotli", + "gzip", + "lz4", + "zstd", + "snappy" +]) +def test_compression_level(compression): + if not Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + + codec = Codec(compression) + if codec.name == "snappy": + assert codec.compression_level is None + else: + assert isinstance(codec.compression_level, int) + + # These codecs do not support a compression level + no_level = ['snappy'] + if compression in no_level: + assert not Codec.supports_compression_level(compression) + with pytest.raises(ValueError): + Codec(compression, 0) + with pytest.raises(ValueError): + Codec.minimum_compression_level(compression) + with pytest.raises(ValueError): + Codec.maximum_compression_level(compression) + with pytest.raises(ValueError): + Codec.default_compression_level(compression) + return + + INPUT_SIZE = 10000 + test_data = (np.random.randint(0, 255, size=INPUT_SIZE) + .astype(np.uint8) + .tobytes()) + test_buf = pa.py_buffer(test_data) + + min_level = Codec.minimum_compression_level(compression) + max_level = Codec.maximum_compression_level(compression) + default_level = Codec.default_compression_level(compression) + + assert min_level < max_level + assert default_level >= min_level + assert default_level <= max_level + + for compression_level in range(min_level, max_level+1): + codec = Codec(compression, compression_level) + compressed_buf = codec.compress(test_buf) + compressed_bytes = codec.compress(test_data, asbytes=True) + assert isinstance(compressed_bytes, bytes) + decompressed_buf = codec.decompress(compressed_buf, INPUT_SIZE) + decompressed_bytes = codec.decompress(compressed_bytes, INPUT_SIZE, + asbytes=True) + + assert isinstance(decompressed_bytes, bytes) + + assert decompressed_buf.equals(test_buf) + assert decompressed_bytes == test_data + + with pytest.raises(ValueError): + codec.decompress(compressed_bytes) + + # The ability to set a seed this way is not present on older versions of + # numpy (currently in our python 3.6 CI build). 
Some inputs might just + # happen to compress the same between the two levels so using seeded + # random numbers is necessary to help get more reliable results + # + # The goal of this part is to ensure the compression_level is being + # passed down to the C++ layer, not to verify the compression algs + # themselves + if not hasattr(np.random, 'default_rng'): + pytest.skip('Requires newer version of numpy') + rng = np.random.default_rng(seed=42) + values = rng.integers(0, 100, 1000) + arr = pa.array(values) + hard_to_compress_buffer = arr.buffers()[1] + + weak_codec = Codec(compression, min_level) + weakly_compressed_buf = weak_codec.compress(hard_to_compress_buffer) + + strong_codec = Codec(compression, max_level) + strongly_compressed_buf = strong_codec.compress(hard_to_compress_buffer) + + assert len(weakly_compressed_buf) > len(strongly_compressed_buf) + + +def test_buffer_memoryview_is_immutable(): + val = b'some data' + + buf = pa.py_buffer(val) + assert not buf.is_mutable + assert isinstance(buf, pa.Buffer) + + result = memoryview(buf) + assert result.readonly + + with pytest.raises(TypeError) as exc: + result[0] = b'h' + assert 'cannot modify read-only' in str(exc.value) + + b = bytes(buf) + with pytest.raises(TypeError) as exc: + b[0] = b'h' + assert 'cannot modify read-only' in str(exc.value) + + +def test_uninitialized_buffer(): + # ARROW-2039: calling Buffer() directly creates an uninitialized object + # ARROW-2638: prevent calling extension class constructors directly + with pytest.raises(TypeError): + pa.Buffer() + + +def test_memory_output_stream(): + # 10 bytes + val = b'dataabcdef' + f = pa.BufferOutputStream() + + K = 1000 + for i in range(K): + f.write(val) + + buf = f.getvalue() + assert len(buf) == len(val) * K + assert buf.to_pybytes() == val * K + + +def test_inmemory_write_after_closed(): + f = pa.BufferOutputStream() + f.write(b'ok') + assert not f.closed + f.getvalue() + assert f.closed + + with pytest.raises(ValueError): + f.write(b'not ok') + + +def test_buffer_protocol_ref_counting(): + def make_buffer(bytes_obj): + return bytearray(pa.py_buffer(bytes_obj)) + + buf = make_buffer(b'foo') + gc.collect() + assert buf == b'foo' + + # ARROW-1053 + val = b'foo' + refcount_before = sys.getrefcount(val) + for i in range(10): + make_buffer(val) + gc.collect() + assert refcount_before == sys.getrefcount(val) + + +def test_nativefile_write_memoryview(): + f = pa.BufferOutputStream() + data = b'ok' + + arr = np.frombuffer(data, dtype='S1') + + f.write(arr) + f.write(bytearray(data)) + f.write(pa.py_buffer(data)) + with pytest.raises(TypeError): + f.write(data.decode('utf8')) + + buf = f.getvalue() + + assert buf.to_pybytes() == data * 3 + + +# ---------------------------------------------------------------------- +# Mock output stream + + +def test_mock_output_stream(): + # Make sure that the MockOutputStream and the BufferOutputStream record the + # same size + + # 10 bytes + val = b'dataabcdef' + + f1 = pa.MockOutputStream() + f2 = pa.BufferOutputStream() + + K = 1000 + for i in range(K): + f1.write(val) + f2.write(val) + + assert f1.size() == len(f2.getvalue()) + + # Do the same test with a table + record_batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], ['a']) + + f1 = pa.MockOutputStream() + f2 = pa.BufferOutputStream() + + stream_writer1 = pa.RecordBatchStreamWriter(f1, record_batch.schema) + stream_writer2 = pa.RecordBatchStreamWriter(f2, record_batch.schema) + + stream_writer1.write_batch(record_batch) + stream_writer2.write_batch(record_batch) + 
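    # MockOutputStream does not store the bytes it is given; it only advances
    # a size counter, so its size() can be compared with the length of the
    # buffer actually produced by BufferOutputStream once both writers are
    # closed below.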
stream_writer1.close() + stream_writer2.close() + + assert f1.size() == len(f2.getvalue()) + + +# ---------------------------------------------------------------------- +# OS files and memory maps + + +@pytest.fixture +def sample_disk_data(request, tmpdir): + SIZE = 4096 + arr = np.random.randint(0, 256, size=SIZE).astype('u1') + data = arr.tobytes()[:SIZE] + + path = os.path.join(str(tmpdir), guid()) + + with open(path, 'wb') as f: + f.write(data) + + def teardown(): + _try_delete(path) + + request.addfinalizer(teardown) + return path, data + + +def _check_native_file_reader(FACTORY, sample_data, + allow_read_out_of_bounds=True): + path, data = sample_data + + f = FACTORY(path, mode='r') + + assert f.read(10) == data[:10] + assert f.read(0) == b'' + assert f.tell() == 10 + + assert f.read() == data[10:] + + assert f.size() == len(data) + + f.seek(0) + assert f.tell() == 0 + + # Seeking past end of file not supported in memory maps + if allow_read_out_of_bounds: + f.seek(len(data) + 1) + assert f.tell() == len(data) + 1 + assert f.read(5) == b'' + + # Test whence argument of seek, ARROW-1287 + assert f.seek(3) == 3 + assert f.seek(3, os.SEEK_CUR) == 6 + assert f.tell() == 6 + + ex_length = len(data) - 2 + assert f.seek(-2, os.SEEK_END) == ex_length + assert f.tell() == ex_length + + +def test_memory_map_reader(sample_disk_data): + _check_native_file_reader(pa.memory_map, sample_disk_data, + allow_read_out_of_bounds=False) + + +def test_memory_map_retain_buffer_reference(sample_disk_data): + path, data = sample_disk_data + + cases = [] + with pa.memory_map(path, 'rb') as f: + cases.append((f.read_buffer(100), data[:100])) + cases.append((f.read_buffer(100), data[100:200])) + cases.append((f.read_buffer(100), data[200:300])) + + # Call gc.collect() for good measure + gc.collect() + + for buf, expected in cases: + assert buf.to_pybytes() == expected + + +def test_os_file_reader(sample_disk_data): + _check_native_file_reader(pa.OSFile, sample_disk_data) + + +def test_os_file_large_seeks(): + check_large_seeks(pa.OSFile) + + +def _try_delete(path): + try: + os.remove(path) + except os.error: + pass + + +def test_memory_map_writer(tmpdir): + SIZE = 4096 + arr = np.random.randint(0, 256, size=SIZE).astype('u1') + data = arr.tobytes()[:SIZE] + + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(data) + + f = pa.memory_map(path, mode='r+b') + + f.seek(10) + f.write(b'peekaboo') + assert f.tell() == 18 + + f.seek(10) + assert f.read(8) == b'peekaboo' + + f2 = pa.memory_map(path, mode='r+b') + + f2.seek(10) + f2.write(b'booapeak') + f2.seek(10) + + f.seek(10) + assert f.read(8) == b'booapeak' + + # Does not truncate file + f3 = pa.memory_map(path, mode='w') + f3.write(b'foo') + + with pa.memory_map(path) as f4: + assert f4.size() == SIZE + + with pytest.raises(IOError): + f3.read(5) + + f.seek(0) + assert f.read(3) == b'foo' + + +def test_memory_map_resize(tmpdir): + SIZE = 4096 + arr = np.random.randint(0, 256, size=SIZE).astype(np.uint8) + data1 = arr.tobytes()[:(SIZE // 2)] + data2 = arr.tobytes()[(SIZE // 2):] + + path = os.path.join(str(tmpdir), guid()) + + mmap = pa.create_memory_map(path, SIZE / 2) + mmap.write(data1) + + mmap.resize(SIZE) + mmap.write(data2) + + mmap.close() + + with open(path, 'rb') as f: + assert f.read() == arr.tobytes() + + +def test_memory_zero_length(tmpdir): + path = os.path.join(str(tmpdir), guid()) + f = open(path, 'wb') + f.close() + with pa.memory_map(path, mode='r+b') as memory_map: + assert memory_map.size() == 0 + + +def 
test_memory_map_large_seeks(): + if sys.maxsize >= 2**32: + expected_error = None + else: + expected_error = pytest.raises( + pa.ArrowCapacityError, + match="Requested memory map length 4294967306 " + "does not fit in a C size_t") + check_large_seeks(pa.memory_map, expected_error=expected_error) + + +def test_memory_map_close_remove(tmpdir): + # ARROW-6740: should be able to delete closed memory-mapped file (Windows) + path = os.path.join(str(tmpdir), guid()) + mmap = pa.create_memory_map(path, 4096) + mmap.close() + assert mmap.closed + os.remove(path) # Shouldn't fail + + +def test_memory_map_deref_remove(tmpdir): + path = os.path.join(str(tmpdir), guid()) + pa.create_memory_map(path, 4096) + os.remove(path) # Shouldn't fail + + +def test_os_file_writer(tmpdir): + SIZE = 4096 + arr = np.random.randint(0, 256, size=SIZE).astype('u1') + data = arr.tobytes()[:SIZE] + + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(data) + + # Truncates file + f2 = pa.OSFile(path, mode='w') + f2.write(b'foo') + + with pa.OSFile(path) as f3: + assert f3.size() == 3 + + with pytest.raises(IOError): + f2.read(5) + f2.close() + + # Append + with pa.OSFile(path, mode='ab') as f4: + f4.write(b'bar') + with pa.OSFile(path) as f5: + assert f5.size() == 6 # foo + bar + + +def test_native_file_write_reject_unicode(): + # ARROW-3227 + nf = pa.BufferOutputStream() + with pytest.raises(TypeError): + nf.write('foo') + + +def test_native_file_modes(tmpdir): + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(b'foooo') + + with pa.OSFile(path, mode='r') as f: + assert f.mode == 'rb' + assert f.readable() + assert not f.writable() + assert f.seekable() + + with pa.OSFile(path, mode='rb') as f: + assert f.mode == 'rb' + assert f.readable() + assert not f.writable() + assert f.seekable() + + with pa.OSFile(path, mode='w') as f: + assert f.mode == 'wb' + assert not f.readable() + assert f.writable() + assert not f.seekable() + + with pa.OSFile(path, mode='wb') as f: + assert f.mode == 'wb' + assert not f.readable() + assert f.writable() + assert not f.seekable() + + with pa.OSFile(path, mode='ab') as f: + assert f.mode == 'ab' + assert not f.readable() + assert f.writable() + assert not f.seekable() + + with pa.OSFile(path, mode='a') as f: + assert f.mode == 'ab' + assert not f.readable() + assert f.writable() + assert not f.seekable() + + with open(path, 'wb') as f: + f.write(b'foooo') + + with pa.memory_map(path, 'r') as f: + assert f.mode == 'rb' + assert f.readable() + assert not f.writable() + assert f.seekable() + + with pa.memory_map(path, 'r+') as f: + assert f.mode == 'rb+' + assert f.readable() + assert f.writable() + assert f.seekable() + + with pa.memory_map(path, 'r+b') as f: + assert f.mode == 'rb+' + assert f.readable() + assert f.writable() + assert f.seekable() + + +def test_native_file_permissions(tmpdir): + # ARROW-10124: permissions of created files should follow umask + cur_umask = os.umask(0o002) + os.umask(cur_umask) + + path = os.path.join(str(tmpdir), guid()) + with pa.OSFile(path, mode='w'): + pass + assert os.stat(path).st_mode & 0o777 == 0o666 & ~cur_umask + + path = os.path.join(str(tmpdir), guid()) + with pa.memory_map(path, 'w'): + pass + assert os.stat(path).st_mode & 0o777 == 0o666 & ~cur_umask + + +def test_native_file_raises_ValueError_after_close(tmpdir): + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(b'foooo') + + with pa.OSFile(path, mode='rb') as os_file: + assert not os_file.closed + 
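# Leaving the 'with' block closes the file: 'closed' flips to True and the
+ # method calls exercised below are then expected to raise ValueError. +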
assert os_file.closed + + with pa.memory_map(path, mode='rb') as mmap_file: + assert not mmap_file.closed + assert mmap_file.closed + + files = [os_file, + mmap_file] + + methods = [('tell', ()), + ('seek', (0,)), + ('size', ()), + ('flush', ()), + ('readable', ()), + ('writable', ()), + ('seekable', ())] + + for f in files: + for method, args in methods: + with pytest.raises(ValueError): + getattr(f, method)(*args) + + +def test_native_file_TextIOWrapper(tmpdir): + data = ('foooo\n' + 'barrr\n' + 'bazzz\n') + + path = os.path.join(str(tmpdir), guid()) + with open(path, 'wb') as f: + f.write(data.encode('utf-8')) + + with TextIOWrapper(pa.OSFile(path, mode='rb')) as fil: + assert fil.readable() + res = fil.read() + assert res == data + assert fil.closed + + with TextIOWrapper(pa.OSFile(path, mode='rb')) as fil: + # Iteration works + lines = list(fil) + assert ''.join(lines) == data + + # Writing + path2 = os.path.join(str(tmpdir), guid()) + with TextIOWrapper(pa.OSFile(path2, mode='wb')) as fil: + assert fil.writable() + fil.write(data) + + with TextIOWrapper(pa.OSFile(path2, mode='rb')) as fil: + res = fil.read() + assert res == data + + +def test_native_file_TextIOWrapper_perf(tmpdir): + # ARROW-16272: TextIOWrapper.readline() shouldn't exhaust a large + # Arrow input stream. + data = b'foo\nquux\n' + path = str(tmpdir / 'largefile.txt') + with open(path, 'wb') as f: + f.write(data * 100_000) + + binary_file = pa.OSFile(path, mode='rb') + with TextIOWrapper(binary_file) as f: + assert binary_file.tell() == 0 + nbytes = 20_000 + lines = f.readlines(nbytes) + assert len(lines) == math.ceil(2 * nbytes / len(data)) + assert nbytes <= binary_file.tell() <= nbytes * 2 + + +def test_native_file_read1(tmpdir): + # ARROW-16272: read1() should not exhaust the input stream if there + # is a large amount of data remaining. + data = b'123\n' * 1_000_000 + path = str(tmpdir / 'largefile.txt') + with open(path, 'wb') as f: + f.write(data) + + chunks = [] + with pa.OSFile(path, mode='rb') as f: + while True: + b = f.read1() + assert len(b) < len(data) + chunks.append(b) + b = f.read1(30_000) + assert len(b) <= 30_000 + chunks.append(b) + if not b: + break + + assert b"".join(chunks) == data + + +@pytest.mark.pandas +def test_native_file_pandas_text_reader(tmpdir): + # ARROW-16272: Pandas' read_csv() should not exhaust an Arrow + # input stream when a small nrows is passed. 
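+ # The file written below is 'a,b\n' * 10_000_000 (about 40 MB); the
+ # f.tell() assertion further down checks that read_csv(nrows=10) only
+ # triggered a bounded amount of readahead instead of draining the stream.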
+ import pandas as pd + import pandas.testing as tm + data = b'a,b\n' * 10_000_000 + path = str(tmpdir / 'largefile.txt') + with open(path, 'wb') as f: + f.write(data) + + with pa.OSFile(path, mode='rb') as f: + df = pd.read_csv(f, nrows=10) + expected = pd.DataFrame({'a': ['a'] * 10, 'b': ['b'] * 10}) + tm.assert_frame_equal(df, expected) + # Some readahead occurred, but not up to the end of file + assert f.tell() <= 256 * 1024 + + +def test_native_file_open_error(): + with assert_file_not_found(): + pa.OSFile('non_existent_file', 'rb') + with assert_file_not_found(): + pa.memory_map('non_existent_file', 'rb') + + +# ---------------------------------------------------------------------- +# Buffered streams + +def test_buffered_input_stream(): + raw = pa.BufferReader(b"123456789") + f = pa.BufferedInputStream(raw, buffer_size=4) + assert f.read(2) == b"12" + assert raw.tell() == 4 + f.close() + assert f.closed + assert raw.closed + + +def test_buffered_input_stream_detach_seekable(): + # detach() to a seekable file (io::RandomAccessFile in C++) + f = pa.BufferedInputStream(pa.BufferReader(b"123456789"), buffer_size=4) + assert f.read(2) == b"12" + raw = f.detach() + assert f.closed + assert not raw.closed + assert raw.seekable() + assert raw.read(4) == b"5678" + raw.seek(2) + assert raw.read(4) == b"3456" + + +def test_buffered_input_stream_detach_non_seekable(): + # detach() to a non-seekable file (io::InputStream in C++) + f = pa.BufferedInputStream( + pa.BufferedInputStream(pa.BufferReader(b"123456789"), buffer_size=4), + buffer_size=4) + assert f.read(2) == b"12" + raw = f.detach() + assert f.closed + assert not raw.closed + assert not raw.seekable() + assert raw.read(4) == b"5678" + with pytest.raises(EnvironmentError): + raw.seek(2) + + +def test_buffered_output_stream(): + np_buf = np.zeros(100, dtype=np.int8) # zero-initialized buffer + buf = pa.py_buffer(np_buf) + + raw = pa.FixedSizeBufferWriter(buf) + f = pa.BufferedOutputStream(raw, buffer_size=4) + f.write(b"12") + assert np_buf[:4].tobytes() == b'\0\0\0\0' + f.flush() + assert np_buf[:4].tobytes() == b'12\0\0' + f.write(b"3456789") + f.close() + assert f.closed + assert raw.closed + assert np_buf[:10].tobytes() == b'123456789\0' + + +def test_buffered_output_stream_detach(): + np_buf = np.zeros(100, dtype=np.int8) # zero-initialized buffer + buf = pa.py_buffer(np_buf) + + f = pa.BufferedOutputStream(pa.FixedSizeBufferWriter(buf), buffer_size=4) + f.write(b"12") + assert np_buf[:4].tobytes() == b'\0\0\0\0' + raw = f.detach() + assert f.closed + assert not raw.closed + assert np_buf[:4].tobytes() == b'12\0\0' + + +# ---------------------------------------------------------------------- +# Compressed input and output streams + +def check_compressed_input(data, fn, compression): + raw = pa.OSFile(fn, mode="rb") + with pa.CompressedInputStream(raw, compression) as compressed: + assert not compressed.closed + assert compressed.readable() + assert not compressed.writable() + assert not compressed.seekable() + got = compressed.read() + assert got == data + assert compressed.closed + assert raw.closed + + # Same with read_buffer() + raw = pa.OSFile(fn, mode="rb") + with pa.CompressedInputStream(raw, compression) as compressed: + buf = compressed.read_buffer() + assert isinstance(buf, pa.Buffer) + assert buf.to_pybytes() == data + + +@pytest.mark.gzip +def test_compressed_input_gzip(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_input_test.gz") + with gzip.open(fn, "wb") as f: + f.write(data) + 
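# check_compressed_input() (defined above) re-opens the file through
+ # pa.CompressedInputStream and verifies that both read() and read_buffer()
+ # return the original uncompressed bytes; a roughly equivalent high-level
+ # call would be pa.input_stream(fn, compression='gzip').read(). +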
check_compressed_input(data, fn, "gzip") + + +def test_compressed_input_bz2(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_input_test.bz2") + with bz2.BZ2File(fn, "w") as f: + f.write(data) + try: + check_compressed_input(data, fn, "bz2") + except NotImplementedError as e: + pytest.skip(str(e)) + + +@pytest.mark.gzip +def test_compressed_input_openfile(tmpdir): + if not Codec.is_available("gzip"): + pytest.skip("gzip support is not built") + + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "test_compressed_input_openfile.gz") + with gzip.open(fn, "wb") as f: + f.write(data) + + with pa.CompressedInputStream(fn, "gzip") as compressed: + buf = compressed.read_buffer() + assert buf.to_pybytes() == data + assert compressed.closed + + with pa.CompressedInputStream(pathlib.Path(fn), "gzip") as compressed: + buf = compressed.read_buffer() + assert buf.to_pybytes() == data + assert compressed.closed + + f = open(fn, "rb") + with pa.CompressedInputStream(f, "gzip") as compressed: + buf = compressed.read_buffer() + assert buf.to_pybytes() == data + assert f.closed + + +def check_compressed_concatenated(data, fn, compression): + raw = pa.OSFile(fn, mode="rb") + with pa.CompressedInputStream(raw, compression) as compressed: + got = compressed.read() + assert got == data + + +@pytest.mark.gzip +def test_compressed_concatenated_gzip(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_input_test2.gz") + with gzip.open(fn, "wb") as f: + f.write(data[:50]) + with gzip.open(fn, "ab") as f: + f.write(data[50:]) + check_compressed_concatenated(data, fn, "gzip") + + +@pytest.mark.gzip +def test_compressed_input_invalid(): + data = b"foo" * 10 + raw = pa.BufferReader(data) + with pytest.raises(ValueError): + pa.CompressedInputStream(raw, "unknown_compression") + with pytest.raises(TypeError): + pa.CompressedInputStream(raw, None) + + with pa.CompressedInputStream(raw, "gzip") as compressed: + with pytest.raises(IOError, match="zlib inflate failed"): + compressed.read() + + +def make_compressed_output(data, fn, compression): + raw = pa.BufferOutputStream() + with pa.CompressedOutputStream(raw, compression) as compressed: + assert not compressed.closed + assert not compressed.readable() + assert compressed.writable() + assert not compressed.seekable() + compressed.write(data) + assert compressed.closed + assert raw.closed + with open(fn, "wb") as f: + f.write(raw.getvalue()) + + +@pytest.mark.gzip +def test_compressed_output_gzip(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_output_test.gz") + make_compressed_output(data, fn, "gzip") + with gzip.open(fn, "rb") as f: + got = f.read() + assert got == data + + +def test_compressed_output_bz2(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + fn = str(tmpdir / "compressed_output_test.bz2") + try: + make_compressed_output(data, fn, "bz2") + except NotImplementedError as e: + pytest.skip(str(e)) + with bz2.BZ2File(fn, "r") as f: + got = f.read() + assert got == data + + +def test_output_stream_constructor(tmpdir): + if not Codec.is_available("gzip"): + pytest.skip("gzip support is not built") + with pa.CompressedOutputStream(tmpdir / "ctor.gz", "gzip") as stream: + stream.write(b"test") + with (tmpdir / "ctor2.gz").open("wb") as f: + with pa.CompressedOutputStream(f, "gzip") as stream: + stream.write(b"test") + + +@pytest.mark.parametrize(("path", "expected_compression"), [ + ("file.bz2", "bz2"), + ("file.lz4", "lz4"), + 
(pathlib.Path("file.gz"), "gzip"), + (pathlib.Path("path/to/file.zst"), "zstd"), +]) +def test_compression_detection(path, expected_compression): + if not Codec.is_available(expected_compression): + with pytest.raises(pa.lib.ArrowNotImplementedError): + Codec.detect(path) + else: + codec = Codec.detect(path) + assert isinstance(codec, Codec) + assert codec.name == expected_compression + + +def test_unknown_compression_raises(): + with pytest.raises(ValueError): + Codec.is_available('unknown') + with pytest.raises(TypeError): + Codec(None) + with pytest.raises(ValueError): + Codec('unknown') + + +@pytest.mark.parametrize("compression", [ + "bz2", + "brotli", + "gzip", + "lz4", + "zstd", + pytest.param( + "snappy", + marks=pytest.mark.xfail(raises=pa.lib.ArrowNotImplementedError) + ) +]) +def test_compressed_roundtrip(compression): + if not Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + + data = b"some test data\n" * 10 + b"eof\n" + raw = pa.BufferOutputStream() + with pa.CompressedOutputStream(raw, compression) as compressed: + compressed.write(data) + + cdata = raw.getvalue() + assert len(cdata) < len(data) + raw = pa.BufferReader(cdata) + with pa.CompressedInputStream(raw, compression) as compressed: + got = compressed.read() + assert got == data + + +@pytest.mark.parametrize( + "compression", + ["bz2", "brotli", "gzip", "lz4", "zstd"] +) +def test_compressed_recordbatch_stream(compression): + if not Codec.is_available(compression): + pytest.skip("{} support is not built".format(compression)) + + # ARROW-4836: roundtrip a RecordBatch through a compressed stream + table = pa.Table.from_arrays([pa.array([1, 2, 3, 4, 5])], ['a']) + raw = pa.BufferOutputStream() + stream = pa.CompressedOutputStream(raw, compression) + writer = pa.RecordBatchStreamWriter(stream, table.schema) + writer.write_table(table, max_chunksize=3) + writer.close() + stream.close() # Flush data + buf = raw.getvalue() + stream = pa.CompressedInputStream(pa.BufferReader(buf), compression) + got_table = pa.RecordBatchStreamReader(stream).read_all() + assert got_table == table + + +# ---------------------------------------------------------------------- +# Transform input streams + +unicode_transcoding_example = ( + "Dès Noël où un zéphyr haï me vêt de glaçons würmiens " + "je dîne d’exquis rôtis de bœuf au kir à l’aÿ d’âge mûr & cætera !" 
+) + + +def check_transcoding(data, src_encoding, dest_encoding, chunk_sizes): + chunk_sizes = iter(chunk_sizes) + stream = pa.transcoding_input_stream( + pa.BufferReader(data.encode(src_encoding)), + src_encoding, dest_encoding) + out = [] + while True: + buf = stream.read(next(chunk_sizes)) + out.append(buf) + if not buf: + break + out = b''.join(out) + assert out.decode(dest_encoding) == data + + +@pytest.mark.parametrize('src_encoding, dest_encoding', + [('utf-8', 'utf-16'), + ('utf-16', 'utf-8'), + ('utf-8', 'utf-32-le'), + ('utf-8', 'utf-32-be'), + ]) +def test_transcoding_input_stream(src_encoding, dest_encoding): + # All at once + check_transcoding(unicode_transcoding_example, + src_encoding, dest_encoding, [1000, 0]) + # Incremental + check_transcoding(unicode_transcoding_example, + src_encoding, dest_encoding, + itertools.cycle([1, 2, 3, 5])) + + +@pytest.mark.parametrize('src_encoding, dest_encoding', + [('utf-8', 'utf-8'), + ('utf-8', 'UTF8')]) +def test_transcoding_no_ops(src_encoding, dest_encoding): + # No indirection is wasted when a trivial transcoding is requested + stream = pa.BufferReader(b"abc123") + assert pa.transcoding_input_stream( + stream, src_encoding, dest_encoding) is stream + + +@pytest.mark.parametrize('src_encoding, dest_encoding', + [('utf-8', 'ascii'), + ('utf-8', 'latin-1'), + ]) +def test_transcoding_encoding_error(src_encoding, dest_encoding): + # Character \u0100 cannot be represented in the destination encoding + stream = pa.transcoding_input_stream( + pa.BufferReader("\u0100".encode(src_encoding)), + src_encoding, + dest_encoding) + with pytest.raises(UnicodeEncodeError): + stream.read(1) + + +@pytest.mark.parametrize('src_encoding, dest_encoding', + [('utf-8', 'utf-16'), + ('utf-16', 'utf-8'), + ]) +def test_transcoding_decoding_error(src_encoding, dest_encoding): + # The given bytestring is not valid in the source encoding + stream = pa.transcoding_input_stream( + pa.BufferReader(b"\xff\xff\xff\xff"), + src_encoding, + dest_encoding) + with pytest.raises(UnicodeError): + stream.read(1) + + +# ---------------------------------------------------------------------- +# High-level API + +@pytest.mark.gzip +def test_input_stream_buffer(): + data = b"some test data\n" * 10 + b"eof\n" + for arg in [pa.py_buffer(data), memoryview(data)]: + stream = pa.input_stream(arg) + assert stream.read() == data + + gz_data = gzip.compress(data) + stream = pa.input_stream(memoryview(gz_data)) + assert stream.read() == gz_data + stream = pa.input_stream(memoryview(gz_data), compression='gzip') + assert stream.read() == data + + +def test_input_stream_duck_typing(): + # Accept objects having the right file-like methods... 
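+ # The minimal reader below only implements close(), a 'closed' property
+ # and read(); that is enough file-like surface for pa.input_stream() to
+ # wrap it and expose it as an Arrow input stream.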
+ class DuckReader: + + def close(self): + pass + + @property + def closed(self): + return False + + def read(self, nbytes=None): + return b'hello' + + stream = pa.input_stream(DuckReader()) + assert stream.read(5) == b'hello' + + +def test_input_stream_file_path(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'input_stream' + with open(str(file_path), 'wb') as f: + f.write(data) + + stream = pa.input_stream(file_path) + assert stream.read() == data + stream = pa.input_stream(str(file_path)) + assert stream.read() == data + stream = pa.input_stream(pathlib.Path(str(file_path))) + assert stream.read() == data + + +@pytest.mark.gzip +def test_input_stream_file_path_compressed(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + gz_data = gzip.compress(data) + file_path = tmpdir / 'input_stream.gz' + with open(str(file_path), 'wb') as f: + f.write(gz_data) + + stream = pa.input_stream(file_path) + assert stream.read() == data + stream = pa.input_stream(str(file_path)) + assert stream.read() == data + stream = pa.input_stream(pathlib.Path(str(file_path))) + assert stream.read() == data + + stream = pa.input_stream(file_path, compression='gzip') + assert stream.read() == data + stream = pa.input_stream(file_path, compression=None) + assert stream.read() == gz_data + + +def test_input_stream_file_path_buffered(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'input_stream.buffered' + with open(str(file_path), 'wb') as f: + f.write(data) + + stream = pa.input_stream(file_path, buffer_size=32) + assert isinstance(stream, pa.BufferedInputStream) + assert stream.read() == data + stream = pa.input_stream(str(file_path), buffer_size=64) + assert isinstance(stream, pa.BufferedInputStream) + assert stream.read() == data + stream = pa.input_stream(pathlib.Path(str(file_path)), buffer_size=1024) + assert isinstance(stream, pa.BufferedInputStream) + assert stream.read() == data + + unbuffered_stream = pa.input_stream(file_path, buffer_size=0) + assert isinstance(unbuffered_stream, pa.OSFile) + + msg = 'Buffer size must be larger than zero' + with pytest.raises(ValueError, match=msg): + pa.input_stream(file_path, buffer_size=-1) + with pytest.raises(TypeError): + pa.input_stream(file_path, buffer_size='million') + + +@pytest.mark.gzip +def test_input_stream_file_path_compressed_and_buffered(tmpdir): + data = b"some test data\n" * 100 + b"eof\n" + gz_data = gzip.compress(data) + file_path = tmpdir / 'input_stream_compressed_and_buffered.gz' + with open(str(file_path), 'wb') as f: + f.write(gz_data) + + stream = pa.input_stream(file_path, buffer_size=32, compression='gzip') + assert stream.read() == data + stream = pa.input_stream(str(file_path), buffer_size=64) + assert stream.read() == data + stream = pa.input_stream(pathlib.Path(str(file_path)), buffer_size=1024) + assert stream.read() == data + + +@pytest.mark.gzip +def test_input_stream_python_file(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + bio = BytesIO(data) + + stream = pa.input_stream(bio) + assert stream.read() == data + + gz_data = gzip.compress(data) + bio = BytesIO(gz_data) + stream = pa.input_stream(bio) + assert stream.read() == gz_data + bio.seek(0) + stream = pa.input_stream(bio, compression='gzip') + assert stream.read() == data + + file_path = tmpdir / 'input_stream' + with open(str(file_path), 'wb') as f: + f.write(data) + with open(str(file_path), 'rb') as f: + stream = pa.input_stream(f) + assert stream.read() == data + + +@pytest.mark.gzip +def 
test_input_stream_native_file(): + data = b"some test data\n" * 10 + b"eof\n" + gz_data = gzip.compress(data) + reader = pa.BufferReader(gz_data) + stream = pa.input_stream(reader) + assert stream is reader + reader = pa.BufferReader(gz_data) + stream = pa.input_stream(reader, compression='gzip') + assert stream.read() == data + + +def test_input_stream_errors(tmpdir): + buf = memoryview(b"") + with pytest.raises(ValueError): + pa.input_stream(buf, compression="foo") + + for arg in [bytearray(), StringIO()]: + with pytest.raises(TypeError): + pa.input_stream(arg) + + with assert_file_not_found(): + pa.input_stream("non_existent_file") + + with open(str(tmpdir / 'new_file'), 'wb') as f: + with pytest.raises(TypeError, match="readable file expected"): + pa.input_stream(f) + + +def test_output_stream_buffer(): + data = b"some test data\n" * 10 + b"eof\n" + buf = bytearray(len(data)) + stream = pa.output_stream(pa.py_buffer(buf)) + stream.write(data) + assert buf == data + + buf = bytearray(len(data)) + stream = pa.output_stream(memoryview(buf)) + stream.write(data) + assert buf == data + + +def test_output_stream_duck_typing(): + # Accept objects having the right file-like methods... + class DuckWriter: + def __init__(self): + self.buf = pa.BufferOutputStream() + + def close(self): + pass + + @property + def closed(self): + return False + + def write(self, data): + self.buf.write(data) + + duck_writer = DuckWriter() + stream = pa.output_stream(duck_writer) + assert stream.write(b'hello') + assert duck_writer.buf.getvalue().to_pybytes() == b'hello' + + +def test_output_stream_file_path(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'output_stream' + + def check_data(file_path, data): + with pa.output_stream(file_path) as stream: + stream.write(data) + with open(str(file_path), 'rb') as f: + assert f.read() == data + + check_data(file_path, data) + check_data(str(file_path), data) + check_data(pathlib.Path(str(file_path)), data) + + +@pytest.mark.gzip +def test_output_stream_file_path_compressed(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'output_stream.gz' + + def check_data(file_path, data, **kwargs): + with pa.output_stream(file_path, **kwargs) as stream: + stream.write(data) + with open(str(file_path), 'rb') as f: + return f.read() + + assert gzip.decompress(check_data(file_path, data)) == data + assert gzip.decompress(check_data(str(file_path), data)) == data + assert gzip.decompress( + check_data(pathlib.Path(str(file_path)), data)) == data + + assert gzip.decompress( + check_data(file_path, data, compression='gzip')) == data + assert check_data(file_path, data, compression=None) == data + + with pytest.raises(ValueError, match='Invalid value for compression'): + assert check_data(file_path, data, compression='rabbit') == data + + +def test_output_stream_file_path_buffered(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + file_path = tmpdir / 'output_stream.buffered' + + def check_data(file_path, data, **kwargs): + with pa.output_stream(file_path, **kwargs) as stream: + if kwargs.get('buffer_size', 0) > 0: + assert isinstance(stream, pa.BufferedOutputStream) + stream.write(data) + with open(str(file_path), 'rb') as f: + return f.read() + + unbuffered_stream = pa.output_stream(file_path, buffer_size=0) + assert isinstance(unbuffered_stream, pa.OSFile) + + msg = 'Buffer size must be larger than zero' + with pytest.raises(ValueError, match=msg): + assert check_data(file_path, data, buffer_size=-128) == data + + assert 
check_data(file_path, data, buffer_size=32) == data + assert check_data(file_path, data, buffer_size=1024) == data + assert check_data(str(file_path), data, buffer_size=32) == data + + result = check_data(pathlib.Path(str(file_path)), data, buffer_size=32) + assert result == data + + +@pytest.mark.gzip +def test_output_stream_file_path_compressed_and_buffered(tmpdir): + data = b"some test data\n" * 100 + b"eof\n" + file_path = tmpdir / 'output_stream_compressed_and_buffered.gz' + + def check_data(file_path, data, **kwargs): + with pa.output_stream(file_path, **kwargs) as stream: + stream.write(data) + with open(str(file_path), 'rb') as f: + return f.read() + + result = check_data(file_path, data, buffer_size=32) + assert gzip.decompress(result) == data + + result = check_data(file_path, data, buffer_size=1024) + assert gzip.decompress(result) == data + + result = check_data(file_path, data, buffer_size=1024, compression='gzip') + assert gzip.decompress(result) == data + + +def test_output_stream_destructor(tmpdir): + # The wrapper returned by pa.output_stream() should respect Python + # file semantics, i.e. destroying it should close the underlying + # file cleanly. + data = b"some test data\n" + file_path = tmpdir / 'output_stream.buffered' + + def check_data(file_path, data, **kwargs): + stream = pa.output_stream(file_path, **kwargs) + stream.write(data) + del stream + gc.collect() + with open(str(file_path), 'rb') as f: + return f.read() + + assert check_data(file_path, data, buffer_size=0) == data + assert check_data(file_path, data, buffer_size=1024) == data + + +@pytest.mark.gzip +def test_output_stream_python_file(tmpdir): + data = b"some test data\n" * 10 + b"eof\n" + + def check_data(data, **kwargs): + # XXX cannot use BytesIO because stream.close() is necessary + # to finish writing compressed data, but it will also close the + # underlying BytesIO + fn = str(tmpdir / 'output_stream_file') + with open(fn, 'wb') as f: + with pa.output_stream(f, **kwargs) as stream: + stream.write(data) + with open(fn, 'rb') as f: + return f.read() + + assert check_data(data) == data + assert gzip.decompress(check_data(data, compression='gzip')) == data + + +def test_output_stream_errors(tmpdir): + buf = memoryview(bytearray()) + with pytest.raises(ValueError): + pa.output_stream(buf, compression="foo") + + for arg in [bytearray(), StringIO()]: + with pytest.raises(TypeError): + pa.output_stream(arg) + + fn = str(tmpdir / 'new_file') + with open(fn, 'wb') as f: + pass + with open(fn, 'rb') as f: + with pytest.raises(TypeError, match="writable file expected"): + pa.output_stream(f) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_ipc.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_ipc.py new file mode 100644 index 0000000000000000000000000000000000000000..d38f45b5feff46409f60d85e70b92b0649d3a3f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_ipc.py @@ -0,0 +1,1302 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from collections import UserList +import io +import pathlib +import pytest +import socket +import threading +import weakref + +import numpy as np + +import pyarrow as pa +from pyarrow.tests.util import changed_environ, invoke_script + + +try: + from pandas.testing import assert_frame_equal + import pandas as pd +except ImportError: + pass + + +class IpcFixture: + write_stats = None + + def __init__(self, sink_factory=lambda: io.BytesIO()): + self._sink_factory = sink_factory + self.sink = self.get_sink() + + def get_sink(self): + return self._sink_factory() + + def get_source(self): + return self.sink.getvalue() + + def write_batches(self, num_batches=5, as_table=False): + nrows = 5 + schema = pa.schema([('one', pa.float64()), ('two', pa.utf8())]) + + writer = self._get_writer(self.sink, schema) + + batches = [] + for i in range(num_batches): + batch = pa.record_batch( + [np.random.randn(nrows), + ['foo', None, 'bar', 'bazbaz', 'qux']], + schema=schema) + batches.append(batch) + + if as_table: + table = pa.Table.from_batches(batches) + writer.write_table(table) + else: + for batch in batches: + writer.write_batch(batch) + + self.write_stats = writer.stats + writer.close() + return batches + + +class FileFormatFixture(IpcFixture): + + is_file = True + options = None + + def _get_writer(self, sink, schema): + return pa.ipc.new_file(sink, schema, options=self.options) + + def _check_roundtrip(self, as_table=False): + batches = self.write_batches(as_table=as_table) + file_contents = pa.BufferReader(self.get_source()) + + reader = pa.ipc.open_file(file_contents) + + assert reader.num_record_batches == len(batches) + + for i, batch in enumerate(batches): + # it works. 
Must convert back to DataFrame + batch = reader.get_batch(i) + assert batches[i].equals(batch) + assert reader.schema.equals(batches[0].schema) + + assert isinstance(reader.stats, pa.ipc.ReadStats) + assert isinstance(self.write_stats, pa.ipc.WriteStats) + assert tuple(reader.stats) == tuple(self.write_stats) + + +class StreamFormatFixture(IpcFixture): + + # ARROW-6474, for testing writing old IPC protocol with 4-byte prefix + use_legacy_ipc_format = False + # ARROW-9395, for testing writing old metadata version + options = None + is_file = False + + def _get_writer(self, sink, schema): + return pa.ipc.new_stream( + sink, + schema, + use_legacy_format=self.use_legacy_ipc_format, + options=self.options, + ) + + +class MessageFixture(IpcFixture): + + def _get_writer(self, sink, schema): + return pa.RecordBatchStreamWriter(sink, schema) + + +@pytest.fixture +def ipc_fixture(): + return IpcFixture() + + +@pytest.fixture +def file_fixture(): + return FileFormatFixture() + + +@pytest.fixture +def stream_fixture(): + return StreamFormatFixture() + + +@pytest.fixture(params=[ + pytest.param( + 'file_fixture', + id='File Format' + ), + pytest.param( + 'stream_fixture', + id='Stream Format' + ) +]) +def format_fixture(request): + return request.getfixturevalue(request.param) + + +def test_empty_file(): + buf = b'' + with pytest.raises(pa.ArrowInvalid): + pa.ipc.open_file(pa.BufferReader(buf)) + + +def test_file_simple_roundtrip(file_fixture): + file_fixture._check_roundtrip(as_table=False) + + +def test_file_write_table(file_fixture): + file_fixture._check_roundtrip(as_table=True) + + +@pytest.mark.parametrize("sink_factory", [ + lambda: io.BytesIO(), + lambda: pa.BufferOutputStream() +]) +def test_file_read_all(sink_factory): + fixture = FileFormatFixture(sink_factory) + + batches = fixture.write_batches() + file_contents = pa.BufferReader(fixture.get_source()) + + reader = pa.ipc.open_file(file_contents) + + result = reader.read_all() + expected = pa.Table.from_batches(batches) + assert result.equals(expected) + + +def test_open_file_from_buffer(file_fixture): + # ARROW-2859; APIs accept the buffer protocol + file_fixture.write_batches() + source = file_fixture.get_source() + + reader1 = pa.ipc.open_file(source) + reader2 = pa.ipc.open_file(pa.BufferReader(source)) + reader3 = pa.RecordBatchFileReader(source) + + result1 = reader1.read_all() + result2 = reader2.read_all() + result3 = reader3.read_all() + + assert result1.equals(result2) + assert result1.equals(result3) + + st1 = reader1.stats + assert st1.num_messages == 6 + assert st1.num_record_batches == 5 + assert reader2.stats == st1 + assert reader3.stats == st1 + + +@pytest.mark.pandas +def test_file_read_pandas(file_fixture): + frames = [batch.to_pandas() for batch in file_fixture.write_batches()] + + file_contents = pa.BufferReader(file_fixture.get_source()) + reader = pa.ipc.open_file(file_contents) + result = reader.read_pandas() + + expected = pd.concat(frames).reset_index(drop=True) + assert_frame_equal(result, expected) + + +def test_file_pathlib(file_fixture, tmpdir): + file_fixture.write_batches() + source = file_fixture.get_source() + + path = tmpdir.join('file.arrow').strpath + with open(path, 'wb') as f: + f.write(source) + + t1 = pa.ipc.open_file(pathlib.Path(path)).read_all() + t2 = pa.ipc.open_file(pa.OSFile(path)).read_all() + + assert t1.equals(t2) + + +def test_empty_stream(): + buf = io.BytesIO(b'') + with pytest.raises(pa.ArrowInvalid): + pa.ipc.open_stream(buf) + + +@pytest.mark.pandas +def 
test_read_year_month_nano_interval(tmpdir): + """ARROW-15783: Verify to_pandas works for interval types. + + Interval types require static structures to be enabled. This test verifies + that they are when no other library functions are invoked. + """ + mdn_interval_type = pa.month_day_nano_interval() + schema = pa.schema([pa.field('nums', mdn_interval_type)]) + + path = tmpdir.join('file.arrow').strpath + with pa.OSFile(path, 'wb') as sink: + with pa.ipc.new_file(sink, schema) as writer: + interval_array = pa.array([(1, 2, 3)], type=mdn_interval_type) + batch = pa.record_batch([interval_array], schema) + writer.write(batch) + invoke_script('read_record_batch.py', path) + + +@pytest.mark.pandas +def test_stream_categorical_roundtrip(stream_fixture): + df = pd.DataFrame({ + 'one': np.random.randn(5), + 'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'], + categories=['foo', 'bar'], + ordered=True) + }) + batch = pa.RecordBatch.from_pandas(df) + with stream_fixture._get_writer(stream_fixture.sink, batch.schema) as wr: + wr.write_batch(batch) + + table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source())) + .read_all()) + assert_frame_equal(table.to_pandas(), df) + + +def test_open_stream_from_buffer(stream_fixture): + # ARROW-2859 + stream_fixture.write_batches() + source = stream_fixture.get_source() + + reader1 = pa.ipc.open_stream(source) + reader2 = pa.ipc.open_stream(pa.BufferReader(source)) + reader3 = pa.RecordBatchStreamReader(source) + + result1 = reader1.read_all() + result2 = reader2.read_all() + result3 = reader3.read_all() + + assert result1.equals(result2) + assert result1.equals(result3) + + st1 = reader1.stats + assert st1.num_messages == 6 + assert st1.num_record_batches == 5 + assert reader2.stats == st1 + assert reader3.stats == st1 + + assert tuple(st1) == tuple(stream_fixture.write_stats) + + +@pytest.mark.parametrize('options', [ + pa.ipc.IpcReadOptions(), + pa.ipc.IpcReadOptions(use_threads=False), +]) +def test_open_stream_options(stream_fixture, options): + stream_fixture.write_batches() + source = stream_fixture.get_source() + + reader = pa.ipc.open_stream(source, options=options) + + reader.read_all() + st = reader.stats + assert st.num_messages == 6 + assert st.num_record_batches == 5 + + assert tuple(st) == tuple(stream_fixture.write_stats) + + +def test_open_stream_with_wrong_options(stream_fixture): + stream_fixture.write_batches() + source = stream_fixture.get_source() + + with pytest.raises(TypeError): + pa.ipc.open_stream(source, options=True) + + +@pytest.mark.parametrize('options', [ + pa.ipc.IpcReadOptions(), + pa.ipc.IpcReadOptions(use_threads=False), +]) +def test_open_file_options(file_fixture, options): + file_fixture.write_batches() + source = file_fixture.get_source() + + reader = pa.ipc.open_file(source, options=options) + + reader.read_all() + + st = reader.stats + assert st.num_messages == 6 + assert st.num_record_batches == 5 + + +def test_open_file_with_wrong_options(file_fixture): + file_fixture.write_batches() + source = file_fixture.get_source() + + with pytest.raises(TypeError): + pa.ipc.open_file(source, options=True) + + +@pytest.mark.pandas +def test_stream_write_dispatch(stream_fixture): + # ARROW-1616 + df = pd.DataFrame({ + 'one': np.random.randn(5), + 'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'], + categories=['foo', 'bar'], + ordered=True) + }) + table = pa.Table.from_pandas(df, preserve_index=False) + batch = pa.RecordBatch.from_pandas(df, preserve_index=False) + with 
stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr: + wr.write(table) + wr.write(batch) + + table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source())) + .read_all()) + assert_frame_equal(table.to_pandas(), + pd.concat([df, df], ignore_index=True)) + + +@pytest.mark.pandas +def test_stream_write_table_batches(stream_fixture): + # ARROW-504 + df = pd.DataFrame({ + 'one': np.random.randn(20), + }) + + b1 = pa.RecordBatch.from_pandas(df[:10], preserve_index=False) + b2 = pa.RecordBatch.from_pandas(df, preserve_index=False) + + table = pa.Table.from_batches([b1, b2, b1]) + + with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr: + wr.write_table(table, max_chunksize=15) + + batches = list(pa.ipc.open_stream(stream_fixture.get_source())) + + assert list(map(len, batches)) == [10, 15, 5, 10] + result_table = pa.Table.from_batches(batches) + assert_frame_equal(result_table.to_pandas(), + pd.concat([df[:10], df, df[:10]], + ignore_index=True)) + + +@pytest.mark.parametrize('use_legacy_ipc_format', [False, True]) +def test_stream_simple_roundtrip(stream_fixture, use_legacy_ipc_format): + stream_fixture.use_legacy_ipc_format = use_legacy_ipc_format + batches = stream_fixture.write_batches() + file_contents = pa.BufferReader(stream_fixture.get_source()) + reader = pa.ipc.open_stream(file_contents) + + assert reader.schema.equals(batches[0].schema) + + total = 0 + for i, next_batch in enumerate(reader): + assert next_batch.equals(batches[i]) + total += 1 + + assert total == len(batches) + + with pytest.raises(StopIteration): + reader.read_next_batch() + + +@pytest.mark.zstd +def test_compression_roundtrip(): + sink = io.BytesIO() + values = np.random.randint(0, 3, 10000) + table = pa.Table.from_arrays([values], names=["values"]) + + options = pa.ipc.IpcWriteOptions(compression='zstd') + with pa.ipc.RecordBatchFileWriter( + sink, table.schema, options=options) as writer: + writer.write_table(table) + len1 = len(sink.getvalue()) + + sink2 = io.BytesIO() + codec = pa.Codec('zstd', compression_level=5) + options = pa.ipc.IpcWriteOptions(compression=codec) + with pa.ipc.RecordBatchFileWriter( + sink2, table.schema, options=options) as writer: + writer.write_table(table) + len2 = len(sink2.getvalue()) + + # In theory len2 should be less than len1 but for this test we just want + # to ensure compression_level is being correctly passed down to the C++ + # layer so we don't really care if it makes it worse or better + assert len2 != len1 + + t1 = pa.ipc.open_file(sink).read_all() + t2 = pa.ipc.open_file(sink2).read_all() + + assert t1 == t2 + + +def test_write_options(): + options = pa.ipc.IpcWriteOptions() + assert options.allow_64bit is False + assert options.use_legacy_format is False + assert options.metadata_version == pa.ipc.MetadataVersion.V5 + + options.allow_64bit = True + assert options.allow_64bit is True + + options.use_legacy_format = True + assert options.use_legacy_format is True + + options.metadata_version = pa.ipc.MetadataVersion.V4 + assert options.metadata_version == pa.ipc.MetadataVersion.V4 + for value in ('V5', 42): + with pytest.raises((TypeError, ValueError)): + options.metadata_version = value + + assert options.compression is None + for value in ['lz4', 'zstd']: + if pa.Codec.is_available(value): + options.compression = value + assert options.compression == value + options.compression = value.upper() + assert options.compression == value + options.compression = None + assert options.compression is None + + with 
pytest.raises(TypeError): + options.compression = 0 + + assert options.use_threads is True + options.use_threads = False + assert options.use_threads is False + + if pa.Codec.is_available('lz4'): + options = pa.ipc.IpcWriteOptions( + metadata_version=pa.ipc.MetadataVersion.V4, + allow_64bit=True, + use_legacy_format=True, + compression='lz4', + use_threads=False) + assert options.metadata_version == pa.ipc.MetadataVersion.V4 + assert options.allow_64bit is True + assert options.use_legacy_format is True + assert options.compression == 'lz4' + assert options.use_threads is False + + +def test_write_options_legacy_exclusive(stream_fixture): + with pytest.raises( + ValueError, + match="provide at most one of options and use_legacy_format"): + stream_fixture.use_legacy_ipc_format = True + stream_fixture.options = pa.ipc.IpcWriteOptions() + stream_fixture.write_batches() + + +@pytest.mark.parametrize('options', [ + pa.ipc.IpcWriteOptions(), + pa.ipc.IpcWriteOptions(allow_64bit=True), + pa.ipc.IpcWriteOptions(use_legacy_format=True), + pa.ipc.IpcWriteOptions(metadata_version=pa.ipc.MetadataVersion.V4), + pa.ipc.IpcWriteOptions(use_legacy_format=True, + metadata_version=pa.ipc.MetadataVersion.V4), +]) +def test_stream_options_roundtrip(stream_fixture, options): + stream_fixture.use_legacy_ipc_format = None + stream_fixture.options = options + batches = stream_fixture.write_batches() + file_contents = pa.BufferReader(stream_fixture.get_source()) + + message = pa.ipc.read_message(stream_fixture.get_source()) + assert message.metadata_version == options.metadata_version + + reader = pa.ipc.open_stream(file_contents) + + assert reader.schema.equals(batches[0].schema) + + total = 0 + for i, next_batch in enumerate(reader): + assert next_batch.equals(batches[i]) + total += 1 + + assert total == len(batches) + + with pytest.raises(StopIteration): + reader.read_next_batch() + + +def test_read_options(): + options = pa.ipc.IpcReadOptions() + assert options.use_threads is True + assert options.ensure_native_endian is True + assert options.included_fields == [] + + options.ensure_native_endian = False + assert options.ensure_native_endian is False + + options.use_threads = False + assert options.use_threads is False + + options.included_fields = [0, 1] + assert options.included_fields == [0, 1] + + with pytest.raises(TypeError): + options.included_fields = None + + options = pa.ipc.IpcReadOptions( + use_threads=False, ensure_native_endian=False, + included_fields=[1] + ) + assert options.use_threads is False + assert options.ensure_native_endian is False + assert options.included_fields == [1] + + +def test_read_options_included_fields(stream_fixture): + options1 = pa.ipc.IpcReadOptions() + options2 = pa.ipc.IpcReadOptions(included_fields=[1]) + table = pa.Table.from_arrays([pa.array(['foo', 'bar', 'baz', 'qux']), + pa.array([1, 2, 3, 4])], + names=['a', 'b']) + with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr: + wr.write_table(table) + source = stream_fixture.get_source() + + reader1 = pa.ipc.open_stream(source, options=options1) + reader2 = pa.ipc.open_stream( + source, options=options2, memory_pool=pa.system_memory_pool()) + + result1 = reader1.read_all() + result2 = reader2.read_all() + + assert result1.num_columns == 2 + assert result2.num_columns == 1 + + expected = pa.Table.from_arrays([pa.array([1, 2, 3, 4])], names=["b"]) + assert result2 == expected + assert result1 == table + + +def test_dictionary_delta(format_fixture): + ty = pa.dictionary(pa.int8(), pa.utf8()) + data = 
[["foo", "foo", None], + ["foo", "bar", "foo"], # potential delta + ["foo", "bar"], # nothing new + ["foo", None, "bar", "quux"], # potential delta + ["bar", "quux"], # replacement + ] + batches = [ + pa.RecordBatch.from_arrays([pa.array(v, type=ty)], names=['dicts']) + for v in data] + batches_delta_only = batches[:4] + schema = batches[0].schema + + def write_batches(batches, as_table=False): + with format_fixture._get_writer(pa.MockOutputStream(), + schema) as writer: + if as_table: + table = pa.Table.from_batches(batches) + writer.write_table(table) + else: + for batch in batches: + writer.write_batch(batch) + return writer.stats + + if format_fixture.is_file: + # File format cannot handle replacement + with pytest.raises(pa.ArrowInvalid): + write_batches(batches) + # File format cannot handle delta if emit_deltas + # is not provided + with pytest.raises(pa.ArrowInvalid): + write_batches(batches_delta_only) + else: + st = write_batches(batches) + assert st.num_record_batches == 5 + assert st.num_dictionary_batches == 4 + assert st.num_replaced_dictionaries == 3 + assert st.num_dictionary_deltas == 0 + + format_fixture.use_legacy_ipc_format = None + format_fixture.options = pa.ipc.IpcWriteOptions( + emit_dictionary_deltas=True) + if format_fixture.is_file: + # File format cannot handle replacement + with pytest.raises(pa.ArrowInvalid): + write_batches(batches) + else: + st = write_batches(batches) + assert st.num_record_batches == 5 + assert st.num_dictionary_batches == 4 + assert st.num_replaced_dictionaries == 1 + assert st.num_dictionary_deltas == 2 + + st = write_batches(batches_delta_only) + assert st.num_record_batches == 4 + assert st.num_dictionary_batches == 3 + assert st.num_replaced_dictionaries == 0 + assert st.num_dictionary_deltas == 2 + + format_fixture.options = pa.ipc.IpcWriteOptions( + unify_dictionaries=True + ) + st = write_batches(batches, as_table=True) + assert st.num_record_batches == 5 + if format_fixture.is_file: + assert st.num_dictionary_batches == 1 + assert st.num_replaced_dictionaries == 0 + assert st.num_dictionary_deltas == 0 + else: + assert st.num_dictionary_batches == 4 + assert st.num_replaced_dictionaries == 3 + assert st.num_dictionary_deltas == 0 + + +def test_envvar_set_legacy_ipc_format(): + schema = pa.schema([pa.field('foo', pa.int32())]) + + writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema) + assert not writer._use_legacy_format + assert writer._metadata_version == pa.ipc.MetadataVersion.V5 + writer = pa.ipc.new_file(pa.BufferOutputStream(), schema) + assert not writer._use_legacy_format + assert writer._metadata_version == pa.ipc.MetadataVersion.V5 + + with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'): + writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema) + assert writer._use_legacy_format + assert writer._metadata_version == pa.ipc.MetadataVersion.V5 + writer = pa.ipc.new_file(pa.BufferOutputStream(), schema) + assert writer._use_legacy_format + assert writer._metadata_version == pa.ipc.MetadataVersion.V5 + + with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'): + writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema) + assert not writer._use_legacy_format + assert writer._metadata_version == pa.ipc.MetadataVersion.V4 + writer = pa.ipc.new_file(pa.BufferOutputStream(), schema) + assert not writer._use_legacy_format + assert writer._metadata_version == pa.ipc.MetadataVersion.V4 + + with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'): + with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'): + writer 
= pa.ipc.new_stream(pa.BufferOutputStream(), schema) + assert writer._use_legacy_format + assert writer._metadata_version == pa.ipc.MetadataVersion.V4 + writer = pa.ipc.new_file(pa.BufferOutputStream(), schema) + assert writer._use_legacy_format + assert writer._metadata_version == pa.ipc.MetadataVersion.V4 + + +def test_stream_read_all(stream_fixture): + batches = stream_fixture.write_batches() + file_contents = pa.BufferReader(stream_fixture.get_source()) + reader = pa.ipc.open_stream(file_contents) + + result = reader.read_all() + expected = pa.Table.from_batches(batches) + assert result.equals(expected) + + +@pytest.mark.pandas +def test_stream_read_pandas(stream_fixture): + frames = [batch.to_pandas() for batch in stream_fixture.write_batches()] + file_contents = stream_fixture.get_source() + reader = pa.ipc.open_stream(file_contents) + result = reader.read_pandas() + + expected = pd.concat(frames).reset_index(drop=True) + assert_frame_equal(result, expected) + + +@pytest.fixture +def example_messages(stream_fixture): + batches = stream_fixture.write_batches() + file_contents = stream_fixture.get_source() + buf_reader = pa.BufferReader(file_contents) + reader = pa.MessageReader.open_stream(buf_reader) + return batches, list(reader) + + +def test_message_ctors_no_segfault(): + with pytest.raises(TypeError): + repr(pa.Message()) + + with pytest.raises(TypeError): + repr(pa.MessageReader()) + + +def test_message_reader(example_messages): + _, messages = example_messages + + assert len(messages) == 6 + assert messages[0].type == 'schema' + assert isinstance(messages[0].metadata, pa.Buffer) + assert isinstance(messages[0].body, pa.Buffer) + assert messages[0].metadata_version == pa.MetadataVersion.V5 + + for msg in messages[1:]: + assert msg.type == 'record batch' + assert isinstance(msg.metadata, pa.Buffer) + assert isinstance(msg.body, pa.Buffer) + assert msg.metadata_version == pa.MetadataVersion.V5 + + +def test_message_serialize_read_message(example_messages): + _, messages = example_messages + + msg = messages[0] + buf = msg.serialize() + reader = pa.BufferReader(buf.to_pybytes() * 2) + + restored = pa.ipc.read_message(buf) + restored2 = pa.ipc.read_message(reader) + restored3 = pa.ipc.read_message(buf.to_pybytes()) + restored4 = pa.ipc.read_message(reader) + + assert msg.equals(restored) + assert msg.equals(restored2) + assert msg.equals(restored3) + assert msg.equals(restored4) + + with pytest.raises(pa.ArrowInvalid, match="Corrupted message"): + pa.ipc.read_message(pa.BufferReader(b'ab')) + + with pytest.raises(EOFError): + pa.ipc.read_message(reader) + + +@pytest.mark.gzip +def test_message_read_from_compressed(example_messages): + # Part of ARROW-5910 + _, messages = example_messages + for message in messages: + raw_out = pa.BufferOutputStream() + with pa.output_stream(raw_out, compression='gzip') as compressed_out: + message.serialize_to(compressed_out) + + compressed_buf = raw_out.getvalue() + + result = pa.ipc.read_message(pa.input_stream(compressed_buf, + compression='gzip')) + assert result.equals(message) + + +def test_message_read_schema(example_messages): + batches, messages = example_messages + schema = pa.ipc.read_schema(messages[0]) + assert schema.equals(batches[1].schema) + + +def test_message_read_record_batch(example_messages): + batches, messages = example_messages + + for batch, message in zip(batches, messages[1:]): + read_batch = pa.ipc.read_record_batch(message, batch.schema) + assert read_batch.equals(batch) + + +def 
test_read_record_batch_on_stream_error_message(): + # ARROW-5374 + batch = pa.record_batch([pa.array([b"foo"], type=pa.utf8())], + names=['strs']) + stream = pa.BufferOutputStream() + with pa.ipc.new_stream(stream, batch.schema) as writer: + writer.write_batch(batch) + buf = stream.getvalue() + with pytest.raises(IOError, + match="type record batch but got schema"): + pa.ipc.read_record_batch(buf, batch.schema) + + +# ---------------------------------------------------------------------- +# Socket streaming testa + + +class StreamReaderServer(threading.Thread): + + def init(self, do_read_all): + self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._sock.bind(('127.0.0.1', 0)) + self._sock.listen(1) + host, port = self._sock.getsockname() + self._do_read_all = do_read_all + self._schema = None + self._batches = [] + self._table = None + return port + + def run(self): + connection, client_address = self._sock.accept() + try: + source = connection.makefile(mode='rb') + reader = pa.ipc.open_stream(source) + self._schema = reader.schema + if self._do_read_all: + self._table = reader.read_all() + else: + for i, batch in enumerate(reader): + self._batches.append(batch) + finally: + connection.close() + self._sock.close() + + def get_result(self): + return (self._schema, self._table if self._do_read_all + else self._batches) + + +class SocketStreamFixture(IpcFixture): + + def __init__(self): + # XXX(wesm): test will decide when to start socket server. This should + # probably be refactored + pass + + def start_server(self, do_read_all): + self._server = StreamReaderServer() + port = self._server.init(do_read_all) + self._server.start() + self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._sock.connect(('127.0.0.1', port)) + self.sink = self.get_sink() + + def stop_and_get_result(self): + import struct + self.sink.write(struct.pack('Q', 0)) + self.sink.flush() + self._sock.close() + self._server.join() + return self._server.get_result() + + def get_sink(self): + return self._sock.makefile(mode='wb') + + def _get_writer(self, sink, schema): + return pa.RecordBatchStreamWriter(sink, schema) + + +@pytest.fixture +def socket_fixture(): + return SocketStreamFixture() + + +def test_socket_simple_roundtrip(socket_fixture): + socket_fixture.start_server(do_read_all=False) + writer_batches = socket_fixture.write_batches() + reader_schema, reader_batches = socket_fixture.stop_and_get_result() + + assert reader_schema.equals(writer_batches[0].schema) + assert len(reader_batches) == len(writer_batches) + for i, batch in enumerate(writer_batches): + assert reader_batches[i].equals(batch) + + +def test_socket_read_all(socket_fixture): + socket_fixture.start_server(do_read_all=True) + writer_batches = socket_fixture.write_batches() + _, result = socket_fixture.stop_and_get_result() + + expected = pa.Table.from_batches(writer_batches) + assert result.equals(expected) + + +# ---------------------------------------------------------------------- +# Miscellaneous IPC tests + +@pytest.mark.pandas +def test_ipc_file_stream_has_eos(): + # ARROW-5395 + df = pd.DataFrame({'foo': [1.5]}) + batch = pa.RecordBatch.from_pandas(df) + sink = pa.BufferOutputStream() + write_file(batch, sink) + buffer = sink.getvalue() + + # skip the file magic + reader = pa.ipc.open_stream(buffer[8:]) + + # will fail if encounters footer data instead of eos + rdf = reader.read_pandas() + + assert_frame_equal(df, rdf) + + +@pytest.mark.pandas +def test_ipc_zero_copy_numpy(): + df = pd.DataFrame({'foo': [1.5]}) 
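+ # Round trip: DataFrame -> RecordBatch -> IPC file bytes -> BufferReader
+ # -> record batches -> DataFrame. Reading from a BufferReader allows the
+ # batch data to reference the IPC buffer directly (the "zero copy" in the
+ # test name) rather than being copied into new allocations.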
+ + batch = pa.RecordBatch.from_pandas(df) + sink = pa.BufferOutputStream() + write_file(batch, sink) + buffer = sink.getvalue() + reader = pa.BufferReader(buffer) + + batches = read_file(reader) + + data = batches[0].to_pandas() + rdf = pd.DataFrame(data) + assert_frame_equal(df, rdf) + + +@pytest.mark.pandas +@pytest.mark.parametrize("ipc_type", ["stream", "file"]) +def test_batches_with_custom_metadata_roundtrip(ipc_type): + df = pd.DataFrame({'foo': [1.5]}) + + batch = pa.RecordBatch.from_pandas(df) + sink = pa.BufferOutputStream() + + batch_count = 2 + file_factory = {"stream": pa.ipc.new_stream, + "file": pa.ipc.new_file}[ipc_type] + + with file_factory(sink, batch.schema) as writer: + for i in range(batch_count): + writer.write_batch(batch, custom_metadata={"batch_id": str(i)}) + # write a batch without custom metadata + writer.write_batch(batch) + + buffer = sink.getvalue() + + if ipc_type == "stream": + with pa.ipc.open_stream(buffer) as reader: + batch_with_metas = list(reader.iter_batches_with_custom_metadata()) + else: + with pa.ipc.open_file(buffer) as reader: + batch_with_metas = [reader.get_batch_with_custom_metadata(i) + for i in range(reader.num_record_batches)] + + for i in range(batch_count): + assert batch_with_metas[i].batch.num_rows == 1 + assert isinstance( + batch_with_metas[i].custom_metadata, pa.KeyValueMetadata) + assert batch_with_metas[i].custom_metadata == {"batch_id": str(i)} + + # the last batch has no custom metadata + assert batch_with_metas[batch_count].batch.num_rows == 1 + assert batch_with_metas[batch_count].custom_metadata is None + + +def test_ipc_stream_no_batches(): + # ARROW-2307 + table = pa.Table.from_arrays([pa.array([1, 2, 3, 4]), + pa.array(['foo', 'bar', 'baz', 'qux'])], + names=['a', 'b']) + + sink = pa.BufferOutputStream() + with pa.ipc.new_stream(sink, table.schema): + pass + + source = sink.getvalue() + with pa.ipc.open_stream(source) as reader: + result = reader.read_all() + + assert result.schema.equals(table.schema) + assert len(result) == 0 + + +@pytest.mark.pandas +def test_get_record_batch_size(): + N = 10 + itemsize = 8 + df = pd.DataFrame({'foo': np.random.randn(N)}) + + batch = pa.RecordBatch.from_pandas(df) + assert pa.ipc.get_record_batch_size(batch) > (N * itemsize) + + +@pytest.mark.pandas +def _check_serialize_pandas_round_trip(df, use_threads=False): + buf = pa.serialize_pandas(df, nthreads=2 if use_threads else 1) + result = pa.deserialize_pandas(buf, use_threads=use_threads) + assert_frame_equal(result, df) + + +@pytest.mark.pandas +def test_pandas_serialize_round_trip(): + index = pd.Index([1, 2, 3], name='my_index') + columns = ['foo', 'bar'] + df = pd.DataFrame( + {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')}, + index=index, columns=columns + ) + _check_serialize_pandas_round_trip(df) + + +@pytest.mark.pandas +def test_pandas_serialize_round_trip_nthreads(): + index = pd.Index([1, 2, 3], name='my_index') + columns = ['foo', 'bar'] + df = pd.DataFrame( + {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')}, + index=index, columns=columns + ) + _check_serialize_pandas_round_trip(df, use_threads=True) + + +@pytest.mark.pandas +def test_pandas_serialize_round_trip_multi_index(): + index1 = pd.Index([1, 2, 3], name='level_1') + index2 = pd.Index(list('def'), name=None) + index = pd.MultiIndex.from_arrays([index1, index2]) + + columns = ['foo', 'bar'] + df = pd.DataFrame( + {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')}, + index=index, + columns=columns, + ) + _check_serialize_pandas_round_trip(df) + + +@pytest.mark.pandas +def 
test_serialize_pandas_empty_dataframe(): + df = pd.DataFrame() + _check_serialize_pandas_round_trip(df) + + +@pytest.mark.pandas +def test_pandas_serialize_round_trip_not_string_columns(): + df = pd.DataFrame(list(zip([1.5, 1.6, 1.7], 'abc'))) + buf = pa.serialize_pandas(df) + result = pa.deserialize_pandas(buf) + assert_frame_equal(result, df) + + +@pytest.mark.pandas +def test_serialize_pandas_no_preserve_index(): + df = pd.DataFrame({'a': [1, 2, 3]}, index=[1, 2, 3]) + expected = pd.DataFrame({'a': [1, 2, 3]}) + + buf = pa.serialize_pandas(df, preserve_index=False) + result = pa.deserialize_pandas(buf) + assert_frame_equal(result, expected) + + buf = pa.serialize_pandas(df, preserve_index=True) + result = pa.deserialize_pandas(buf) + assert_frame_equal(result, df) + + +@pytest.mark.pandas +def test_schema_batch_serialize_methods(): + nrows = 5 + df = pd.DataFrame({ + 'one': np.random.randn(nrows), + 'two': ['foo', np.nan, 'bar', 'bazbaz', 'qux']}) + batch = pa.RecordBatch.from_pandas(df) + + s_schema = batch.schema.serialize() + s_batch = batch.serialize() + + recons_schema = pa.ipc.read_schema(s_schema) + recons_batch = pa.ipc.read_record_batch(s_batch, recons_schema) + assert recons_batch.equals(batch) + + +def test_schema_serialization_with_metadata(): + field_metadata = {b'foo': b'bar', b'kind': b'field'} + schema_metadata = {b'foo': b'bar', b'kind': b'schema'} + + f0 = pa.field('a', pa.int8()) + f1 = pa.field('b', pa.string(), metadata=field_metadata) + + schema = pa.schema([f0, f1], metadata=schema_metadata) + + s_schema = schema.serialize() + recons_schema = pa.ipc.read_schema(s_schema) + + assert recons_schema.equals(schema) + assert recons_schema.metadata == schema_metadata + assert recons_schema[0].metadata is None + assert recons_schema[1].metadata == field_metadata + + +def write_file(batch, sink): + with pa.ipc.new_file(sink, batch.schema) as writer: + writer.write_batch(batch) + + +def read_file(source): + with pa.ipc.open_file(source) as reader: + return [reader.get_batch(i) for i in range(reader.num_record_batches)] + + +def test_write_empty_ipc_file(): + # ARROW-3894: IPC file was not being properly initialized when no record + # batches are being written + schema = pa.schema([('field', pa.int64())]) + + sink = pa.BufferOutputStream() + with pa.ipc.new_file(sink, schema): + pass + + buf = sink.getvalue() + with pa.RecordBatchFileReader(pa.BufferReader(buf)) as reader: + table = reader.read_all() + assert len(table) == 0 + assert table.schema.equals(schema) + + +def test_py_record_batch_reader(): + def make_schema(): + return pa.schema([('field', pa.int64())]) + + def make_batches(): + schema = make_schema() + batch1 = pa.record_batch([[1, 2, 3]], schema=schema) + batch2 = pa.record_batch([[4, 5]], schema=schema) + return [batch1, batch2] + + # With iterable + batches = UserList(make_batches()) # weakrefable + wr = weakref.ref(batches) + + with pa.RecordBatchReader.from_batches(make_schema(), + batches) as reader: + batches = None + assert wr() is not None + assert list(reader) == make_batches() + assert wr() is None + + # With iterator + batches = iter(UserList(make_batches())) # weakrefable + wr = weakref.ref(batches) + + with pa.RecordBatchReader.from_batches(make_schema(), + batches) as reader: + batches = None + assert wr() is not None + assert list(reader) == make_batches() + assert wr() is None + + # ensure we get proper error when not passing a schema + # (https://issues.apache.org/jira/browse/ARROW-18229) + batches = make_batches() + with 
pytest.raises(TypeError): + reader = pa.RecordBatchReader.from_batches( + [('field', pa.int64())], batches) + pass + + with pytest.raises(TypeError): + reader = pa.RecordBatchReader.from_batches(None, batches) + pass + + +def test_record_batch_reader_from_arrow_stream(): + + class StreamWrapper: + def __init__(self, batches): + self.batches = batches + + def __arrow_c_stream__(self, requested_schema=None): + reader = pa.RecordBatchReader.from_batches( + self.batches[0].schema, self.batches) + return reader.__arrow_c_stream__(requested_schema) + + data = [ + pa.record_batch([pa.array([1, 2, 3], type=pa.int64())], names=['a']), + pa.record_batch([pa.array([4, 5, 6], type=pa.int64())], names=['a']) + ] + wrapper = StreamWrapper(data) + + # Can roundtrip a pyarrow stream-like object + expected = pa.Table.from_batches(data) + reader = pa.RecordBatchReader.from_stream(expected) + assert reader.read_all() == expected + + # Can roundtrip through the wrapper. + reader = pa.RecordBatchReader.from_stream(wrapper) + assert reader.read_all() == expected + + # Passing schema works if already that schema + reader = pa.RecordBatchReader.from_stream(wrapper, schema=data[0].schema) + assert reader.read_all() == expected + + # Passing a different but castable schema works + good_schema = pa.schema([pa.field("a", pa.int32())]) + reader = pa.RecordBatchReader.from_stream(wrapper, schema=good_schema) + assert reader.read_all() == expected.cast(good_schema) + + # If schema doesn't match, raises TypeError + with pytest.raises(pa.lib.ArrowTypeError, match='Field 0 cannot be cast'): + pa.RecordBatchReader.from_stream( + wrapper, schema=pa.schema([pa.field('a', pa.list_(pa.int32()))]) + ) + + # Proper type errors for wrong input + with pytest.raises(TypeError): + pa.RecordBatchReader.from_stream(data[0]['a']) + + with pytest.raises(TypeError): + pa.RecordBatchReader.from_stream(expected, schema=data[0]) + + +def test_record_batch_reader_cast(): + schema_src = pa.schema([pa.field('a', pa.int64())]) + data = [ + pa.record_batch([pa.array([1, 2, 3], type=pa.int64())], names=['a']), + pa.record_batch([pa.array([4, 5, 6], type=pa.int64())], names=['a']), + ] + table_src = pa.Table.from_batches(data) + + # Cast to same type should always work + reader = pa.RecordBatchReader.from_batches(schema_src, data) + assert reader.cast(schema_src).read_all() == table_src + + # Check non-trivial cast + schema_dst = pa.schema([pa.field('a', pa.int32())]) + reader = pa.RecordBatchReader.from_batches(schema_src, data) + assert reader.cast(schema_dst).read_all() == table_src.cast(schema_dst) + + # Check error for field name/length mismatch + reader = pa.RecordBatchReader.from_batches(schema_src, data) + with pytest.raises(ValueError, match="Target schema's field names"): + reader.cast(pa.schema([])) + + # Check error for impossible cast in call to .cast() + reader = pa.RecordBatchReader.from_batches(schema_src, data) + with pytest.raises(pa.lib.ArrowTypeError, match='Field 0 cannot be cast'): + reader.cast(pa.schema([pa.field('a', pa.list_(pa.int32()))])) + + +def test_record_batch_reader_cast_nulls(): + schema_src = pa.schema([pa.field('a', pa.int64())]) + data_with_nulls = [ + pa.record_batch([pa.array([1, 2, None], type=pa.int64())], names=['a']), + ] + data_without_nulls = [ + pa.record_batch([pa.array([1, 2, 3], type=pa.int64())], names=['a']), + ] + table_with_nulls = pa.Table.from_batches(data_with_nulls) + table_without_nulls = pa.Table.from_batches(data_without_nulls) + + # Cast to nullable destination should work + reader = 
pa.RecordBatchReader.from_batches(schema_src, data_with_nulls) + schema_dst = pa.schema([pa.field('a', pa.int32())]) + assert reader.cast(schema_dst).read_all() == table_with_nulls.cast(schema_dst) + + # Cast to non-nullable destination should work if there are no nulls + reader = pa.RecordBatchReader.from_batches(schema_src, data_without_nulls) + schema_dst = pa.schema([pa.field('a', pa.int32(), nullable=False)]) + assert reader.cast(schema_dst).read_all() == table_without_nulls.cast(schema_dst) + + # Cast to non-nullable destination should error if there are nulls + # when the batch is pulled + reader = pa.RecordBatchReader.from_batches(schema_src, data_with_nulls) + casted_reader = reader.cast(schema_dst) + with pytest.raises(pa.lib.ArrowInvalid, match="Can't cast array"): + casted_reader.read_all() diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_json.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_json.py new file mode 100644 index 0000000000000000000000000000000000000000..a0a6174266310fca4ffa08308587ad06029403f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_json.py @@ -0,0 +1,351 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from collections import OrderedDict +from decimal import Decimal +import io +import itertools +import json +import string +import unittest + +import numpy as np +import pytest + +import pyarrow as pa +from pyarrow.json import read_json, ReadOptions, ParseOptions + + +def generate_col_names(): + # 'a', 'b'... 'z', then 'aa', 'ab'... 
+ letters = string.ascii_lowercase + yield from letters + for first in letters: + for second in letters: + yield first + second + + +def make_random_json(num_cols=2, num_rows=10, linesep='\r\n'): + arr = np.random.RandomState(42).randint(0, 1000, size=(num_cols, num_rows)) + col_names = list(itertools.islice(generate_col_names(), num_cols)) + lines = [] + for row in arr.T: + json_obj = OrderedDict([(k, int(v)) for (k, v) in zip(col_names, row)]) + lines.append(json.dumps(json_obj)) + data = linesep.join(lines).encode() + columns = [pa.array(col, type=pa.int64()) for col in arr] + expected = pa.Table.from_arrays(columns, col_names) + return data, expected + + +def check_options_class_pickling(cls, pickler, **attr_values): + opts = cls(**attr_values) + new_opts = pickler.loads(pickler.dumps(opts, + protocol=pickler.HIGHEST_PROTOCOL)) + for name, value in attr_values.items(): + assert getattr(new_opts, name) == value + + +def test_read_options(pickle_module): + cls = ReadOptions + opts = cls() + + assert opts.block_size > 0 + opts.block_size = 12345 + assert opts.block_size == 12345 + + assert opts.use_threads is True + opts.use_threads = False + assert opts.use_threads is False + + opts = cls(block_size=1234, use_threads=False) + assert opts.block_size == 1234 + assert opts.use_threads is False + + check_options_class_pickling(cls, pickler=pickle_module, + block_size=1234, + use_threads=False) + + +def test_parse_options(pickle_module): + cls = ParseOptions + opts = cls() + assert opts.newlines_in_values is False + assert opts.explicit_schema is None + + opts.newlines_in_values = True + assert opts.newlines_in_values is True + + schema = pa.schema([pa.field('foo', pa.int32())]) + opts.explicit_schema = schema + assert opts.explicit_schema == schema + + assert opts.unexpected_field_behavior == "infer" + for value in ["ignore", "error", "infer"]: + opts.unexpected_field_behavior = value + assert opts.unexpected_field_behavior == value + + with pytest.raises(ValueError): + opts.unexpected_field_behavior = "invalid-value" + + check_options_class_pickling(cls, pickler=pickle_module, + explicit_schema=schema, + newlines_in_values=False, + unexpected_field_behavior="ignore") + + +class BaseTestJSONRead: + + def read_bytes(self, b, **kwargs): + return self.read_json(pa.py_buffer(b), **kwargs) + + def check_names(self, table, names): + assert table.num_columns == len(names) + assert [c.name for c in table.columns] == names + + def test_file_object(self): + data = b'{"a": 1, "b": 2}\n' + expected_data = {'a': [1], 'b': [2]} + bio = io.BytesIO(data) + table = self.read_json(bio) + assert table.to_pydict() == expected_data + # Text files not allowed + sio = io.StringIO(data.decode()) + with pytest.raises(TypeError): + self.read_json(sio) + + def test_block_sizes(self): + rows = b'{"a": 1}\n{"a": 2}\n{"a": 3}' + read_options = ReadOptions() + parse_options = ParseOptions() + + for data in [rows, rows + b'\n']: + for newlines_in_values in [False, True]: + parse_options.newlines_in_values = newlines_in_values + read_options.block_size = 4 + with pytest.raises(ValueError, + match="try to increase block size"): + self.read_bytes(data, read_options=read_options, + parse_options=parse_options) + + # Validate reader behavior with various block sizes. + # There used to be bugs in this area. 
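+            # Each block size in the 9-19 byte range fits at least one row but not the whole input, so parsing has to continue across block boundaries.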
+ for block_size in range(9, 20): + read_options.block_size = block_size + table = self.read_bytes(data, read_options=read_options, + parse_options=parse_options) + assert table.to_pydict() == {'a': [1, 2, 3]} + + def test_no_newline_at_end(self): + rows = b'{"a": 1,"b": 2, "c": 3}\n{"a": 4,"b": 5, "c": 6}' + table = self.read_bytes(rows) + assert table.to_pydict() == { + 'a': [1, 4], + 'b': [2, 5], + 'c': [3, 6], + } + + def test_simple_ints(self): + # Infer integer columns + rows = b'{"a": 1,"b": 2, "c": 3}\n{"a": 4,"b": 5, "c": 6}\n' + table = self.read_bytes(rows) + schema = pa.schema([('a', pa.int64()), + ('b', pa.int64()), + ('c', pa.int64())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [1, 4], + 'b': [2, 5], + 'c': [3, 6], + } + + def test_simple_varied(self): + # Infer various kinds of data + rows = (b'{"a": 1,"b": 2, "c": "3", "d": false}\n' + b'{"a": 4.0, "b": -5, "c": "foo", "d": true}\n') + table = self.read_bytes(rows) + schema = pa.schema([('a', pa.float64()), + ('b', pa.int64()), + ('c', pa.string()), + ('d', pa.bool_())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [1.0, 4.0], + 'b': [2, -5], + 'c': ["3", "foo"], + 'd': [False, True], + } + + def test_simple_nulls(self): + # Infer various kinds of data, with nulls + rows = (b'{"a": 1, "b": 2, "c": null, "d": null, "e": null}\n' + b'{"a": null, "b": -5, "c": "foo", "d": null, "e": true}\n' + b'{"a": 4.5, "b": null, "c": "nan", "d": null,"e": false}\n') + table = self.read_bytes(rows) + schema = pa.schema([('a', pa.float64()), + ('b', pa.int64()), + ('c', pa.string()), + ('d', pa.null()), + ('e', pa.bool_())]) + assert table.schema == schema + assert table.to_pydict() == { + 'a': [1.0, None, 4.5], + 'b': [2, -5, None], + 'c': [None, "foo", "nan"], + 'd': [None, None, None], + 'e': [None, True, False], + } + + def test_empty_lists(self): + # ARROW-10955: Infer list(null) + rows = b'{"a": []}' + table = self.read_bytes(rows) + schema = pa.schema([('a', pa.list_(pa.null()))]) + assert table.schema == schema + assert table.to_pydict() == {'a': [[]]} + + def test_empty_rows(self): + rows = b'{}\n{}\n' + table = self.read_bytes(rows) + schema = pa.schema([]) + assert table.schema == schema + assert table.num_columns == 0 + assert table.num_rows == 2 + + def test_reconcile_across_blocks(self): + # ARROW-12065: reconciling inferred types across blocks + first_row = b'{ }\n' + read_options = ReadOptions(block_size=len(first_row)) + for next_rows, expected_pylist in [ + (b'{"a": 0}', [None, 0]), + (b'{"a": []}', [None, []]), + (b'{"a": []}\n{"a": [[1]]}', [None, [], [[1]]]), + (b'{"a": {}}', [None, {}]), + (b'{"a": {}}\n{"a": {"b": {"c": 1}}}', + [None, {"b": None}, {"b": {"c": 1}}]), + ]: + table = self.read_bytes(first_row + next_rows, + read_options=read_options) + expected = {"a": expected_pylist} + assert table.to_pydict() == expected + # Check that the issue was exercised + assert table.column("a").num_chunks > 1 + + def test_explicit_schema_decimal(self): + rows = (b'{"a": 1}\n' + b'{"a": 1.45}\n' + b'{"a": -23.456}\n' + b'{}\n') + expected = { + 'a': [Decimal("1"), Decimal("1.45"), Decimal("-23.456"), None], + } + for type_factory in (pa.decimal128, pa.decimal256): + schema = pa.schema([('a', type_factory(9, 4))]) + opts = ParseOptions(explicit_schema=schema) + table = self.read_bytes(rows, parse_options=opts) + assert table.schema == schema + assert table.to_pydict() == expected + + def test_explicit_schema_with_unexpected_behaviour(self): + # infer by default + rows = 
(b'{"foo": "bar", "num": 0}\n' + b'{"foo": "baz", "num": 1}\n') + schema = pa.schema([ + ('foo', pa.binary()) + ]) + + opts = ParseOptions(explicit_schema=schema) + table = self.read_bytes(rows, parse_options=opts) + assert table.schema == pa.schema([ + ('foo', pa.binary()), + ('num', pa.int64()) + ]) + assert table.to_pydict() == { + 'foo': [b'bar', b'baz'], + 'num': [0, 1], + } + + # ignore the unexpected fields + opts = ParseOptions(explicit_schema=schema, + unexpected_field_behavior="ignore") + table = self.read_bytes(rows, parse_options=opts) + assert table.schema == pa.schema([ + ('foo', pa.binary()), + ]) + assert table.to_pydict() == { + 'foo': [b'bar', b'baz'], + } + + # raise error + opts = ParseOptions(explicit_schema=schema, + unexpected_field_behavior="error") + with pytest.raises(pa.ArrowInvalid, + match="JSON parse error: unexpected field"): + self.read_bytes(rows, parse_options=opts) + + def test_small_random_json(self): + data, expected = make_random_json(num_cols=2, num_rows=10) + table = self.read_bytes(data) + assert table.schema == expected.schema + assert table.equals(expected) + assert table.to_pydict() == expected.to_pydict() + + def test_load_large_json(self): + data, expected = make_random_json(num_cols=2, num_rows=100100) + # set block size is 10MB + read_options = ReadOptions(block_size=1024*1024*10) + table = self.read_bytes(data, read_options=read_options) + assert table.num_rows == 100100 + assert expected.num_rows == 100100 + + def test_stress_block_sizes(self): + # Test a number of small block sizes to stress block stitching + data_base, expected = make_random_json(num_cols=2, num_rows=100) + read_options = ReadOptions() + parse_options = ParseOptions() + + for data in [data_base, data_base.rstrip(b'\r\n')]: + for newlines_in_values in [False, True]: + parse_options.newlines_in_values = newlines_in_values + for block_size in [22, 23, 37]: + read_options.block_size = block_size + table = self.read_bytes(data, read_options=read_options, + parse_options=parse_options) + assert table.schema == expected.schema + if not table.equals(expected): + # Better error output + assert table.to_pydict() == expected.to_pydict() + + +class TestSerialJSONRead(BaseTestJSONRead, unittest.TestCase): + + def read_json(self, *args, **kwargs): + read_options = kwargs.setdefault('read_options', ReadOptions()) + read_options.use_threads = False + table = read_json(*args, **kwargs) + table.validate(full=True) + return table + + +class TestParallelJSONRead(BaseTestJSONRead, unittest.TestCase): + + def read_json(self, *args, **kwargs): + read_options = kwargs.setdefault('read_options', ReadOptions()) + read_options.use_threads = True + table = read_json(*args, **kwargs) + table.validate(full=True) + return table diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_jvm.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_jvm.py new file mode 100644 index 0000000000000000000000000000000000000000..c5996f921534316b892f8c294530b9c6c8587df1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_jvm.py @@ -0,0 +1,433 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import json +import os +import pyarrow as pa +import pyarrow.jvm as pa_jvm +import pytest +import sys +import xml.etree.ElementTree as ET + + +jpype = pytest.importorskip("jpype") + + +@pytest.fixture(scope="session") +def root_allocator(): + # This test requires Arrow Java to be built in the same source tree + try: + arrow_dir = os.environ["ARROW_SOURCE_DIR"] + except KeyError: + arrow_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..') + pom_path = os.path.join(arrow_dir, 'java', 'pom.xml') + tree = ET.parse(pom_path) + version = tree.getroot().find( + 'POM:version', + namespaces={ + 'POM': 'http://maven.apache.org/POM/4.0.0' + }).text + jar_path = os.path.join( + arrow_dir, 'java', 'tools', 'target', + 'arrow-tools-{}-jar-with-dependencies.jar'.format(version)) + jar_path = os.getenv("ARROW_TOOLS_JAR", jar_path) + kwargs = {} + # This will be the default behaviour in jpype 0.8+ + kwargs['convertStrings'] = False + jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.class.path=" + jar_path, + **kwargs) + return jpype.JPackage("org").apache.arrow.memory.RootAllocator(sys.maxsize) + + +def test_jvm_buffer(root_allocator): + # Create a Java buffer + jvm_buffer = root_allocator.buffer(8) + for i in range(8): + jvm_buffer.setByte(i, 8 - i) + + orig_refcnt = jvm_buffer.refCnt() + + # Convert to Python + buf = pa_jvm.jvm_buffer(jvm_buffer) + + # Check its content + assert buf.to_pybytes() == b'\x08\x07\x06\x05\x04\x03\x02\x01' + + # Check Java buffer lifetime is tied to PyArrow buffer lifetime + assert jvm_buffer.refCnt() == orig_refcnt + 1 + del buf + assert jvm_buffer.refCnt() == orig_refcnt + + +def test_jvm_buffer_released(root_allocator): + import jpype.imports # noqa + from java.lang import IllegalArgumentException + + jvm_buffer = root_allocator.buffer(8) + jvm_buffer.release() + + with pytest.raises(IllegalArgumentException): + pa_jvm.jvm_buffer(jvm_buffer) + + +def _jvm_field(jvm_spec): + om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')() + pojo_Field = jpype.JClass('org.apache.arrow.vector.types.pojo.Field') + return om.readValue(jvm_spec, pojo_Field) + + +def _jvm_schema(jvm_spec, metadata=None): + field = _jvm_field(jvm_spec) + schema_cls = jpype.JClass('org.apache.arrow.vector.types.pojo.Schema') + fields = jpype.JClass('java.util.ArrayList')() + fields.add(field) + if metadata: + dct = jpype.JClass('java.util.HashMap')() + for k, v in metadata.items(): + dct.put(k, v) + return schema_cls(fields, dct) + else: + return schema_cls(fields) + + +# In the following, we use the JSON serialization of the Field objects in Java. +# This ensures that we neither rely on the exact mechanics on how to construct +# them using Java code as well as enables us to define them as parameters +# without to invoke the JVM. 
+# +# The specifications were created using: +# +# om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')() +# field = … # Code to instantiate the field +# jvm_spec = om.writeValueAsString(field) +@pytest.mark.parametrize('pa_type,jvm_spec', [ + (pa.null(), '{"name":"null"}'), + (pa.bool_(), '{"name":"bool"}'), + (pa.int8(), '{"name":"int","bitWidth":8,"isSigned":true}'), + (pa.int16(), '{"name":"int","bitWidth":16,"isSigned":true}'), + (pa.int32(), '{"name":"int","bitWidth":32,"isSigned":true}'), + (pa.int64(), '{"name":"int","bitWidth":64,"isSigned":true}'), + (pa.uint8(), '{"name":"int","bitWidth":8,"isSigned":false}'), + (pa.uint16(), '{"name":"int","bitWidth":16,"isSigned":false}'), + (pa.uint32(), '{"name":"int","bitWidth":32,"isSigned":false}'), + (pa.uint64(), '{"name":"int","bitWidth":64,"isSigned":false}'), + (pa.float16(), '{"name":"floatingpoint","precision":"HALF"}'), + (pa.float32(), '{"name":"floatingpoint","precision":"SINGLE"}'), + (pa.float64(), '{"name":"floatingpoint","precision":"DOUBLE"}'), + (pa.time32('s'), '{"name":"time","unit":"SECOND","bitWidth":32}'), + (pa.time32('ms'), '{"name":"time","unit":"MILLISECOND","bitWidth":32}'), + (pa.time64('us'), '{"name":"time","unit":"MICROSECOND","bitWidth":64}'), + (pa.time64('ns'), '{"name":"time","unit":"NANOSECOND","bitWidth":64}'), + (pa.timestamp('s'), '{"name":"timestamp","unit":"SECOND",' + '"timezone":null}'), + (pa.timestamp('ms'), '{"name":"timestamp","unit":"MILLISECOND",' + '"timezone":null}'), + (pa.timestamp('us'), '{"name":"timestamp","unit":"MICROSECOND",' + '"timezone":null}'), + (pa.timestamp('ns'), '{"name":"timestamp","unit":"NANOSECOND",' + '"timezone":null}'), + (pa.timestamp('ns', tz='UTC'), '{"name":"timestamp","unit":"NANOSECOND"' + ',"timezone":"UTC"}'), + (pa.timestamp('ns', tz='Europe/Paris'), '{"name":"timestamp",' + '"unit":"NANOSECOND","timezone":"Europe/Paris"}'), + (pa.date32(), '{"name":"date","unit":"DAY"}'), + (pa.date64(), '{"name":"date","unit":"MILLISECOND"}'), + (pa.decimal128(19, 4), '{"name":"decimal","precision":19,"scale":4}'), + (pa.string(), '{"name":"utf8"}'), + (pa.binary(), '{"name":"binary"}'), + (pa.binary(10), '{"name":"fixedsizebinary","byteWidth":10}'), + # TODO(ARROW-2609): complex types that have children + # pa.list_(pa.int32()), + # pa.struct([pa.field('a', pa.int32()), + # pa.field('b', pa.int8()), + # pa.field('c', pa.string())]), + # pa.union([pa.field('a', pa.binary(10)), + # pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE), + # pa.union([pa.field('a', pa.binary(10)), + # pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE), + # TODO: DictionaryType requires a vector in the type + # pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c'])), +]) +@pytest.mark.parametrize('nullable', [True, False]) +def test_jvm_types(root_allocator, pa_type, jvm_spec, nullable): + if pa_type == pa.null() and not nullable: + return + spec = { + 'name': 'field_name', + 'nullable': nullable, + 'type': json.loads(jvm_spec), + # TODO: This needs to be set for complex types + 'children': [] + } + jvm_field = _jvm_field(json.dumps(spec)) + result = pa_jvm.field(jvm_field) + expected_field = pa.field('field_name', pa_type, nullable=nullable) + assert result == expected_field + + jvm_schema = _jvm_schema(json.dumps(spec)) + result = pa_jvm.schema(jvm_schema) + assert result == pa.schema([expected_field]) + + # Schema with custom metadata + jvm_schema = _jvm_schema(json.dumps(spec), {'meta': 'data'}) + result = pa_jvm.schema(jvm_schema) + assert result == 
pa.schema([expected_field], {'meta': 'data'}) + + # Schema with custom field metadata + spec['metadata'] = [{'key': 'field meta', 'value': 'field data'}] + jvm_schema = _jvm_schema(json.dumps(spec)) + result = pa_jvm.schema(jvm_schema) + expected_field = expected_field.with_metadata( + {'field meta': 'field data'}) + assert result == pa.schema([expected_field]) + + +# These test parameters mostly use an integer range as an input as this is +# often the only type that is understood by both Python and Java +# implementations of Arrow. +@pytest.mark.parametrize('pa_type,py_data,jvm_type', [ + (pa.bool_(), [True, False, True, True], 'BitVector'), + (pa.uint8(), list(range(128)), 'UInt1Vector'), + (pa.uint16(), list(range(128)), 'UInt2Vector'), + (pa.int32(), list(range(128)), 'IntVector'), + (pa.int64(), list(range(128)), 'BigIntVector'), + (pa.float32(), list(range(128)), 'Float4Vector'), + (pa.float64(), list(range(128)), 'Float8Vector'), + (pa.timestamp('s'), list(range(128)), 'TimeStampSecVector'), + (pa.timestamp('ms'), list(range(128)), 'TimeStampMilliVector'), + (pa.timestamp('us'), list(range(128)), 'TimeStampMicroVector'), + (pa.timestamp('ns'), list(range(128)), 'TimeStampNanoVector'), + # TODO(ARROW-2605): These types miss a conversion from pure Python objects + # * pa.time32('s') + # * pa.time32('ms') + # * pa.time64('us') + # * pa.time64('ns') + (pa.date32(), list(range(128)), 'DateDayVector'), + (pa.date64(), list(range(128)), 'DateMilliVector'), + # TODO(ARROW-2606): pa.decimal128(19, 4) +]) +def test_jvm_array(root_allocator, pa_type, py_data, jvm_type): + # Create vector + cls = "org.apache.arrow.vector.{}".format(jvm_type) + jvm_vector = jpype.JClass(cls)("vector", root_allocator) + jvm_vector.allocateNew(len(py_data)) + for i, val in enumerate(py_data): + # char and int are ambiguous overloads for these two setSafe calls + if jvm_type in {'UInt1Vector', 'UInt2Vector'}: + val = jpype.JInt(val) + jvm_vector.setSafe(i, val) + jvm_vector.setValueCount(len(py_data)) + + py_array = pa.array(py_data, type=pa_type) + jvm_array = pa_jvm.array(jvm_vector) + + assert py_array.equals(jvm_array) + + +def test_jvm_array_empty(root_allocator): + cls = "org.apache.arrow.vector.{}".format('IntVector') + jvm_vector = jpype.JClass(cls)("vector", root_allocator) + jvm_vector.allocateNew() + jvm_array = pa_jvm.array(jvm_vector) + assert len(jvm_array) == 0 + assert jvm_array.type == pa.int32() + + +# These test parameters mostly use an integer range as an input as this is +# often the only type that is understood by both Python and Java +# implementations of Arrow. 
+@pytest.mark.parametrize('pa_type,py_data,jvm_type,jvm_spec', [ + # TODO: null + (pa.bool_(), [True, False, True, True], 'BitVector', '{"name":"bool"}'), + ( + pa.uint8(), + list(range(128)), + 'UInt1Vector', + '{"name":"int","bitWidth":8,"isSigned":false}' + ), + ( + pa.uint16(), + list(range(128)), + 'UInt2Vector', + '{"name":"int","bitWidth":16,"isSigned":false}' + ), + ( + pa.uint32(), + list(range(128)), + 'UInt4Vector', + '{"name":"int","bitWidth":32,"isSigned":false}' + ), + ( + pa.uint64(), + list(range(128)), + 'UInt8Vector', + '{"name":"int","bitWidth":64,"isSigned":false}' + ), + ( + pa.int8(), + list(range(128)), + 'TinyIntVector', + '{"name":"int","bitWidth":8,"isSigned":true}' + ), + ( + pa.int16(), + list(range(128)), + 'SmallIntVector', + '{"name":"int","bitWidth":16,"isSigned":true}' + ), + ( + pa.int32(), + list(range(128)), + 'IntVector', + '{"name":"int","bitWidth":32,"isSigned":true}' + ), + ( + pa.int64(), + list(range(128)), + 'BigIntVector', + '{"name":"int","bitWidth":64,"isSigned":true}' + ), + # TODO: float16 + ( + pa.float32(), + list(range(128)), + 'Float4Vector', + '{"name":"floatingpoint","precision":"SINGLE"}' + ), + ( + pa.float64(), + list(range(128)), + 'Float8Vector', + '{"name":"floatingpoint","precision":"DOUBLE"}' + ), + ( + pa.timestamp('s'), + list(range(128)), + 'TimeStampSecVector', + '{"name":"timestamp","unit":"SECOND","timezone":null}' + ), + ( + pa.timestamp('ms'), + list(range(128)), + 'TimeStampMilliVector', + '{"name":"timestamp","unit":"MILLISECOND","timezone":null}' + ), + ( + pa.timestamp('us'), + list(range(128)), + 'TimeStampMicroVector', + '{"name":"timestamp","unit":"MICROSECOND","timezone":null}' + ), + ( + pa.timestamp('ns'), + list(range(128)), + 'TimeStampNanoVector', + '{"name":"timestamp","unit":"NANOSECOND","timezone":null}' + ), + # TODO(ARROW-2605): These types miss a conversion from pure Python objects + # * pa.time32('s') + # * pa.time32('ms') + # * pa.time64('us') + # * pa.time64('ns') + ( + pa.date32(), + list(range(128)), + 'DateDayVector', + '{"name":"date","unit":"DAY"}' + ), + ( + pa.date64(), + list(range(128)), + 'DateMilliVector', + '{"name":"date","unit":"MILLISECOND"}' + ), + # TODO(ARROW-2606): pa.decimal128(19, 4) +]) +def test_jvm_record_batch(root_allocator, pa_type, py_data, jvm_type, + jvm_spec): + # Create vector + cls = "org.apache.arrow.vector.{}".format(jvm_type) + jvm_vector = jpype.JClass(cls)("vector", root_allocator) + jvm_vector.allocateNew(len(py_data)) + for i, val in enumerate(py_data): + if jvm_type in {'UInt1Vector', 'UInt2Vector'}: + val = jpype.JInt(val) + jvm_vector.setSafe(i, val) + jvm_vector.setValueCount(len(py_data)) + + # Create field + spec = { + 'name': 'field_name', + 'nullable': False, + 'type': json.loads(jvm_spec), + # TODO: This needs to be set for complex types + 'children': [] + } + jvm_field = _jvm_field(json.dumps(spec)) + + # Create VectorSchemaRoot + jvm_fields = jpype.JClass('java.util.ArrayList')() + jvm_fields.add(jvm_field) + jvm_vectors = jpype.JClass('java.util.ArrayList')() + jvm_vectors.add(jvm_vector) + jvm_vsr = jpype.JClass('org.apache.arrow.vector.VectorSchemaRoot') + jvm_vsr = jvm_vsr(jvm_fields, jvm_vectors, len(py_data)) + + py_record_batch = pa.RecordBatch.from_arrays( + [pa.array(py_data, type=pa_type)], + ['col'] + ) + jvm_record_batch = pa_jvm.record_batch(jvm_vsr) + + assert py_record_batch.equals(jvm_record_batch) + + +def _string_to_varchar_holder(ra, string): + nvch_cls = "org.apache.arrow.vector.holders.NullableVarCharHolder" + holder = 
jpype.JClass(nvch_cls)() + if string is None: + holder.isSet = 0 + else: + holder.isSet = 1 + value = jpype.JClass("java.lang.String")("string") + std_charsets = jpype.JClass("java.nio.charset.StandardCharsets") + bytes_ = value.getBytes(std_charsets.UTF_8) + holder.buffer = ra.buffer(len(bytes_)) + holder.buffer.setBytes(0, bytes_, 0, len(bytes_)) + holder.start = 0 + holder.end = len(bytes_) + return holder + + +# TODO(ARROW-2607) +@pytest.mark.xfail(reason="from_buffers is only supported for " + "primitive arrays yet") +def test_jvm_string_array(root_allocator): + data = ["string", None, "töst"] + cls = "org.apache.arrow.vector.VarCharVector" + jvm_vector = jpype.JClass(cls)("vector", root_allocator) + jvm_vector.allocateNew() + + for i, string in enumerate(data): + holder = _string_to_varchar_holder(root_allocator, "string") + jvm_vector.setSafe(i, holder) + jvm_vector.setValueCount(i + 1) + + py_array = pa.array(data, type=pa.string()) + jvm_array = pa_jvm.array(jvm_vector) + + assert py_array.equals(jvm_array) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_misc.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..39dac4eb81dfb65a58b89a945b966d0da01b97c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_misc.py @@ -0,0 +1,250 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import subprocess +import sys + +import pytest + +import pyarrow as pa +from pyarrow.lib import ArrowInvalid + + +def test_get_include(): + include_dir = pa.get_include() + assert os.path.exists(os.path.join(include_dir, 'arrow', 'api.h')) + + +@pytest.mark.skipif('sys.platform != "win32"') +def test_get_library_dirs_win32(): + assert any(os.path.exists(os.path.join(directory, 'arrow.lib')) + for directory in pa.get_library_dirs()) + + +def test_cpu_count(): + n = pa.cpu_count() + assert n > 0 + try: + pa.set_cpu_count(n + 5) + assert pa.cpu_count() == n + 5 + finally: + pa.set_cpu_count(n) + + +def test_io_thread_count(): + n = pa.io_thread_count() + assert n > 0 + try: + pa.set_io_thread_count(n + 5) + assert pa.io_thread_count() == n + 5 + finally: + pa.set_io_thread_count(n) + + +def test_env_var_io_thread_count(): + # Test that the number of IO threads can be overridden with the + # ARROW_IO_THREADS environment variable. 
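+    # The check runs in a fresh interpreter subprocess so the variable is already set when pyarrow is imported.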
+ code = """if 1: + import pyarrow as pa + print(pa.io_thread_count()) + """ + + def run_with_env_var(env_var): + env = os.environ.copy() + env['ARROW_IO_THREADS'] = env_var + res = subprocess.run([sys.executable, "-c", code], env=env, + capture_output=True) + res.check_returncode() + return res.stdout.decode(), res.stderr.decode() + + out, err = run_with_env_var('17') + assert out.strip() == '17' + assert err == '' + + for v in ('-1', 'z'): + out, err = run_with_env_var(v) + assert out.strip() == '8' # default value + assert ("ARROW_IO_THREADS does not contain a valid number of threads" + in err.strip()) + + +def test_build_info(): + assert isinstance(pa.cpp_build_info, pa.BuildInfo) + assert isinstance(pa.cpp_version_info, pa.VersionInfo) + assert isinstance(pa.cpp_version, str) + assert isinstance(pa.__version__, str) + assert pa.cpp_build_info.version_info == pa.cpp_version_info + + assert pa.cpp_build_info.build_type in ( + 'debug', 'release', 'minsizerel', 'relwithdebinfo') + + # assert pa.version == pa.__version__ # XXX currently false + + +def test_runtime_info(): + info = pa.runtime_info() + assert isinstance(info, pa.RuntimeInfo) + possible_simd_levels = ('none', 'sse4_2', 'avx', 'avx2', 'avx512') + assert info.simd_level in possible_simd_levels + assert info.detected_simd_level in possible_simd_levels + + if info.simd_level != 'none': + env = os.environ.copy() + env['ARROW_USER_SIMD_LEVEL'] = 'none' + code = f"""if 1: + import pyarrow as pa + + info = pa.runtime_info() + assert info.simd_level == 'none', info.simd_level + assert info.detected_simd_level == {info.detected_simd_level!r},\ + info.detected_simd_level + """ + subprocess.check_call([sys.executable, "-c", code], env=env) + + +def test_import_at_shutdown(): + # GH-38626: importing PyArrow at interpreter shutdown would crash + code = """if 1: + import atexit + + def import_arrow(): + import pyarrow + + atexit.register(import_arrow) + """ + subprocess.check_call([sys.executable, "-c", code]) + + +@pytest.mark.skipif(sys.platform == "win32", + reason="Path to timezone database is not configurable " + "on non-Windows platforms") +def test_set_timezone_db_path_non_windows(): + # set_timezone_db_path raises an error on non-Windows platforms + with pytest.raises(ArrowInvalid, + match="Arrow was set to use OS timezone " + "database at compile time"): + pa.set_timezone_db_path("path") + + +@pytest.mark.parametrize('klass', [ + pa.Field, + pa.Schema, + pa.ChunkedArray, + pa.RecordBatch, + pa.Table, + pa.Buffer, + pa.Array, + pa.Tensor, + pa.DataType, + pa.ListType, + pa.LargeListType, + pa.FixedSizeListType, + pa.ListViewType, + pa.LargeListViewType, + pa.UnionType, + pa.SparseUnionType, + pa.DenseUnionType, + pa.StructType, + pa.Time32Type, + pa.Time64Type, + pa.TimestampType, + pa.Decimal128Type, + pa.Decimal256Type, + pa.DictionaryType, + pa.FixedSizeBinaryType, + pa.NullArray, + pa.NumericArray, + pa.IntegerArray, + pa.FloatingPointArray, + pa.BooleanArray, + pa.Int8Array, + pa.Int16Array, + pa.Int32Array, + pa.Int64Array, + pa.UInt8Array, + pa.UInt16Array, + pa.UInt32Array, + pa.UInt64Array, + pa.ListArray, + pa.LargeListArray, + pa.MapArray, + pa.FixedSizeListArray, + pa.UnionArray, + pa.BinaryArray, + pa.StringArray, + pa.BinaryViewArray, + pa.StringViewArray, + pa.FixedSizeBinaryArray, + pa.DictionaryArray, + pa.Date32Array, + pa.Date64Array, + pa.TimestampArray, + pa.Time32Array, + pa.Time64Array, + pa.DurationArray, + pa.Decimal128Array, + pa.Decimal256Array, + pa.StructArray, + pa.RunEndEncodedArray, + pa.Scalar, + 
pa.BooleanScalar, + pa.Int8Scalar, + pa.Int16Scalar, + pa.Int32Scalar, + pa.Int64Scalar, + pa.UInt8Scalar, + pa.UInt16Scalar, + pa.UInt32Scalar, + pa.UInt64Scalar, + pa.HalfFloatScalar, + pa.FloatScalar, + pa.DoubleScalar, + pa.Decimal128Scalar, + pa.Decimal256Scalar, + pa.Date32Scalar, + pa.Date64Scalar, + pa.Time32Scalar, + pa.Time64Scalar, + pa.TimestampScalar, + pa.DurationScalar, + pa.StringScalar, + pa.BinaryScalar, + pa.FixedSizeBinaryScalar, + pa.BinaryViewScalar, + pa.StringViewScalar, + pa.ListScalar, + pa.LargeListScalar, + pa.ListViewScalar, + pa.LargeListViewScalar, + pa.MapScalar, + pa.FixedSizeListScalar, + pa.UnionScalar, + pa.StructScalar, + pa.DictionaryScalar, + pa.RunEndEncodedScalar, + pa.ipc.Message, + pa.ipc.MessageReader, + pa.MemoryPool, + pa.LoggingMemoryPool, + pa.ProxyMemoryPool, +]) +def test_extension_type_constructor_errors(klass): + # ARROW-2638: prevent calling extension class constructors directly + msg = "Do not call {cls}'s constructor directly, use .* instead." + with pytest.raises(TypeError, match=msg.format(cls=klass.__name__)): + klass() diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_orc.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_orc.py new file mode 100644 index 0000000000000000000000000000000000000000..1b467d523304c44614ef23f17b5558bad9e26840 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_orc.py @@ -0,0 +1,637 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest +import decimal +import datetime + +import pyarrow as pa +from pyarrow import fs +from pyarrow.tests import util + + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not orc' +pytestmark = pytest.mark.orc + + +try: + from pandas.testing import assert_frame_equal + import pandas as pd +except ImportError: + pass + + +@pytest.fixture(scope="module") +def datadir(base_datadir): + return base_datadir / "orc" + + +def fix_example_values(actual_cols, expected_cols): + """ + Fix type of expected values (as read from JSON) according to + actual ORC datatype. + """ + for name in expected_cols: + expected = expected_cols[name] + actual = actual_cols[name] + if (name == "map" and + [d.keys() == {'key', 'value'} for m in expected for d in m]): + # convert [{'key': k, 'value': v}, ...] to [(k, v), ...] 
+ col = expected_cols[name].copy() + for i, m in enumerate(expected): + col[i] = [(d['key'], d['value']) for d in m] + expected_cols[name] = col + continue + + typ = actual[0].__class__ + if issubclass(typ, datetime.datetime): + # timestamp fields are represented as strings in JSON files + expected = pd.to_datetime(expected) + elif issubclass(typ, datetime.date): + # date fields are represented as strings in JSON files + expected = expected.dt.date + elif typ is decimal.Decimal: + converted_decimals = [None] * len(expected) + # decimal fields are represented as reals in JSON files + for i, (d, v) in enumerate(zip(actual, expected)): + if not pd.isnull(v): + exp = d.as_tuple().exponent + factor = 10 ** -exp + converted_decimals[i] = ( + decimal.Decimal(round(v * factor)).scaleb(exp)) + expected = pd.Series(converted_decimals) + + expected_cols[name] = expected + + +def check_example_values(orc_df, expected_df, start=None, stop=None): + if start is not None or stop is not None: + expected_df = expected_df[start:stop].reset_index(drop=True) + assert_frame_equal(orc_df, expected_df, check_dtype=False) + + +def check_example_file(orc_path, expected_df, need_fix=False): + """ + Check a ORC file against the expected columns dictionary. + """ + from pyarrow import orc + + orc_file = orc.ORCFile(orc_path) + # Exercise ORCFile.read() + table = orc_file.read() + assert isinstance(table, pa.Table) + table.validate() + + # This workaround needed because of ARROW-3080 + orc_df = pd.DataFrame(table.to_pydict()) + + assert set(expected_df.columns) == set(orc_df.columns) + + # reorder columns if necessary + if not orc_df.columns.equals(expected_df.columns): + expected_df = expected_df.reindex(columns=orc_df.columns) + + if need_fix: + fix_example_values(orc_df, expected_df) + + check_example_values(orc_df, expected_df) + # Exercise ORCFile.read_stripe() + json_pos = 0 + for i in range(orc_file.nstripes): + batch = orc_file.read_stripe(i) + check_example_values(pd.DataFrame(batch.to_pydict()), + expected_df, + start=json_pos, + stop=json_pos + len(batch)) + json_pos += len(batch) + assert json_pos == orc_file.nrows + + +@pytest.mark.pandas +@pytest.mark.parametrize('filename', [ + 'TestOrcFile.test1.orc', + 'TestOrcFile.testDate1900.orc', + 'decimal.orc' +]) +def test_example_using_json(filename, datadir): + """ + Check a ORC file example against the equivalent JSON file, as given + in the Apache ORC repository (the JSON file has one JSON object per + line, corresponding to one row in the ORC file). 
+ """ + # Read JSON file + path = datadir / filename + table = pd.read_json(str(path.with_suffix('.jsn.gz')), lines=True) + check_example_file(path, table, need_fix=True) + + +def test_orcfile_empty(datadir): + from pyarrow import orc + + table = orc.ORCFile(datadir / "TestOrcFile.emptyFile.orc").read() + assert table.num_rows == 0 + + expected_schema = pa.schema([ + ("boolean1", pa.bool_()), + ("byte1", pa.int8()), + ("short1", pa.int16()), + ("int1", pa.int32()), + ("long1", pa.int64()), + ("float1", pa.float32()), + ("double1", pa.float64()), + ("bytes1", pa.binary()), + ("string1", pa.string()), + ("middle", pa.struct( + [("list", pa.list_( + pa.struct([("int1", pa.int32()), + ("string1", pa.string())]))) + ])), + ("list", pa.list_( + pa.struct([("int1", pa.int32()), + ("string1", pa.string())]) + )), + ("map", pa.map_(pa.string(), + pa.struct([("int1", pa.int32()), + ("string1", pa.string())]) + )), + ]) + assert table.schema == expected_schema + + +def test_filesystem_uri(tmpdir): + from pyarrow import orc + table = pa.table({"a": [1, 2, 3]}) + + directory = tmpdir / "data_dir" + directory.mkdir() + path = directory / "data.orc" + orc.write_table(table, str(path)) + + # filesystem object + result = orc.read_table(path, filesystem=fs.LocalFileSystem()) + assert result.equals(table) + + # filesystem URI + result = orc.read_table( + "data_dir/data.orc", filesystem=util._filesystem_uri(tmpdir)) + assert result.equals(table) + + # use the path only + result = orc.read_table( + util._filesystem_uri(path)) + assert result.equals(table) + + +def test_orcfile_readwrite(tmpdir): + from pyarrow import orc + a = pa.array([1, None, 3, None]) + b = pa.array([None, "Arrow", None, "ORC"]) + table = pa.table({"int64": a, "utf8": b}) + file = tmpdir.join("test.orc") + orc.write_table(table, file) + output_table = orc.read_table(file) + assert table.equals(output_table) + + output_table = orc.read_table(file, []) + assert 4 == output_table.num_rows + assert 0 == output_table.num_columns + + output_table = orc.read_table(file, columns=["int64"]) + assert 4 == output_table.num_rows + assert 1 == output_table.num_columns + + +def test_bytesio_readwrite(): + from pyarrow import orc + from io import BytesIO + + buf = BytesIO() + a = pa.array([1, None, 3, None]) + b = pa.array([None, "Arrow", None, "ORC"]) + table = pa.table({"int64": a, "utf8": b}) + orc.write_table(table, buf) + buf.seek(0) + orc_file = orc.ORCFile(buf) + output_table = orc_file.read() + assert table.equals(output_table) + + +def test_buffer_readwrite(): + from pyarrow import orc + + buffer_output_stream = pa.BufferOutputStream() + a = pa.array([1, None, 3, None]) + b = pa.array([None, "Arrow", None, "ORC"]) + table = pa.table({"int64": a, "utf8": b}) + orc.write_table(table, buffer_output_stream) + buffer_reader = pa.BufferReader(buffer_output_stream.getvalue()) + orc_file = orc.ORCFile(buffer_reader) + output_table = orc_file.read() + assert table.equals(output_table) + # Check for default WriteOptions + assert orc_file.compression == 'UNCOMPRESSED' + assert orc_file.file_version == '0.12' + assert orc_file.row_index_stride == 10000 + assert orc_file.compression_size == 65536 + + # deprecated keyword order + buffer_output_stream = pa.BufferOutputStream() + with pytest.warns(FutureWarning): + orc.write_table(buffer_output_stream, table) + buffer_reader = pa.BufferReader(buffer_output_stream.getvalue()) + orc_file = orc.ORCFile(buffer_reader) + output_table = orc_file.read() + assert table.equals(output_table) + # Check for default 
WriteOptions + assert orc_file.compression == 'UNCOMPRESSED' + assert orc_file.file_version == '0.12' + assert orc_file.row_index_stride == 10000 + assert orc_file.compression_size == 65536 + + +@pytest.mark.snappy +def test_buffer_readwrite_with_writeoptions(): + from pyarrow import orc + + buffer_output_stream = pa.BufferOutputStream() + a = pa.array([1, None, 3, None]) + b = pa.array([None, "Arrow", None, "ORC"]) + table = pa.table({"int64": a, "utf8": b}) + orc.write_table( + table, + buffer_output_stream, + compression='snappy', + file_version='0.11', + row_index_stride=5000, + compression_block_size=32768, + ) + buffer_reader = pa.BufferReader(buffer_output_stream.getvalue()) + orc_file = orc.ORCFile(buffer_reader) + output_table = orc_file.read() + assert table.equals(output_table) + # Check for modified WriteOptions + assert orc_file.compression == 'SNAPPY' + assert orc_file.file_version == '0.11' + assert orc_file.row_index_stride == 5000 + assert orc_file.compression_size == 32768 + + # deprecated keyword order + buffer_output_stream = pa.BufferOutputStream() + with pytest.warns(FutureWarning): + orc.write_table( + buffer_output_stream, + table, + compression='uncompressed', + file_version='0.11', + row_index_stride=20000, + compression_block_size=16384, + ) + buffer_reader = pa.BufferReader(buffer_output_stream.getvalue()) + orc_file = orc.ORCFile(buffer_reader) + output_table = orc_file.read() + assert table.equals(output_table) + # Check for default WriteOptions + assert orc_file.compression == 'UNCOMPRESSED' + assert orc_file.file_version == '0.11' + assert orc_file.row_index_stride == 20000 + assert orc_file.compression_size == 16384 + + +def test_buffer_readwrite_with_bad_writeoptions(): + from pyarrow import orc + buffer_output_stream = pa.BufferOutputStream() + a = pa.array([1, None, 3, None]) + table = pa.table({"int64": a}) + + # batch_size must be a positive integer + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + batch_size=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + batch_size=-100, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + batch_size=1024.23, + ) + + # file_version must be 0.11 or 0.12 + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + file_version=0.13, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + file_version='1.1', + ) + + # stripe_size must be a positive integer + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + stripe_size=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + stripe_size=-400, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + stripe_size=4096.73, + ) + + # compression must be among the given options + with pytest.raises(TypeError): + orc.write_table( + table, + buffer_output_stream, + compression=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression='none', + ) + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression='zlid', + ) + + # compression_block_size must be a positive integer + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_block_size=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + 
compression_block_size=-200, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_block_size=1096.73, + ) + + # compression_strategy must be among the given options + with pytest.raises(TypeError): + orc.write_table( + table, + buffer_output_stream, + compression_strategy=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_strategy='no', + ) + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_strategy='large', + ) + + # row_index_stride must be a positive integer + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + row_index_stride=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + row_index_stride=-800, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + row_index_stride=3096.29, + ) + + # padding_tolerance must be possible to cast to float + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + padding_tolerance='cat', + ) + + # dictionary_key_size_threshold must be possible to cast to + # float between 0.0 and 1.0 + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + dictionary_key_size_threshold='arrow', + ) + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + dictionary_key_size_threshold=1.2, + ) + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + dictionary_key_size_threshold=-3.2, + ) + + # bloom_filter_columns must be convertible to a list containing + # nonnegative integers + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_columns="string", + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_columns=[0, 1.4], + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_columns={0, 2, -1}, + ) + + # bloom_filter_fpp must be convertible to a float between 0.0 and 1.0 + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_fpp='arrow', + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_fpp=1.1, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_fpp=-0.1, + ) + + +def test_column_selection(tempdir): + from pyarrow import orc + + # create a table with nested types + inner = pa.field('inner', pa.int64()) + middle = pa.field('middle', pa.struct([inner])) + fields = [ + pa.field('basic', pa.int32()), + pa.field( + 'list', pa.list_(pa.field('item', pa.int32())) + ), + pa.field( + 'struct', pa.struct([middle, pa.field('inner2', pa.int64())]) + ), + pa.field( + 'list-struct', pa.list_(pa.field( + 'item', pa.struct([ + pa.field('inner1', pa.int64()), + pa.field('inner2', pa.int64()) + ]) + )) + ), + pa.field('basic2', pa.int64()), + ] + arrs = [ + [0], [[1, 2]], [{"middle": {"inner": 3}, "inner2": 4}], + [[{"inner1": 5, "inner2": 6}, {"inner1": 7, "inner2": 8}]], [9]] + table = pa.table(arrs, schema=pa.schema(fields)) + + path = str(tempdir / 'test.orc') + orc.write_table(table, path) + orc_file = orc.ORCFile(path) + + # default selecting all columns + result1 = orc_file.read() + assert result1.equals(table) + + # selecting with columns names + result2 = 
orc_file.read(columns=["basic", "basic2"]) + assert result2.equals(table.select(["basic", "basic2"])) + + result3 = orc_file.read(columns=["list", "struct", "basic2"]) + assert result3.equals(table.select(["list", "struct", "basic2"])) + + # using dotted paths + result4 = orc_file.read(columns=["struct.middle.inner"]) + expected4 = pa.table({"struct": [{"middle": {"inner": 3}}]}) + assert result4.equals(expected4) + + result5 = orc_file.read(columns=["struct.inner2"]) + expected5 = pa.table({"struct": [{"inner2": 4}]}) + assert result5.equals(expected5) + + result6 = orc_file.read( + columns=["list", "struct.middle.inner", "struct.inner2"] + ) + assert result6.equals(table.select(["list", "struct"])) + + result7 = orc_file.read(columns=["list-struct.inner1"]) + expected7 = pa.table({"list-struct": [[{"inner1": 5}, {"inner1": 7}]]}) + assert result7.equals(expected7) + + # selecting with (Arrow-based) field indices + result2 = orc_file.read(columns=[0, 4]) + assert result2.equals(table.select(["basic", "basic2"])) + + result3 = orc_file.read(columns=[1, 2, 3]) + assert result3.equals(table.select(["list", "struct", "list-struct"])) + + # error on non-existing name or index + with pytest.raises(IOError): + # liborc returns ParseError, which gets translated into IOError + # instead of ValueError + orc_file.read(columns=["wrong"]) + + with pytest.raises(ValueError): + orc_file.read(columns=[5]) + + +def test_wrong_usage_orc_writer(tempdir): + from pyarrow import orc + + path = str(tempdir / 'test.orc') + with orc.ORCWriter(path) as writer: + with pytest.raises(AttributeError): + writer.test() + + +def test_orc_writer_with_null_arrays(tempdir): + from pyarrow import orc + + path = str(tempdir / 'test.orc') + a = pa.array([1, None, 3, None]) + b = pa.array([None, None, None, None]) + table = pa.table({"int64": a, "utf8": b}) + with pytest.raises(pa.ArrowNotImplementedError): + orc.write_table(table, path) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_pandas.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..3678b4e57a9a815f34d260318e7ae7a58f069ea0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_pandas.py @@ -0,0 +1,5120 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import gc +import decimal +import json +import multiprocessing as mp +import sys +import warnings + +from collections import OrderedDict +from datetime import date, datetime, time, timedelta, timezone + +import hypothesis as h +import hypothesis.strategies as st +import numpy as np +import numpy.testing as npt +import pytest + +from pyarrow.pandas_compat import get_logical_type, _pandas_api +from pyarrow.tests.util import invoke_script, random_ascii, rands +import pyarrow.tests.strategies as past +import pyarrow.tests.util as test_util +from pyarrow.vendored.version import Version + +import pyarrow as pa +try: + from pyarrow import parquet as pq +except ImportError: + pass + +try: + import pandas as pd + import pandas.testing as tm + from .pandas_examples import dataframe_with_arrays, dataframe_with_lists +except ImportError: + pass + + +try: + _np_VisibleDeprecationWarning = np.VisibleDeprecationWarning +except AttributeError: + from numpy.exceptions import ( + VisibleDeprecationWarning as _np_VisibleDeprecationWarning + ) + + +# Marks all of the tests in this module +pytestmark = pytest.mark.pandas + + +def _alltypes_example(size=100): + return pd.DataFrame({ + 'uint8': np.arange(size, dtype=np.uint8), + 'uint16': np.arange(size, dtype=np.uint16), + 'uint32': np.arange(size, dtype=np.uint32), + 'uint64': np.arange(size, dtype=np.uint64), + 'int8': np.arange(size, dtype=np.int16), + 'int16': np.arange(size, dtype=np.int16), + 'int32': np.arange(size, dtype=np.int32), + 'int64': np.arange(size, dtype=np.int64), + 'float32': np.arange(size, dtype=np.float32), + 'float64': np.arange(size, dtype=np.float64), + 'bool': np.random.randn(size) > 0, + 'datetime[s]': np.arange("2016-01-01T00:00:00.001", size, + dtype='datetime64[s]'), + 'datetime[ms]': np.arange("2016-01-01T00:00:00.001", size, + dtype='datetime64[ms]'), + 'datetime[us]': np.arange("2016-01-01T00:00:00.001", size, + dtype='datetime64[us]'), + 'datetime[ns]': np.arange("2016-01-01T00:00:00.001", size, + dtype='datetime64[ns]'), + 'timedelta64[s]': np.arange(0, size, dtype='timedelta64[s]'), + 'timedelta64[ms]': np.arange(0, size, dtype='timedelta64[ms]'), + 'timedelta64[us]': np.arange(0, size, dtype='timedelta64[us]'), + 'timedelta64[ns]': np.arange(0, size, dtype='timedelta64[ns]'), + 'str': [str(x) for x in range(size)], + 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None], + 'empty_str': [''] * size + }) + + +def _check_pandas_roundtrip(df, expected=None, use_threads=False, + expected_schema=None, + check_dtype=True, schema=None, + preserve_index=False, + as_batch=False): + klass = pa.RecordBatch if as_batch else pa.Table + table = klass.from_pandas(df, schema=schema, + preserve_index=preserve_index, + nthreads=2 if use_threads else 1) + result = table.to_pandas(use_threads=use_threads) + + if expected_schema: + # all occurrences of _check_pandas_roundtrip passes expected_schema + # without the pandas generated key-value metadata + assert table.schema.equals(expected_schema) + + if expected is None: + expected = df + + for col in expected.columns: + if expected[col].dtype == 'object': + expected[col] = expected[col].replace({np.nan: None}) + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "elementwise comparison failed", DeprecationWarning) + tm.assert_frame_equal(result, expected, check_dtype=check_dtype, + check_index_type=('equiv' if preserve_index + else False)) + + +def _check_series_roundtrip(s, type_=None, expected_pa_type=None): + arr = pa.array(s, from_pandas=True, 
type=type_) + + if type_ is not None and expected_pa_type is None: + expected_pa_type = type_ + + if expected_pa_type is not None: + assert arr.type == expected_pa_type + + result = pd.Series(arr.to_pandas(), name=s.name) + tm.assert_series_equal(s, result) + + +def _check_array_roundtrip(values, expected=None, mask=None, + type=None): + arr = pa.array(values, from_pandas=True, mask=mask, type=type) + result = arr.to_pandas() + + values_nulls = pd.isnull(values) + if mask is None: + assert arr.null_count == values_nulls.sum() + else: + assert arr.null_count == (mask | values_nulls).sum() + + if expected is None: + if mask is None: + expected = pd.Series(values) + else: + expected = pd.Series(values).copy() + expected[mask.copy()] = None + + if expected.dtype == 'object': + expected = expected.replace({np.nan: None}) + + tm.assert_series_equal(pd.Series(result), expected, check_names=False) + + +def _check_array_from_pandas_roundtrip(np_array, type=None): + arr = pa.array(np_array, from_pandas=True, type=type) + result = arr.to_pandas() + npt.assert_array_equal(result, np_array) + + +class TestConvertMetadata: + """ + Conversion tests for Pandas metadata & indices. + """ + + def test_non_string_columns(self): + df = pd.DataFrame({0: [1, 2, 3]}) + table = pa.Table.from_pandas(df) + assert table.field(0).name == '0' + + def test_non_string_columns_with_index(self): + df = pd.DataFrame({0: [1.0, 2.0, 3.0], 1: [4.0, 5.0, 6.0]}) + df = df.set_index(0) + + # assert that the from_pandas raises the warning + with pytest.warns(UserWarning): + table = pa.Table.from_pandas(df) + assert table.field(0).name == '1' + + expected = df.copy() + # non-str index name will be converted to str + expected.index.name = str(expected.index.name) + with pytest.warns(UserWarning): + _check_pandas_roundtrip(df, expected=expected, + preserve_index=True) + + def test_from_pandas_with_columns(self): + df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]}, + columns=[1, 0]) + + table = pa.Table.from_pandas(df, columns=[0, 1]) + expected = pa.Table.from_pandas(df[[0, 1]]) + assert expected.equals(table) + + record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1]) + record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]]) + assert record_batch_expected.equals(record_batch_table) + + def test_column_index_names_are_preserved(self): + df = pd.DataFrame({'data': [1, 2, 3]}) + df.columns.names = ['a'] + _check_pandas_roundtrip(df, preserve_index=True) + + def test_column_index_names_with_tz(self): + # ARROW-13756 + # Bug if index is timezone aware DataTimeIndex + + df = pd.DataFrame( + np.random.randn(5, 3), + columns=pd.date_range("2021-01-01", periods=3, freq="50D", tz="CET") + ) + _check_pandas_roundtrip(df, preserve_index=True) + + def test_range_index_shortcut(self): + # ARROW-1639 + index_name = 'foo' + df = pd.DataFrame({'a': [1, 2, 3, 4]}, + index=pd.RangeIndex(0, 8, step=2, name=index_name)) + + df2 = pd.DataFrame({'a': [4, 5, 6, 7]}, + index=pd.RangeIndex(0, 4)) + + table = pa.Table.from_pandas(df) + table_no_index_name = pa.Table.from_pandas(df2) + + # The RangeIndex is tracked in the metadata only + assert len(table.schema) == 1 + + result = table.to_pandas() + tm.assert_frame_equal(result, df) + assert isinstance(result.index, pd.RangeIndex) + assert _pandas_api.get_rangeindex_attribute(result.index, 'step') == 2 + assert result.index.name == index_name + + result2 = table_no_index_name.to_pandas() + tm.assert_frame_equal(result2, df2) + assert isinstance(result2.index, pd.RangeIndex) + 
assert _pandas_api.get_rangeindex_attribute(result2.index, 'step') == 1 + assert result2.index.name is None + + def test_range_index_force_serialization(self): + # ARROW-5427: preserve_index=True will force the RangeIndex to + # be serialized as a column rather than tracked more + # efficiently as metadata + df = pd.DataFrame({'a': [1, 2, 3, 4]}, + index=pd.RangeIndex(0, 8, step=2, name='foo')) + + table = pa.Table.from_pandas(df, preserve_index=True) + assert table.num_columns == 2 + assert 'foo' in table.column_names + + restored = table.to_pandas() + tm.assert_frame_equal(restored, df) + + def test_rangeindex_doesnt_warn(self): + # ARROW-5606: pandas 0.25 deprecated private _start/stop/step + # attributes -> can be removed if support < pd 0.25 is dropped + df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b']) + + with warnings.catch_warnings(): + warnings.simplefilter(action="error") + # make_block deprecation in pandas, still under discussion + # https://github.com/pandas-dev/pandas/pull/56422 + # https://github.com/pandas-dev/pandas/issues/40226 + warnings.filterwarnings( + "ignore", "make_block is deprecated", DeprecationWarning + ) + _check_pandas_roundtrip(df, preserve_index=True) + + def test_multiindex_columns(self): + columns = pd.MultiIndex.from_arrays([ + ['one', 'two'], ['X', 'Y'] + ]) + df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) + _check_pandas_roundtrip(df, preserve_index=True) + + def test_multiindex_columns_with_dtypes(self): + columns = pd.MultiIndex.from_arrays( + [ + ['one', 'two'], + pd.DatetimeIndex(['2017-08-01', '2017-08-02']), + ], + names=['level_1', 'level_2'], + ) + df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) + _check_pandas_roundtrip(df, preserve_index=True) + + def test_multiindex_with_column_dtype_object(self): + # ARROW-3651 & ARROW-9096 + # Bug when dtype of the columns is object. 
+ + # underlying dtype: integer + df = pd.DataFrame([1], columns=pd.Index([1], dtype=object)) + _check_pandas_roundtrip(df, preserve_index=True) + + # underlying dtype: floating + df = pd.DataFrame([1], columns=pd.Index([1.1], dtype=object)) + _check_pandas_roundtrip(df, preserve_index=True) + + # underlying dtype: datetime + # ARROW-9096: a simple roundtrip now works + df = pd.DataFrame([1], columns=pd.Index( + [datetime(2018, 1, 1)], dtype="object")) + _check_pandas_roundtrip(df, preserve_index=True) + + def test_multiindex_columns_unicode(self): + columns = pd.MultiIndex.from_arrays([['あ', 'い'], ['X', 'Y']]) + df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) + _check_pandas_roundtrip(df, preserve_index=True) + + def test_multiindex_doesnt_warn(self): + # ARROW-3953: pandas 0.24 rename of MultiIndex labels to codes + columns = pd.MultiIndex.from_arrays([['one', 'two'], ['X', 'Y']]) + df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) + + with warnings.catch_warnings(): + warnings.simplefilter(action="error") + # make_block deprecation in pandas, still under discussion + # https://github.com/pandas-dev/pandas/pull/56422 + # https://github.com/pandas-dev/pandas/issues/40226 + warnings.filterwarnings( + "ignore", "make_block is deprecated", DeprecationWarning + ) + _check_pandas_roundtrip(df, preserve_index=True) + + def test_integer_index_column(self): + df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')]) + _check_pandas_roundtrip(df, preserve_index=True) + + def test_index_metadata_field_name(self): + # test None case, and strangely named non-index columns + df = pd.DataFrame( + [(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)], + index=pd.MultiIndex.from_arrays( + [['c', 'b', 'a'], [3, 2, 1]], + names=[None, 'foo'] + ), + columns=['a', None, '__index_level_0__'], + ) + with pytest.warns(UserWarning): + t = pa.Table.from_pandas(df, preserve_index=True) + js = t.schema.pandas_metadata + + col1, col2, col3, idx0, foo = js['columns'] + + assert col1['name'] == 'a' + assert col1['name'] == col1['field_name'] + + assert col2['name'] is None + assert col2['field_name'] == 'None' + + assert col3['name'] == '__index_level_0__' + assert col3['name'] == col3['field_name'] + + idx0_descr, foo_descr = js['index_columns'] + assert idx0_descr == '__index_level_0__' + assert idx0['field_name'] == idx0_descr + assert idx0['name'] is None + + assert foo_descr == 'foo' + assert foo['field_name'] == foo_descr + assert foo['name'] == foo_descr + + def test_categorical_column_index(self): + df = pd.DataFrame( + [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], + columns=pd.Index(list('def'), dtype='category') + ) + t = pa.Table.from_pandas(df, preserve_index=True) + js = t.schema.pandas_metadata + + column_indexes, = js['column_indexes'] + assert column_indexes['name'] is None + assert column_indexes['pandas_type'] == 'categorical' + assert column_indexes['numpy_type'] == 'int8' + + md = column_indexes['metadata'] + assert md['num_categories'] == 3 + assert md['ordered'] is False + + def test_string_column_index(self): + df = pd.DataFrame( + [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], + columns=pd.Index(list('def'), name='stringz') + ) + t = pa.Table.from_pandas(df, preserve_index=True) + js = t.schema.pandas_metadata + + column_indexes, = js['column_indexes'] + assert column_indexes['name'] == 'stringz' + assert column_indexes['name'] == column_indexes['field_name'] + assert column_indexes['numpy_type'] == 'object' + assert column_indexes['pandas_type'] == 'unicode' + + md = 
column_indexes['metadata'] + + assert len(md) == 1 + assert md['encoding'] == 'UTF-8' + + def test_datetimetz_column_index(self): + df = pd.DataFrame( + [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], + columns=pd.date_range( + start='2017-01-01', periods=3, tz='America/New_York' + ) + ) + t = pa.Table.from_pandas(df, preserve_index=True) + js = t.schema.pandas_metadata + + column_indexes, = js['column_indexes'] + assert column_indexes['name'] is None + assert column_indexes['pandas_type'] == 'datetimetz' + assert column_indexes['numpy_type'] == 'datetime64[ns]' + + md = column_indexes['metadata'] + assert md['timezone'] == 'America/New_York' + + def test_datetimetz_row_index(self): + df = pd.DataFrame({ + 'a': pd.date_range( + start='2017-01-01', periods=3, tz='America/New_York' + ) + }) + df = df.set_index('a') + + _check_pandas_roundtrip(df, preserve_index=True) + + def test_categorical_row_index(self): + df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}) + df['a'] = df.a.astype('category') + df = df.set_index('a') + + _check_pandas_roundtrip(df, preserve_index=True) + + def test_duplicate_column_names_does_not_crash(self): + df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa')) + with pytest.raises(ValueError): + pa.Table.from_pandas(df) + + def test_dictionary_indices_boundscheck(self): + # ARROW-1658. No validation of indices leads to segfaults in pandas + indices = [[0, 1], [0, -1]] + + for inds in indices: + arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False) + batch = pa.RecordBatch.from_arrays([arr], ['foo']) + table = pa.Table.from_batches([batch, batch, batch]) + + with pytest.raises(IndexError): + arr.to_pandas() + + with pytest.raises(IndexError): + table.to_pandas() + + def test_unicode_with_unicode_column_and_index(self): + df = pd.DataFrame({'あ': ['い']}, index=['う']) + + _check_pandas_roundtrip(df, preserve_index=True) + + def test_mixed_column_names(self): + # mixed type column names are not reconstructed exactly + df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) + + for cols in [['あ', b'a'], [1, '2'], [1, 1.5]]: + df.columns = pd.Index(cols, dtype=object) + + # assert that the from_pandas raises the warning + with pytest.warns(UserWarning): + pa.Table.from_pandas(df) + + expected = df.copy() + expected.columns = df.columns.values.astype(str) + with pytest.warns(UserWarning): + _check_pandas_roundtrip(df, expected=expected, + preserve_index=True) + + def test_binary_column_name(self): + if Version("2.0.0") <= Version(pd.__version__) < Version("3.0.0"): + # TODO: regression in pandas, hopefully fixed in next version + # https://issues.apache.org/jira/browse/ARROW-18394 + # https://github.com/pandas-dev/pandas/issues/50127 + pytest.skip("Regression in pandas 2.0.0") + column_data = ['い'] + key = 'あ'.encode() + data = {key: column_data} + df = pd.DataFrame(data) + + # we can't use _check_pandas_roundtrip here because our metadata + # is always decoded as utf8: even if binary goes in, utf8 comes out + t = pa.Table.from_pandas(df, preserve_index=True) + df2 = t.to_pandas() + assert df.values[0] == df2.values[0] + assert df.index.values[0] == df2.index.values[0] + assert df.columns[0] == key + + def test_multiindex_duplicate_values(self): + num_rows = 3 + numbers = list(range(num_rows)) + index = pd.MultiIndex.from_arrays( + [['foo', 'foo', 'bar'], numbers], + names=['foobar', 'some_numbers'], + ) + + df = pd.DataFrame({'numbers': numbers}, index=index) + + _check_pandas_roundtrip(df, preserve_index=True) + + def test_metadata_with_mixed_types(self): + df = 
pd.DataFrame({'data': [b'some_bytes', 'some_unicode']}) + table = pa.Table.from_pandas(df) + js = table.schema.pandas_metadata + assert 'mixed' not in js + data_column = js['columns'][0] + assert data_column['pandas_type'] == 'bytes' + assert data_column['numpy_type'] == 'object' + + def test_ignore_metadata(self): + df = pd.DataFrame({'a': [1, 2, 3], 'b': ['foo', 'bar', 'baz']}, + index=['one', 'two', 'three']) + table = pa.Table.from_pandas(df) + + result = table.to_pandas(ignore_metadata=True) + expected = (table.cast(table.schema.remove_metadata()) + .to_pandas()) + + tm.assert_frame_equal(result, expected) + + def test_list_metadata(self): + df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]}) + schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))]) + table = pa.Table.from_pandas(df, schema=schema) + js = table.schema.pandas_metadata + assert 'mixed' not in js + data_column = js['columns'][0] + assert data_column['pandas_type'] == 'list[int64]' + assert data_column['numpy_type'] == 'object' + + def test_struct_metadata(self): + df = pd.DataFrame({'dicts': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]}) + table = pa.Table.from_pandas(df) + pandas_metadata = table.schema.pandas_metadata + assert pandas_metadata['columns'][0]['pandas_type'] == 'object' + + def test_decimal_metadata(self): + expected = pd.DataFrame({ + 'decimals': [ + decimal.Decimal('394092382910493.12341234678'), + -decimal.Decimal('314292388910493.12343437128'), + ] + }) + table = pa.Table.from_pandas(expected) + js = table.schema.pandas_metadata + assert 'mixed' not in js + data_column = js['columns'][0] + assert data_column['pandas_type'] == 'decimal' + assert data_column['numpy_type'] == 'object' + assert data_column['metadata'] == {'precision': 26, 'scale': 11} + + def test_table_column_subset_metadata(self): + # ARROW-1883 + # non-default index + for index in [ + pd.Index(['a', 'b', 'c'], name='index'), + pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]: + df = pd.DataFrame({'a': [1, 2, 3], + 'b': [.1, .2, .3]}, index=index) + table = pa.Table.from_pandas(df) + + table_subset = table.remove_column(1) + result = table_subset.to_pandas() + expected = df[['a']] + if isinstance(df.index, pd.DatetimeIndex): + df.index.freq = None + tm.assert_frame_equal(result, expected) + + table_subset2 = table_subset.remove_column(1) + result = table_subset2.to_pandas() + tm.assert_frame_equal(result, df[['a']].reset_index(drop=True)) + + def test_to_pandas_column_subset_multiindex(self): + # ARROW-10122 + df = pd.DataFrame( + {"first": list(range(5)), + "second": list(range(5)), + "value": np.arange(5)} + ) + table = pa.Table.from_pandas(df.set_index(["first", "second"])) + + subset = table.select(["first", "value"]) + result = subset.to_pandas() + expected = df[["first", "value"]].set_index("first") + tm.assert_frame_equal(result, expected) + + def test_empty_list_metadata(self): + # Create table with array of empty lists, forced to have type + # list(string) in pyarrow + c1 = [["test"], ["a", "b"], None] + c2 = [[], [], []] + arrays = OrderedDict([ + ('c1', pa.array(c1, type=pa.list_(pa.string()))), + ('c2', pa.array(c2, type=pa.list_(pa.string()))), + ]) + rb = pa.RecordBatch.from_arrays( + list(arrays.values()), + list(arrays.keys()) + ) + tbl = pa.Table.from_batches([rb]) + + # First roundtrip changes schema, because pandas cannot preserve the + # type of empty lists + df = tbl.to_pandas() + tbl2 = pa.Table.from_pandas(df) + md2 = tbl2.schema.pandas_metadata + + # Second roundtrip + df2 = tbl2.to_pandas() + 
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)])) + + tm.assert_frame_equal(df2, expected) + + assert md2['columns'] == [ + { + 'name': 'c1', + 'field_name': 'c1', + 'metadata': None, + 'numpy_type': 'object', + 'pandas_type': 'list[unicode]', + }, + { + 'name': 'c2', + 'field_name': 'c2', + 'metadata': None, + 'numpy_type': 'object', + 'pandas_type': 'list[empty]', + } + ] + + def test_metadata_pandas_version(self): + df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}) + table = pa.Table.from_pandas(df) + assert table.schema.pandas_metadata['pandas_version'] is not None + + def test_mismatch_metadata_schema(self): + # ARROW-10511 + # It is possible that the metadata and actual schema is not fully + # matching (eg no timezone information for tz-aware column) + # -> to_pandas() conversion should not fail on that + df = pd.DataFrame({"datetime": pd.date_range("2020-01-01", periods=3)}) + + # OPTION 1: casting after conversion + table = pa.Table.from_pandas(df) + # cast the "datetime" column to be tz-aware + new_col = table["datetime"].cast(pa.timestamp('ns', tz="UTC")) + new_table1 = table.set_column( + 0, pa.field("datetime", new_col.type), new_col + ) + + # OPTION 2: specify schema during conversion + schema = pa.schema([("datetime", pa.timestamp('ns', tz="UTC"))]) + new_table2 = pa.Table.from_pandas(df, schema=schema) + + expected = df.copy() + expected["datetime"] = expected["datetime"].dt.tz_localize("UTC") + + for new_table in [new_table1, new_table2]: + # ensure the new table still has the pandas metadata + assert new_table.schema.pandas_metadata is not None + # convert to pandas + result = new_table.to_pandas() + tm.assert_frame_equal(result, expected) + + +class TestConvertPrimitiveTypes: + """ + Conversion tests for primitive (e.g. numeric) types. 
+ """ + + def test_float_no_nulls(self): + data = {} + fields = [] + dtypes = [('f2', pa.float16()), + ('f4', pa.float32()), + ('f8', pa.float64())] + num_values = 100 + + for numpy_dtype, arrow_dtype in dtypes: + values = np.random.randn(num_values) + data[numpy_dtype] = values.astype(numpy_dtype) + fields.append(pa.field(numpy_dtype, arrow_dtype)) + + df = pd.DataFrame(data) + schema = pa.schema(fields) + _check_pandas_roundtrip(df, expected_schema=schema) + + def test_float_nulls(self): + num_values = 100 + + null_mask = np.random.randint(0, 10, size=num_values) < 3 + dtypes = [('f2', pa.float16()), + ('f4', pa.float32()), + ('f8', pa.float64())] + names = ['f2', 'f4', 'f8'] + expected_cols = [] + + arrays = [] + fields = [] + for name, arrow_dtype in dtypes: + values = np.random.randn(num_values).astype(name) + + arr = pa.array(values, from_pandas=True, mask=null_mask) + arrays.append(arr) + fields.append(pa.field(name, arrow_dtype)) + values[null_mask] = np.nan + + expected_cols.append(values) + + ex_frame = pd.DataFrame(dict(zip(names, expected_cols)), + columns=names) + + table = pa.Table.from_arrays(arrays, names) + assert table.schema.equals(pa.schema(fields)) + result = table.to_pandas() + tm.assert_frame_equal(result, ex_frame) + + def test_float_nulls_to_ints(self): + # ARROW-2135 + df = pd.DataFrame({"a": [1.0, 2.0, np.nan]}) + schema = pa.schema([pa.field("a", pa.int16(), nullable=True)]) + table = pa.Table.from_pandas(df, schema=schema, safe=False) + assert table[0].to_pylist() == [1, 2, None] + tm.assert_frame_equal(df, table.to_pandas()) + + def test_float_nulls_to_boolean(self): + s = pd.Series([0.0, 1.0, 2.0, None, -3.0]) + expected = pd.Series([False, True, True, None, True]) + _check_array_roundtrip(s, expected=expected, type=pa.bool_()) + + def test_series_from_pandas_false_respected(self): + # Check that explicit from_pandas=False is respected + s = pd.Series([0.0, np.nan]) + arr = pa.array(s, from_pandas=False) + assert arr.null_count == 0 + assert np.isnan(arr[1].as_py()) + + def test_integer_no_nulls(self): + data = OrderedDict() + fields = [] + + numpy_dtypes = [ + ('i1', pa.int8()), ('i2', pa.int16()), + ('i4', pa.int32()), ('i8', pa.int64()), + ('u1', pa.uint8()), ('u2', pa.uint16()), + ('u4', pa.uint32()), ('u8', pa.uint64()), + ('longlong', pa.int64()), ('ulonglong', pa.uint64()) + ] + num_values = 100 + + for dtype, arrow_dtype in numpy_dtypes: + info = np.iinfo(dtype) + values = np.random.randint(max(info.min, np.iinfo(np.int_).min), + min(info.max, np.iinfo(np.int_).max), + size=num_values) + data[dtype] = values.astype(dtype) + fields.append(pa.field(dtype, arrow_dtype)) + + df = pd.DataFrame(data) + schema = pa.schema(fields) + _check_pandas_roundtrip(df, expected_schema=schema) + + def test_all_integer_types(self): + # Test all Numpy integer aliases + data = OrderedDict() + numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', + 'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc', + 'int_', 'uint', 'longlong', 'ulonglong'] + for dtype in numpy_dtypes: + data[dtype] = np.arange(12, dtype=dtype) + df = pd.DataFrame(data) + _check_pandas_roundtrip(df) + + # Do the same with pa.array() + # (for some reason, it doesn't use the same code paths at all) + for np_arr in data.values(): + arr = pa.array(np_arr) + assert arr.to_pylist() == np_arr.tolist() + + def test_integer_byteorder(self): + # Byteswapped arrays are not supported yet + int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'] + for dt in int_dtypes: + for order in '=<>': + data = 
np.array([1, 2, 42], dtype=order + dt) + for np_arr in (data, data[::2]): + if data.dtype.isnative: + arr = pa.array(data) + assert arr.to_pylist() == data.tolist() + else: + with pytest.raises(NotImplementedError): + arr = pa.array(data) + + def test_integer_with_nulls(self): + # pandas requires upcast to float dtype + + int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'] + num_values = 100 + + null_mask = np.random.randint(0, 10, size=num_values) < 3 + + expected_cols = [] + arrays = [] + for name in int_dtypes: + values = np.random.randint(0, 100, size=num_values) + + arr = pa.array(values, mask=null_mask) + arrays.append(arr) + + expected = values.astype('f8') + expected[null_mask] = np.nan + + expected_cols.append(expected) + + ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)), + columns=int_dtypes) + + table = pa.Table.from_arrays(arrays, int_dtypes) + result = table.to_pandas() + + tm.assert_frame_equal(result, ex_frame) + + def test_array_from_pandas_type_cast(self): + arr = np.arange(10, dtype='int64') + + target_type = pa.int8() + + result = pa.array(arr, type=target_type) + expected = pa.array(arr.astype('int8')) + assert result.equals(expected) + + def test_boolean_no_nulls(self): + num_values = 100 + + np.random.seed(0) + + df = pd.DataFrame({'bools': np.random.randn(num_values) > 0}) + field = pa.field('bools', pa.bool_()) + schema = pa.schema([field]) + _check_pandas_roundtrip(df, expected_schema=schema) + + def test_boolean_nulls(self): + # pandas requires upcast to object dtype + num_values = 100 + np.random.seed(0) + + mask = np.random.randint(0, 10, size=num_values) < 3 + values = np.random.randint(0, 10, size=num_values) < 5 + + arr = pa.array(values, mask=mask) + + expected = values.astype(object) + expected[mask] = None + + field = pa.field('bools', pa.bool_()) + schema = pa.schema([field]) + ex_frame = pd.DataFrame({'bools': expected}) + + table = pa.Table.from_arrays([arr], ['bools']) + assert table.schema.equals(schema) + result = table.to_pandas() + + tm.assert_frame_equal(result, ex_frame) + + def test_boolean_to_int(self): + # test from dtype=bool + s = pd.Series([True, True, False, True, True] * 2) + expected = pd.Series([1, 1, 0, 1, 1] * 2) + _check_array_roundtrip(s, expected=expected, type=pa.int64()) + + def test_boolean_objects_to_int(self): + # test from dtype=object + s = pd.Series([True, True, False, True, True] * 2, dtype=object) + expected = pd.Series([1, 1, 0, 1, 1] * 2) + expected_msg = 'Expected integer, got bool' + with pytest.raises(pa.ArrowTypeError, match=expected_msg): + _check_array_roundtrip(s, expected=expected, type=pa.int64()) + + def test_boolean_nulls_to_float(self): + # test from dtype=object + s = pd.Series([True, True, False, None, True] * 2) + expected = pd.Series([1.0, 1.0, 0.0, None, 1.0] * 2) + _check_array_roundtrip(s, expected=expected, type=pa.float64()) + + def test_boolean_multiple_columns(self): + # ARROW-6325 (multiple columns resulting in strided conversion) + df = pd.DataFrame(np.ones((3, 2), dtype='bool'), columns=['a', 'b']) + _check_pandas_roundtrip(df) + + def test_float_object_nulls(self): + arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object) + df = pd.DataFrame({'floats': arr}) + expected = pd.DataFrame({'floats': pd.to_numeric(arr)}) + field = pa.field('floats', pa.float64()) + schema = pa.schema([field]) + _check_pandas_roundtrip(df, expected=expected, + expected_schema=schema) + + def test_float_with_null_as_integer(self): + # ARROW-2298 + s = pd.Series([np.nan, 1., 2., np.nan]) + 
+ types = [pa.int8(), pa.int16(), pa.int32(), pa.int64(), + pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()] + for ty in types: + result = pa.array(s, type=ty) + expected = pa.array([None, 1, 2, None], type=ty) + assert result.equals(expected) + + df = pd.DataFrame({'has_nulls': s}) + schema = pa.schema([pa.field('has_nulls', ty)]) + result = pa.Table.from_pandas(df, schema=schema, + preserve_index=False) + assert result[0].chunk(0).equals(expected) + + def test_int_object_nulls(self): + arr = np.array([None, 1, np.int64(3)] * 5, dtype=object) + df = pd.DataFrame({'ints': arr}) + expected = pd.DataFrame({'ints': pd.to_numeric(arr)}) + field = pa.field('ints', pa.int64()) + schema = pa.schema([field]) + _check_pandas_roundtrip(df, expected=expected, + expected_schema=schema) + + def test_boolean_object_nulls(self): + arr = np.array([False, None, True] * 100, dtype=object) + df = pd.DataFrame({'bools': arr}) + field = pa.field('bools', pa.bool_()) + schema = pa.schema([field]) + _check_pandas_roundtrip(df, expected_schema=schema) + + def test_all_nulls_cast_numeric(self): + arr = np.array([None], dtype=object) + + def _check_type(t): + a2 = pa.array(arr, type=t) + assert a2.type == t + assert a2[0].as_py() is None + + _check_type(pa.int32()) + _check_type(pa.float64()) + + def test_half_floats_from_numpy(self): + arr = np.array([1.5, np.nan], dtype=np.float16) + a = pa.array(arr, type=pa.float16()) + x, y = a.to_pylist() + assert isinstance(x, np.float16) + assert x == 1.5 + assert isinstance(y, np.float16) + assert np.isnan(y) + + a = pa.array(arr, type=pa.float16(), from_pandas=True) + x, y = a.to_pylist() + assert isinstance(x, np.float16) + assert x == 1.5 + assert y is None + + +@pytest.mark.parametrize('dtype', + ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']) +def test_array_integer_object_nulls_option(dtype): + num_values = 100 + + null_mask = np.random.randint(0, 10, size=num_values) < 3 + values = np.random.randint(0, 100, size=num_values, dtype=dtype) + + array = pa.array(values, mask=null_mask) + + if null_mask.any(): + expected = values.astype('O') + expected[null_mask] = None + else: + expected = values + + result = array.to_pandas(integer_object_nulls=True) + + np.testing.assert_equal(result, expected) + + +@pytest.mark.parametrize('dtype', + ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']) +def test_table_integer_object_nulls_option(dtype): + num_values = 100 + + null_mask = np.random.randint(0, 10, size=num_values) < 3 + values = np.random.randint(0, 100, size=num_values, dtype=dtype) + + array = pa.array(values, mask=null_mask) + + if null_mask.any(): + expected = values.astype('O') + expected[null_mask] = None + else: + expected = values + + expected = pd.DataFrame({dtype: expected}) + + table = pa.Table.from_arrays([array], [dtype]) + result = table.to_pandas(integer_object_nulls=True) + + tm.assert_frame_equal(result, expected) + + +class TestConvertDateTimeLikeTypes: + """ + Conversion tests for datetime- and timestamp-like types (date64, etc.). 
+ """ + + def test_timestamps_notimezone_no_nulls(self): + df = pd.DataFrame({ + 'datetime64': np.array([ + '2007-07-13T01:23:34.123456789', + '2006-01-13T12:34:56.432539784', + '2010-08-13T05:46:57.437699912'], + dtype='datetime64[ns]') + }) + field = pa.field('datetime64', pa.timestamp('ns')) + schema = pa.schema([field]) + _check_pandas_roundtrip( + df, + expected_schema=schema, + ) + + def test_timestamps_notimezone_nulls(self): + df = pd.DataFrame({ + 'datetime64': np.array([ + '2007-07-13T01:23:34.123456789', + None, + '2010-08-13T05:46:57.437699912'], + dtype='datetime64[ns]') + }) + field = pa.field('datetime64', pa.timestamp('ns')) + schema = pa.schema([field]) + _check_pandas_roundtrip( + df, + expected_schema=schema, + ) + + @pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns']) + def test_timestamps_with_timezone(self, unit): + if Version(pd.__version__) < Version("2.0.0") and unit != 'ns': + pytest.skip("pandas < 2.0 only supports nanosecond datetime64") + df = pd.DataFrame({ + 'datetime64': np.array([ + '2007-07-13T01:23:34.123', + '2006-01-13T12:34:56.432', + '2010-08-13T05:46:57.437'], + dtype=f'datetime64[{unit}]') + }) + df['datetime64'] = df['datetime64'].dt.tz_localize('US/Eastern') + _check_pandas_roundtrip(df) + + _check_series_roundtrip(df['datetime64']) + + # drop-in a null + df = pd.DataFrame({ + 'datetime64': np.array([ + '2007-07-13T01:23:34.123456789', + None, + '2006-01-13T12:34:56.432539784', + '2010-08-13T05:46:57.437699912'], + dtype=f'datetime64[{unit}]') + }) + df['datetime64'] = df['datetime64'].dt.tz_localize('US/Eastern') + + _check_pandas_roundtrip(df) + + def test_python_datetime(self): + # ARROW-2106 + date_array = [datetime.today() + timedelta(days=x) for x in range(10)] + df = pd.DataFrame({ + 'datetime': pd.Series(date_array, dtype=object) + }) + + table = pa.Table.from_pandas(df) + assert isinstance(table[0].chunk(0), pa.TimestampArray) + + result = table.to_pandas() + # Pandas v2 defaults to [ns], but Arrow defaults to [us] time units + # so we need to cast the pandas dtype. Pandas v1 will always silently + # coerce to [ns] due to lack of non-[ns] support. 
+ expected_df = pd.DataFrame({ + 'datetime': pd.Series(date_array, dtype='datetime64[us]') + }) + tm.assert_frame_equal(expected_df, result) + + def test_python_datetime_with_pytz_tzinfo(self): + pytz = pytest.importorskip("pytz") + + for tz in [pytz.utc, pytz.timezone('US/Eastern'), pytz.FixedOffset(1)]: + values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz)] + df = pd.DataFrame({'datetime': values}) + _check_pandas_roundtrip(df) + + @h.given(st.none() | past.timezones) + @h.settings(deadline=None) + def test_python_datetime_with_pytz_timezone(self, tz): + if str(tz) in ["build/etc/localtime", "Factory"]: + pytest.skip("Localtime timezone not supported") + values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz)] + df = pd.DataFrame({'datetime': values}) + _check_pandas_roundtrip(df, check_dtype=False) + + def test_python_datetime_with_timezone_tzinfo(self): + pytz = pytest.importorskip("pytz") + from datetime import timezone + + values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=timezone.utc)] + # also test with index to ensure both paths roundtrip (ARROW-9962) + df = pd.DataFrame({'datetime': values}, index=values) + _check_pandas_roundtrip(df, preserve_index=True) + + # datetime.timezone is going to be pytz.FixedOffset + hours = 1 + tz_timezone = timezone(timedelta(hours=hours)) + tz_pytz = pytz.FixedOffset(hours * 60) + values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz_timezone)] + values_exp = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz_pytz)] + df = pd.DataFrame({'datetime': values}, index=values) + df_exp = pd.DataFrame({'datetime': values_exp}, index=values_exp) + _check_pandas_roundtrip(df, expected=df_exp, preserve_index=True) + + def test_python_datetime_subclass(self): + + class MyDatetime(datetime): + # see https://github.com/pandas-dev/pandas/issues/21142 + nanosecond = 0.0 + + date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)] + df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)}) + + table = pa.Table.from_pandas(df) + assert isinstance(table[0].chunk(0), pa.TimestampArray) + + result = table.to_pandas() + + # Pandas v2 defaults to [ns], but Arrow defaults to [us] time units + # so we need to cast the pandas dtype. Pandas v1 will always silently + # coerce to [ns] due to lack of non-[ns] support. 
+ expected_df = pd.DataFrame( + {"datetime": pd.Series(date_array, dtype='datetime64[us]')}) + + # https://github.com/pandas-dev/pandas/issues/21142 + expected_df["datetime"] = pd.to_datetime(expected_df["datetime"]) + + tm.assert_frame_equal(expected_df, result) + + def test_python_date_subclass(self): + + class MyDate(date): + pass + + date_array = [MyDate(2000, 1, 1)] + df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)}) + + table = pa.Table.from_pandas(df) + assert isinstance(table[0].chunk(0), pa.Date32Array) + + result = table.to_pandas() + expected_df = pd.DataFrame( + {"date": np.array([date(2000, 1, 1)], dtype=object)} + ) + tm.assert_frame_equal(expected_df, result) + + def test_datetime64_to_date32(self): + # ARROW-1718 + arr = pa.array([date(2017, 10, 23), None]) + c = pa.chunked_array([arr]) + s = c.to_pandas() + + arr2 = pa.Array.from_pandas(s, type=pa.date32()) + + assert arr2.equals(arr.cast('date32')) + + @pytest.mark.parametrize('mask', [ + None, + np.array([True, False, False, True, False, False]), + ]) + def test_pandas_datetime_to_date64(self, mask): + s = pd.to_datetime([ + '2018-05-10T00:00:00', + '2018-05-11T00:00:00', + '2018-05-12T00:00:00', + '2018-05-10T10:24:01', + '2018-05-11T10:24:01', + '2018-05-12T10:24:01', + ]) + arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask) + + data = np.array([ + date(2018, 5, 10), + date(2018, 5, 11), + date(2018, 5, 12), + date(2018, 5, 10), + date(2018, 5, 11), + date(2018, 5, 12), + ]) + expected = pa.array(data, mask=mask, type=pa.date64()) + + assert arr.equals(expected) + + @pytest.mark.parametrize("coerce_to_ns,expected_dtype", + [(False, 'datetime64[ms]'), + (True, 'datetime64[ns]')]) + def test_array_types_date_as_object(self, coerce_to_ns, expected_dtype): + data = [date(2000, 1, 1), + None, + date(1970, 1, 1), + date(2040, 2, 26)] + expected_days = np.array(['2000-01-01', None, '1970-01-01', + '2040-02-26'], dtype='datetime64[D]') + + if Version(pd.__version__) < Version("2.0.0"): + # ARROW-3789: Coerce date/timestamp types to datetime64[ns] + expected_dtype = 'datetime64[ns]' + + expected = np.array(['2000-01-01', None, '1970-01-01', + '2040-02-26'], dtype=expected_dtype) + + objects = [pa.array(data), + pa.chunked_array([data])] + + for obj in objects: + result = obj.to_pandas(coerce_temporal_nanoseconds=coerce_to_ns) + expected_obj = expected_days.astype(object) + assert result.dtype == expected_obj.dtype + npt.assert_array_equal(result, expected_obj) + + result = obj.to_pandas(date_as_object=False, + coerce_temporal_nanoseconds=coerce_to_ns) + assert result.dtype == expected.dtype + npt.assert_array_equal(result, expected) + + @pytest.mark.parametrize("coerce_to_ns,expected_type", + [(False, 'datetime64[ms]'), + (True, 'datetime64[ns]')]) + def test_table_convert_date_as_object(self, coerce_to_ns, expected_type): + df = pd.DataFrame({ + 'date': [date(2000, 1, 1), + None, + date(1970, 1, 1), + date(2040, 2, 26)]}) + + table = pa.Table.from_pandas(df, preserve_index=False) + + df_datetime = table.to_pandas(date_as_object=False, + coerce_temporal_nanoseconds=coerce_to_ns) + df_object = table.to_pandas() + + tm.assert_frame_equal(df.astype(expected_type), df_datetime, + check_dtype=True) + tm.assert_frame_equal(df, df_object, check_dtype=True) + + @pytest.mark.parametrize("arrow_type", + [pa.date32(), pa.date64(), pa.timestamp('s'), + pa.timestamp('ms'), pa.timestamp('us'), + pa.timestamp('ns'), pa.timestamp('s', 'UTC'), + pa.timestamp('ms', 'UTC'), pa.timestamp('us', 'UTC'), + pa.timestamp('ns', 
'UTC')]) + def test_array_coerce_temporal_nanoseconds(self, arrow_type): + data = [date(2000, 1, 1), datetime(2001, 1, 1)] + expected = pd.Series(data) + arr = pa.array(data).cast(arrow_type) + result = arr.to_pandas( + coerce_temporal_nanoseconds=True, date_as_object=False) + expected_tz = None + if hasattr(arrow_type, 'tz') and arrow_type.tz is not None: + expected_tz = 'UTC' + expected_type = pa.timestamp('ns', expected_tz).to_pandas_dtype() + tm.assert_series_equal(result, expected.astype(expected_type)) + + @pytest.mark.parametrize("arrow_type", + [pa.date32(), pa.date64(), pa.timestamp('s'), + pa.timestamp('ms'), pa.timestamp('us'), + pa.timestamp('ns'), pa.timestamp('s', 'UTC'), + pa.timestamp('ms', 'UTC'), pa.timestamp('us', 'UTC'), + pa.timestamp('ns', 'UTC')]) + def test_table_coerce_temporal_nanoseconds(self, arrow_type): + data = [date(2000, 1, 1), datetime(2001, 1, 1)] + schema = pa.schema([pa.field('date', arrow_type)]) + expected_df = pd.DataFrame({'date': data}) + table = pa.table([pa.array(data)], schema=schema) + result_df = table.to_pandas( + coerce_temporal_nanoseconds=True, date_as_object=False) + expected_tz = None + if hasattr(arrow_type, 'tz') and arrow_type.tz is not None: + expected_tz = 'UTC' + expected_type = pa.timestamp('ns', expected_tz).to_pandas_dtype() + tm.assert_frame_equal(result_df, expected_df.astype(expected_type)) + + def test_date_infer(self): + df = pd.DataFrame({ + 'date': [date(2000, 1, 1), + None, + date(1970, 1, 1), + date(2040, 2, 26)]}) + table = pa.Table.from_pandas(df, preserve_index=False) + field = pa.field('date', pa.date32()) + + # schema's metadata is generated by from_pandas conversion + expected_schema = pa.schema([field], metadata=table.schema.metadata) + assert table.schema.equals(expected_schema) + + result = table.to_pandas() + tm.assert_frame_equal(result, df) + + def test_date_mask(self): + arr = np.array([date(2017, 4, 3), date(2017, 4, 4)], + dtype='datetime64[D]') + mask = [True, False] + result = pa.array(arr, mask=np.array(mask)) + expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]') + expected = pa.array(expected, from_pandas=True) + assert expected.equals(result) + + def test_date_objects_typed(self): + arr = np.array([ + date(2017, 4, 3), + None, + date(2017, 4, 4), + date(2017, 4, 5)], dtype=object) + + arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32') + arr_i8 = arr_i4.astype('int64') * 86400000 + mask = np.array([False, True, False, False]) + + t32 = pa.date32() + t64 = pa.date64() + + a32 = pa.array(arr, type=t32) + a64 = pa.array(arr, type=t64) + + a32_expected = pa.array(arr_i4, mask=mask, type=t32) + a64_expected = pa.array(arr_i8, mask=mask, type=t64) + + assert a32.equals(a32_expected) + assert a64.equals(a64_expected) + + # Test converting back to pandas + colnames = ['date32', 'date64'] + table = pa.Table.from_arrays([a32, a64], colnames) + + ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04', + '2017-04-05'], + dtype='datetime64[D]')) + ex_values[1] = pd.NaT.value + + # date32 and date64 convert to [ms] in pandas v2, but + # in pandas v1 they are silently coerced to [ns] + ex_datetime64ms = ex_values.astype('datetime64[ms]') + expected_pandas = pd.DataFrame({'date32': ex_datetime64ms, + 'date64': ex_datetime64ms}, + columns=colnames) + table_pandas = table.to_pandas(date_as_object=False) + tm.assert_frame_equal(table_pandas, expected_pandas) + + table_pandas_objects = table.to_pandas() + ex_objects = ex_values.astype('object') + expected_pandas_objects = 
pd.DataFrame({'date32': ex_objects, + 'date64': ex_objects}, + columns=colnames) + tm.assert_frame_equal(table_pandas_objects, + expected_pandas_objects) + + def test_pandas_null_values(self): + # ARROW-842 + pd_NA = getattr(pd, 'NA', None) + values = np.array([datetime(2000, 1, 1), pd.NaT, pd_NA], dtype=object) + values_with_none = np.array([datetime(2000, 1, 1), None, None], + dtype=object) + result = pa.array(values, from_pandas=True) + expected = pa.array(values_with_none, from_pandas=True) + assert result.equals(expected) + assert result.null_count == 2 + + # ARROW-9407 + assert pa.array([pd.NaT], from_pandas=True).type == pa.null() + assert pa.array([pd_NA], from_pandas=True).type == pa.null() + + def test_dates_from_integers(self): + t1 = pa.date32() + t2 = pa.date64() + + arr = np.array([17259, 17260, 17261], dtype='int32') + arr2 = arr.astype('int64') * 86400000 + + a1 = pa.array(arr, type=t1) + a2 = pa.array(arr2, type=t2) + + expected = date(2017, 4, 3) + assert a1[0].as_py() == expected + assert a2[0].as_py() == expected + + def test_pytime_from_pandas(self): + pytimes = [time(1, 2, 3, 1356), + time(4, 5, 6, 1356)] + + # microseconds + t1 = pa.time64('us') + + aobjs = np.array(pytimes + [None], dtype=object) + parr = pa.array(aobjs) + assert parr.type == t1 + assert parr[0].as_py() == pytimes[0] + assert parr[1].as_py() == pytimes[1] + assert parr[2].as_py() is None + + # DataFrame + df = pd.DataFrame({'times': aobjs}) + batch = pa.RecordBatch.from_pandas(df) + assert batch[0].equals(parr) + + # Test ndarray of int64 values + arr = np.array([_pytime_to_micros(v) for v in pytimes], + dtype='int64') + + a1 = pa.array(arr, type=pa.time64('us')) + assert a1[0].as_py() == pytimes[0] + + a2 = pa.array(arr * 1000, type=pa.time64('ns')) + assert a2[0].as_py() == pytimes[0] + + a3 = pa.array((arr / 1000).astype('i4'), + type=pa.time32('ms')) + assert a3[0].as_py() == pytimes[0].replace(microsecond=1000) + + a4 = pa.array((arr / 1000000).astype('i4'), + type=pa.time32('s')) + assert a4[0].as_py() == pytimes[0].replace(microsecond=0) + + def test_arrow_time_to_pandas(self): + pytimes = [time(1, 2, 3, 1356), + time(4, 5, 6, 1356), + time(0, 0, 0)] + + expected = np.array(pytimes[:2] + [None]) + expected_ms = np.array([x.replace(microsecond=1000) + for x in pytimes[:2]] + + [None]) + expected_s = np.array([x.replace(microsecond=0) + for x in pytimes[:2]] + + [None]) + + arr = np.array([_pytime_to_micros(v) for v in pytimes], + dtype='int64') + arr = np.array([_pytime_to_micros(v) for v in pytimes], + dtype='int64') + + null_mask = np.array([False, False, True], dtype=bool) + + a1 = pa.array(arr, mask=null_mask, type=pa.time64('us')) + a2 = pa.array(arr * 1000, mask=null_mask, + type=pa.time64('ns')) + + a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask, + type=pa.time32('ms')) + a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask, + type=pa.time32('s')) + + names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]'] + batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names) + + for arr, expected_values in [(a1, expected), + (a2, expected), + (a3, expected_ms), + (a4, expected_s)]: + result_pandas = arr.to_pandas() + assert (result_pandas.values == expected_values).all() + + df = batch.to_pandas() + expected_df = pd.DataFrame({'time64[us]': expected, + 'time64[ns]': expected, + 'time32[ms]': expected_ms, + 'time32[s]': expected_s}, + columns=names) + + tm.assert_frame_equal(df, expected_df) + + def test_numpy_datetime64_columns(self): + datetime64_ns = np.array([ + 
'2007-07-13T01:23:34.123456789', + None, + '2006-01-13T12:34:56.432539784', + '2010-08-13T05:46:57.437699912'], + dtype='datetime64[ns]') + _check_array_from_pandas_roundtrip(datetime64_ns) + + datetime64_us = np.array([ + '2007-07-13T01:23:34.123456', + None, + '2006-01-13T12:34:56.432539', + '2010-08-13T05:46:57.437699'], + dtype='datetime64[us]') + _check_array_from_pandas_roundtrip(datetime64_us) + + datetime64_ms = np.array([ + '2007-07-13T01:23:34.123', + None, + '2006-01-13T12:34:56.432', + '2010-08-13T05:46:57.437'], + dtype='datetime64[ms]') + _check_array_from_pandas_roundtrip(datetime64_ms) + + datetime64_s = np.array([ + '2007-07-13T01:23:34', + None, + '2006-01-13T12:34:56', + '2010-08-13T05:46:57'], + dtype='datetime64[s]') + _check_array_from_pandas_roundtrip(datetime64_s) + + def test_timestamp_to_pandas_coerces_to_ns(self): + # non-ns timestamp gets cast to ns on conversion to pandas + if Version(pd.__version__) >= Version("2.0.0"): + pytest.skip("pandas >= 2.0 supports non-nanosecond datetime64") + + arr = pa.array([1, 2, 3], pa.timestamp('ms')) + expected = pd.Series(pd.to_datetime([1, 2, 3], unit='ms')) + s = arr.to_pandas() + tm.assert_series_equal(s, expected) + arr = pa.chunked_array([arr]) + s = arr.to_pandas() + tm.assert_series_equal(s, expected) + + def test_timestamp_to_pandas_out_of_bounds(self): + # ARROW-7758 check for out of bounds timestamps for non-ns timestamps + # that end up getting coerced into ns timestamps. + + for unit in ['s', 'ms', 'us']: + for tz in [None, 'America/New_York']: + arr = pa.array([datetime(1, 1, 1)], pa.timestamp(unit, tz=tz)) + table = pa.table({'a': arr}) + + msg = "would result in out of bounds timestamp" + with pytest.raises(ValueError, match=msg): + arr.to_pandas(coerce_temporal_nanoseconds=True) + + with pytest.raises(ValueError, match=msg): + table.to_pandas(coerce_temporal_nanoseconds=True) + + with pytest.raises(ValueError, match=msg): + # chunked array + table.column('a').to_pandas(coerce_temporal_nanoseconds=True) + + # just ensure those don't give an error, but do not + # check actual garbage output + arr.to_pandas(safe=False, coerce_temporal_nanoseconds=True) + table.to_pandas(safe=False, coerce_temporal_nanoseconds=True) + table.column('a').to_pandas( + safe=False, coerce_temporal_nanoseconds=True) + + def test_timestamp_to_pandas_empty_chunked(self): + # ARROW-7907 table with chunked array with 0 chunks + table = pa.table({'a': pa.chunked_array([], type=pa.timestamp('us'))}) + result = table.to_pandas() + expected = pd.DataFrame({'a': pd.Series([], dtype="datetime64[us]")}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()]) + def test_numpy_datetime64_day_unit(self, dtype): + datetime64_d = np.array([ + '2007-07-13', + None, + '2006-01-15', + '2010-08-19'], + dtype='datetime64[D]') + _check_array_from_pandas_roundtrip(datetime64_d, type=dtype) + + def test_array_from_pandas_date_with_mask(self): + m = np.array([True, False, True]) + data = pd.Series([ + date(1990, 1, 1), + date(1991, 1, 1), + date(1992, 1, 1) + ]) + + result = pa.Array.from_pandas(data, mask=m) + + expected = pd.Series([None, date(1991, 1, 1), None]) + assert pa.Array.from_pandas(expected).equals(result) + + @pytest.mark.skipif( + Version('1.16.0') <= Version(np.__version__) < Version('1.16.1'), + reason='Until numpy/numpy#12745 is resolved') + def test_fixed_offset_timezone(self): + df = pd.DataFrame({ + 'a': [ + pd.Timestamp('2012-11-11 00:00:00+01:00'), + pd.NaT + ] + }) + # 
'check_dtype=False' because pandas >= 2 uses datetime.timezone + # instead of pytz.FixedOffset, and thus the dtype is not exactly + # identical (pyarrow still defaults to pytz) + # TODO remove if https://github.com/apache/arrow/issues/15047 is fixed + _check_pandas_roundtrip(df, check_dtype=False) + + @pytest.mark.parametrize("unit", ['s', 'ms', 'us', 'ns']) + def test_timedeltas_no_nulls(self, unit): + if Version(pd.__version__) < Version("2.0.0"): + unit = 'ns' + df = pd.DataFrame({ + 'timedelta64': np.array([0, 3600000000000, 7200000000000], + dtype=f'timedelta64[{unit}]') + }) + field = pa.field('timedelta64', pa.duration(unit)) + schema = pa.schema([field]) + _check_pandas_roundtrip( + df, + expected_schema=schema, + ) + + @pytest.mark.parametrize("unit", ['s', 'ms', 'us', 'ns']) + def test_timedeltas_nulls(self, unit): + if Version(pd.__version__) < Version("2.0.0"): + unit = 'ns' + df = pd.DataFrame({ + 'timedelta64': np.array([0, None, 7200000000000], + dtype=f'timedelta64[{unit}]') + }) + field = pa.field('timedelta64', pa.duration(unit)) + schema = pa.schema([field]) + _check_pandas_roundtrip( + df, + expected_schema=schema, + ) + + def test_month_day_nano_interval(self): + from pandas.tseries.offsets import DateOffset + df = pd.DataFrame({ + 'date_offset': [None, + DateOffset(days=3600, months=3600, microseconds=3, + nanoseconds=600)] + }) + schema = pa.schema([('date_offset', pa.month_day_nano_interval())]) + _check_pandas_roundtrip( + df, + expected_schema=schema) + + +# ---------------------------------------------------------------------- +# Conversion tests for string and binary types. + + +class TestConvertStringLikeTypes: + + def test_pandas_unicode(self): + repeats = 1000 + values = ['foo', None, 'bar', 'mañana', np.nan] + df = pd.DataFrame({'strings': values * repeats}) + field = pa.field('strings', pa.string()) + schema = pa.schema([field]) + ex_values = ['foo', None, 'bar', 'mañana', None] + expected = pd.DataFrame({'strings': ex_values * repeats}) + + _check_pandas_roundtrip(df, expected=expected, expected_schema=schema) + + def test_bytes_to_binary(self): + values = ['qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan] + df = pd.DataFrame({'strings': values}) + + table = pa.Table.from_pandas(df) + assert table[0].type == pa.binary() + + values2 = [b'qux', b'foo', None, b'barz', b'qux', None] + expected = pd.DataFrame({'strings': values2}) + _check_pandas_roundtrip(df, expected) + + @pytest.mark.large_memory + def test_bytes_exceed_2gb(self): + v1 = b'x' * 100000000 + v2 = b'x' * 147483646 + + # ARROW-2227, hit exactly 2GB on the nose + df = pd.DataFrame({ + 'strings': [v1] * 20 + [v2] + ['x'] * 20 + }) + arr = pa.array(df['strings']) + assert isinstance(arr, pa.ChunkedArray) + assert arr.num_chunks == 2 + arr = None + + table = pa.Table.from_pandas(df) + assert table[0].num_chunks == 2 + + @pytest.mark.large_memory + @pytest.mark.parametrize('char', ['x', b'x']) + def test_auto_chunking_pandas_series_of_strings(self, char): + # ARROW-2367 + v1 = char * 100000000 + v2 = char * 147483646 + + df = pd.DataFrame({ + 'strings': [[v1]] * 20 + [[v2]] + [[b'x']] + }) + arr = pa.array(df['strings'], from_pandas=True) + arr.validate(full=True) + assert isinstance(arr, pa.ChunkedArray) + assert arr.num_chunks == 2 + assert len(arr.chunk(0)) == 21 + assert len(arr.chunk(1)) == 1 + + def test_fixed_size_bytes(self): + values = [b'foo', None, bytearray(b'bar'), None, None, b'hey'] + df = pd.DataFrame({'strings': values}) + schema = pa.schema([pa.field('strings', pa.binary(3))]) 
+ table = pa.Table.from_pandas(df, schema=schema) + assert table.schema[0].type == schema[0].type + assert table.schema[0].name == schema[0].name + result = table.to_pandas() + tm.assert_frame_equal(result, df) + + def test_fixed_size_bytes_does_not_accept_varying_lengths(self): + values = [b'foo', None, b'ba', None, None, b'hey'] + df = pd.DataFrame({'strings': values}) + schema = pa.schema([pa.field('strings', pa.binary(3))]) + with pytest.raises(pa.ArrowInvalid): + pa.Table.from_pandas(df, schema=schema) + + def test_variable_size_bytes(self): + s = pd.Series([b'123', b'', b'a', None]) + _check_series_roundtrip(s, type_=pa.binary()) + + def test_binary_from_bytearray(self): + s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'), + None]) + # Explicitly set type + _check_series_roundtrip(s, type_=pa.binary()) + # Infer type from bytearrays + _check_series_roundtrip(s, expected_pa_type=pa.binary()) + + def test_large_binary(self): + s = pd.Series([b'123', b'', b'a', None]) + _check_series_roundtrip(s, type_=pa.large_binary()) + df = pd.DataFrame({'a': s}) + _check_pandas_roundtrip( + df, schema=pa.schema([('a', pa.large_binary())])) + + def test_large_string(self): + s = pd.Series(['123', '', 'a', None]) + _check_series_roundtrip(s, type_=pa.large_string()) + df = pd.DataFrame({'a': s}) + _check_pandas_roundtrip( + df, schema=pa.schema([('a', pa.large_string())])) + + def test_binary_view(self): + s = pd.Series([b'123', b'', b'a', None]) + _check_series_roundtrip(s, type_=pa.binary_view()) + df = pd.DataFrame({'a': s}) + _check_pandas_roundtrip( + df, schema=pa.schema([('a', pa.binary_view())])) + + def test_string_view(self): + s = pd.Series(['123', '', 'a', None]) + _check_series_roundtrip(s, type_=pa.string_view()) + df = pd.DataFrame({'a': s}) + _check_pandas_roundtrip( + df, schema=pa.schema([('a', pa.string_view())])) + + def test_table_empty_str(self): + values = ['', '', '', '', ''] + df = pd.DataFrame({'strings': values}) + field = pa.field('strings', pa.string()) + schema = pa.schema([field]) + table = pa.Table.from_pandas(df, schema=schema) + + result1 = table.to_pandas(strings_to_categorical=False) + expected1 = pd.DataFrame({'strings': values}) + tm.assert_frame_equal(result1, expected1, check_dtype=True) + + result2 = table.to_pandas(strings_to_categorical=True) + expected2 = pd.DataFrame({'strings': pd.Categorical(values)}) + tm.assert_frame_equal(result2, expected2, check_dtype=True) + + def test_selective_categoricals(self): + values = ['', '', '', '', ''] + df = pd.DataFrame({'strings': values}) + field = pa.field('strings', pa.string()) + schema = pa.schema([field]) + table = pa.Table.from_pandas(df, schema=schema) + expected_str = pd.DataFrame({'strings': values}) + expected_cat = pd.DataFrame({'strings': pd.Categorical(values)}) + + result1 = table.to_pandas(categories=['strings']) + tm.assert_frame_equal(result1, expected_cat, check_dtype=True) + result2 = table.to_pandas(categories=[]) + tm.assert_frame_equal(result2, expected_str, check_dtype=True) + result3 = table.to_pandas(categories=('strings',)) + tm.assert_frame_equal(result3, expected_cat, check_dtype=True) + result4 = table.to_pandas(categories=tuple()) + tm.assert_frame_equal(result4, expected_str, check_dtype=True) + + def test_to_pandas_categorical_zero_length(self): + # ARROW-3586 + array = pa.array([], type=pa.int32()) + table = pa.Table.from_arrays(arrays=[array], names=['col']) + # This would segfault under 0.11.0 + table.to_pandas(categories=['col']) + + def 
test_to_pandas_categories_already_dictionary(self): + # Showed up in ARROW-6434, ARROW-6435 + array = pa.array(['foo', 'foo', 'foo', 'bar']).dictionary_encode() + table = pa.Table.from_arrays(arrays=[array], names=['col']) + result = table.to_pandas(categories=['col']) + assert table.to_pandas().equals(result) + + def test_table_str_to_categorical_without_na(self): + values = ['a', 'a', 'b', 'b', 'c'] + df = pd.DataFrame({'strings': values}) + field = pa.field('strings', pa.string()) + schema = pa.schema([field]) + table = pa.Table.from_pandas(df, schema=schema) + + result = table.to_pandas(strings_to_categorical=True) + expected = pd.DataFrame({'strings': pd.Categorical(values)}) + tm.assert_frame_equal(result, expected, check_dtype=True) + + with pytest.raises(pa.ArrowInvalid): + table.to_pandas(strings_to_categorical=True, + zero_copy_only=True) + + def test_table_str_to_categorical_with_na(self): + values = [None, 'a', 'b', np.nan] + df = pd.DataFrame({'strings': values}) + field = pa.field('strings', pa.string()) + schema = pa.schema([field]) + table = pa.Table.from_pandas(df, schema=schema) + + result = table.to_pandas(strings_to_categorical=True) + expected = pd.DataFrame({'strings': pd.Categorical(values)}) + tm.assert_frame_equal(result, expected, check_dtype=True) + + with pytest.raises(pa.ArrowInvalid): + table.to_pandas(strings_to_categorical=True, + zero_copy_only=True) + + # Regression test for ARROW-2101 + def test_array_of_bytes_to_strings(self): + converted = pa.array(np.array([b'x'], dtype=object), pa.string()) + assert converted.type == pa.string() + + # Make sure that if an ndarray of bytes is passed to the array + # constructor and the type is string, it will fail if those bytes + # cannot be converted to utf-8 + def test_array_of_bytes_to_strings_bad_data(self): + with pytest.raises( + pa.lib.ArrowInvalid, + match="was not a utf8 string"): + pa.array(np.array([b'\x80\x81'], dtype=object), pa.string()) + + def test_numpy_string_array_to_fixed_size_binary(self): + arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3') + + converted = pa.array(arr, type=pa.binary(3)) + expected = pa.array(list(arr), type=pa.binary(3)) + assert converted.equals(expected) + + mask = np.array([False, True, False]) + converted = pa.array(arr, type=pa.binary(3), mask=mask) + expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3)) + assert converted.equals(expected) + + with pytest.raises(pa.lib.ArrowInvalid, + match=r'Got bytestring of length 3 \(expected 4\)'): + arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3') + pa.array(arr, type=pa.binary(4)) + + with pytest.raises( + pa.lib.ArrowInvalid, + match=r'Got bytestring of length 12 \(expected 3\)'): + arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3') + pa.array(arr, type=pa.binary(3)) + + +class TestConvertDecimalTypes: + """ + Conversion test for decimal types. 
+ """ + decimal32 = [ + decimal.Decimal('-1234.123'), + decimal.Decimal('1234.439') + ] + decimal64 = [ + decimal.Decimal('-129934.123331'), + decimal.Decimal('129534.123731') + ] + decimal128 = [ + decimal.Decimal('394092382910493.12341234678'), + decimal.Decimal('-314292388910493.12343437128') + ] + + @pytest.mark.parametrize(('values', 'expected_type'), [ + pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'), + pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'), + pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128') + ]) + def test_decimal_from_pandas(self, values, expected_type): + expected = pd.DataFrame({'decimals': values}) + table = pa.Table.from_pandas(expected, preserve_index=False) + field = pa.field('decimals', expected_type) + + # schema's metadata is generated by from_pandas conversion + expected_schema = pa.schema([field], metadata=table.schema.metadata) + assert table.schema.equals(expected_schema) + + @pytest.mark.parametrize('values', [ + pytest.param(decimal32, id='decimal32'), + pytest.param(decimal64, id='decimal64'), + pytest.param(decimal128, id='decimal128') + ]) + def test_decimal_to_pandas(self, values): + expected = pd.DataFrame({'decimals': values}) + converted = pa.Table.from_pandas(expected) + df = converted.to_pandas() + tm.assert_frame_equal(df, expected) + + def test_decimal_fails_with_truncation(self): + data1 = [decimal.Decimal('1.234')] + type1 = pa.decimal128(10, 2) + with pytest.raises(pa.ArrowInvalid): + pa.array(data1, type=type1) + + data2 = [decimal.Decimal('1.2345')] + type2 = pa.decimal128(10, 3) + with pytest.raises(pa.ArrowInvalid): + pa.array(data2, type=type2) + + def test_decimal_with_different_precisions(self): + data = [ + decimal.Decimal('0.01'), + decimal.Decimal('0.001'), + ] + series = pd.Series(data) + array = pa.array(series) + assert array.to_pylist() == data + assert array.type == pa.decimal128(3, 3) + + array = pa.array(data, type=pa.decimal128(12, 5)) + expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')] + assert array.to_pylist() == expected + + def test_decimal_with_None_explicit_type(self): + series = pd.Series([decimal.Decimal('3.14'), None]) + _check_series_roundtrip(series, type_=pa.decimal128(12, 5)) + + # Test that having all None values still produces decimal array + series = pd.Series([None] * 2) + _check_series_roundtrip(series, type_=pa.decimal128(12, 5)) + + def test_decimal_with_None_infer_type(self): + series = pd.Series([decimal.Decimal('3.14'), None]) + _check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2)) + + def test_strided_objects(self, tmpdir): + # see ARROW-3053 + data = { + 'a': {0: 'a'}, + 'b': {0: decimal.Decimal('0.0')} + } + + # This yields strided objects + df = pd.DataFrame.from_dict(data) + _check_pandas_roundtrip(df) + + +class TestConvertListTypes: + """ + Conversion tests for list<> types. 
+ """ + + def test_column_of_arrays(self): + df, schema = dataframe_with_arrays() + _check_pandas_roundtrip(df, schema=schema, expected_schema=schema) + table = pa.Table.from_pandas(df, schema=schema, preserve_index=False) + + # schema's metadata is generated by from_pandas conversion + expected_schema = schema.with_metadata(table.schema.metadata) + assert table.schema.equals(expected_schema) + + for column in df.columns: + field = schema.field(column) + _check_array_roundtrip(df[column], type=field.type) + + def test_column_of_arrays_to_py(self): + # Test regression in ARROW-1199 not caught in above test + dtype = 'i1' + arr = np.array([ + np.arange(10, dtype=dtype), + np.arange(5, dtype=dtype), + None, + np.arange(1, dtype=dtype) + ], dtype=object) + type_ = pa.list_(pa.int8()) + parr = pa.array(arr, type=type_) + + assert parr[0].as_py() == list(range(10)) + assert parr[1].as_py() == list(range(5)) + assert parr[2].as_py() is None + assert parr[3].as_py() == [0] + + def test_column_of_boolean_list(self): + # ARROW-4370: Table to pandas conversion fails for list of bool + array = pa.array([[True, False], [True]], type=pa.list_(pa.bool_())) + table = pa.Table.from_arrays([array], names=['col1']) + df = table.to_pandas() + + expected_df = pd.DataFrame({'col1': [[True, False], [True]]}) + tm.assert_frame_equal(df, expected_df) + + s = table[0].to_pandas() + tm.assert_series_equal(pd.Series(s), df['col1'], check_names=False) + + def test_column_of_decimal_list(self): + array = pa.array([[decimal.Decimal('1'), decimal.Decimal('2')], + [decimal.Decimal('3.3')]], + type=pa.list_(pa.decimal128(2, 1))) + table = pa.Table.from_arrays([array], names=['col1']) + df = table.to_pandas() + + expected_df = pd.DataFrame( + {'col1': [[decimal.Decimal('1'), decimal.Decimal('2')], + [decimal.Decimal('3.3')]]}) + tm.assert_frame_equal(df, expected_df) + + def test_nested_types_from_ndarray_null_entries(self): + # Root cause of ARROW-6435 + s = pd.Series(np.array([np.nan, np.nan], dtype=object)) + + for ty in [pa.list_(pa.int64()), + pa.large_list(pa.int64()), + pa.struct([pa.field('f0', 'int32')])]: + result = pa.array(s, type=ty) + expected = pa.array([None, None], type=ty) + assert result.equals(expected) + + with pytest.raises(TypeError): + pa.array(s.values, type=ty) + + def test_column_of_lists(self): + df, schema = dataframe_with_lists() + _check_pandas_roundtrip(df, schema=schema, expected_schema=schema) + table = pa.Table.from_pandas(df, schema=schema, preserve_index=False) + + # schema's metadata is generated by from_pandas conversion + expected_schema = schema.with_metadata(table.schema.metadata) + assert table.schema.equals(expected_schema) + + for column in df.columns: + field = schema.field(column) + _check_array_roundtrip(df[column], type=field.type) + + def test_column_of_lists_first_empty(self): + # ARROW-2124 + num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]] + series = pd.Series([np.array(s, dtype=float) for s in num_lists]) + arr = pa.array(series) + result = pd.Series(arr.to_pandas()) + tm.assert_series_equal(result, series) + + def test_column_of_lists_chunked(self): + # ARROW-1357 + df = pd.DataFrame({ + 'lists': np.array([ + [1, 2], + None, + [2, 3], + [4, 5], + [6, 7], + [8, 9] + ], dtype=object) + }) + + schema = pa.schema([ + pa.field('lists', pa.list_(pa.int64())) + ]) + + t1 = pa.Table.from_pandas(df[:2], schema=schema) + t2 = pa.Table.from_pandas(df[2:], schema=schema) + + table = pa.concat_tables([t1, t2]) + result = table.to_pandas() + + tm.assert_frame_equal(result, df) + + 
def test_empty_column_of_lists_chunked(self): + df = pd.DataFrame({ + 'lists': np.array([], dtype=object) + }) + + schema = pa.schema([ + pa.field('lists', pa.list_(pa.int64())) + ]) + + table = pa.Table.from_pandas(df, schema=schema) + result = table.to_pandas() + + tm.assert_frame_equal(result, df) + + def test_column_of_lists_chunked2(self): + data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11], + [12, 13], [14, 15], [16, 17]] + data2 = [[8, 9], [18, 19]] + + a1 = pa.array(data1) + a2 = pa.array(data2) + + t1 = pa.Table.from_arrays([a1], names=['a']) + t2 = pa.Table.from_arrays([a2], names=['a']) + + concatenated = pa.concat_tables([t1, t2]) + + result = concatenated.to_pandas() + expected = pd.DataFrame({'a': data1 + data2}) + + tm.assert_frame_equal(result, expected) + + def test_column_of_lists_strided(self): + df, schema = dataframe_with_lists() + df = pd.concat([df] * 6, ignore_index=True) + + arr = df['int64'].values[::3] + assert arr.strides[0] != 8 + + _check_array_roundtrip(arr) + + def test_nested_lists_all_none(self): + data = np.array([[None, None], None], dtype=object) + + arr = pa.array(data) + expected = pa.array(list(data)) + assert arr.equals(expected) + assert arr.type == pa.list_(pa.null()) + + data2 = np.array([None, None, [None, None], + np.array([None, None], dtype=object)], + dtype=object) + arr = pa.array(data2) + expected = pa.array([None, None, [None, None], [None, None]]) + assert arr.equals(expected) + + def test_nested_lists_all_empty(self): + # ARROW-2128 + data = pd.Series([[], [], []]) + arr = pa.array(data) + expected = pa.array(list(data)) + assert arr.equals(expected) + assert arr.type == pa.list_(pa.null()) + + def test_nested_list_first_empty(self): + # ARROW-2711 + data = pd.Series([[], ["a"]]) + arr = pa.array(data) + expected = pa.array(list(data)) + assert arr.equals(expected) + assert arr.type == pa.list_(pa.string()) + + def test_nested_smaller_ints(self): + # ARROW-1345, ARROW-2008, there were some type inference bugs happening + # before + data = pd.Series([np.array([1, 2, 3], dtype='i1'), None]) + result = pa.array(data) + result2 = pa.array(data.values) + expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8())) + assert result.equals(expected) + assert result2.equals(expected) + + data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None]) + result3 = pa.array(data3) + expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32())) + assert result3.equals(expected3) + + def test_infer_lists(self): + data = OrderedDict([ + ('nan_ints', [[np.nan, 1], [2, 3]]), + ('ints', [[0, 1], [2, 3]]), + ('strs', [[None, 'b'], ['c', 'd']]), + ('nested_strs', [[[None, 'b'], ['c', 'd']], None]) + ]) + df = pd.DataFrame(data) + + expected_schema = pa.schema([ + pa.field('nan_ints', pa.list_(pa.int64())), + pa.field('ints', pa.list_(pa.int64())), + pa.field('strs', pa.list_(pa.string())), + pa.field('nested_strs', pa.list_(pa.list_(pa.string()))) + ]) + + _check_pandas_roundtrip(df, expected_schema=expected_schema) + + def test_fixed_size_list(self): + # ARROW-7365 + fixed_ty = pa.list_(pa.int64(), list_size=4) + variable_ty = pa.list_(pa.int64()) + + data = [[0, 1, 2, 3], None, [4, 5, 6, 7], [8, 9, 10, 11]] + fixed_arr = pa.array(data, type=fixed_ty) + variable_arr = pa.array(data, type=variable_ty) + + result = fixed_arr.to_pandas() + expected = variable_arr.to_pandas() + + for left, right in zip(result, expected): + if left is None: + assert right is None + npt.assert_array_equal(left, right) + + def test_infer_numpy_array(self): + data = 
OrderedDict([ + ('ints', [ + np.array([0, 1], dtype=np.int64), + np.array([2, 3], dtype=np.int64) + ]) + ]) + df = pd.DataFrame(data) + expected_schema = pa.schema([ + pa.field('ints', pa.list_(pa.int64())) + ]) + + _check_pandas_roundtrip(df, expected_schema=expected_schema) + + def test_to_list_of_structs_pandas(self): + ints = pa.array([1, 2, 3], pa.int32()) + strings = pa.array([['a', 'b'], ['c', 'd'], ['e', 'f']], + pa.list_(pa.string())) + structs = pa.StructArray.from_arrays([ints, strings], ['f1', 'f2']) + data = pa.ListArray.from_arrays([0, 1, 3], structs) + + expected = pd.Series([ + [{'f1': 1, 'f2': ['a', 'b']}], + [{'f1': 2, 'f2': ['c', 'd']}, + {'f1': 3, 'f2': ['e', 'f']}] + ]) + + series = pd.Series(data.to_pandas()) + + # pandas.testing generates a + # DeprecationWarning: elementwise comparison failed + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "elementwise comparison failed", + DeprecationWarning) + tm.assert_series_equal(series, expected) + + def test_to_list_of_maps_pandas(self): + if ((Version(np.__version__) >= Version("1.25.0.dev0")) and + (Version(pd.__version__) < Version("2.0.0"))): + # TODO: regression in pandas with numpy 1.25dev + # https://github.com/pandas-dev/pandas/issues/50360 + pytest.skip("Regression in pandas with numpy 1.25") + data = [ + [[('foo', ['a', 'b']), ('bar', ['c', 'd'])]], + [[('baz', []), ('qux', None), ('quux', [None, 'e'])], [('quz', ['f', 'g'])]] + ] + arr = pa.array(data, pa.list_(pa.map_(pa.utf8(), pa.list_(pa.utf8())))) + series = arr.to_pandas() + expected = pd.Series(data) + + # pandas.testing generates a + # DeprecationWarning: elementwise comparison failed + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "elementwise comparison failed", + DeprecationWarning) + tm.assert_series_equal(series, expected) + + def test_to_list_of_maps_pandas_sliced(self): + """ + A slightly more rigorous test for chunk/slice combinations + """ + + if ((Version(np.__version__) >= Version("1.25.0.dev0")) and + (Version(pd.__version__) < Version("2.0.0"))): + # TODO: regression in pandas with numpy 1.25dev + # https://github.com/pandas-dev/pandas/issues/50360 + pytest.skip("Regression in pandas with numpy 1.25") + + keys = pa.array(['ignore', 'foo', 'bar', 'baz', + 'qux', 'quux', 'ignore']).slice(1, 5) + items = pa.array( + [['ignore'], ['ignore'], ['a', 'b'], ['c', 'd'], [], None, [None, 'e']], + pa.list_(pa.string()), + ).slice(2, 5) + map = pa.MapArray.from_arrays([0, 2, 4], keys, items) + arr = pa.ListArray.from_arrays([0, 1, 2], map) + + series = arr.to_pandas() + expected = pd.Series([ + [[('foo', ['a', 'b']), ('bar', ['c', 'd'])]], + [[('baz', []), ('qux', None)]], + ]) + + series_sliced = arr.slice(1, 2).to_pandas() + expected_sliced = pd.Series([ + [[('baz', []), ('qux', None)]], + ]) + + # pandas.testing generates a + # DeprecationWarning: elementwise comparison failed + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "elementwise comparison failed", + DeprecationWarning) + tm.assert_series_equal(series, expected) + tm.assert_series_equal(series_sliced, expected_sliced) + + @pytest.mark.parametrize('t,data,expected', [ + ( + pa.int64, + [[1, 2], [3], None], + [None, [3], None] + ), + ( + pa.string, + [['aaa', 'bb'], ['c'], None], + [None, ['c'], None] + ), + ( + pa.null, + [[None, None], [None], None], + [None, [None], None] + ) + ]) + def test_array_from_pandas_typed_array_with_mask(self, t, data, expected): + m = np.array([True, False, True]) + + s = pd.Series(data) + result = 
pa.Array.from_pandas(s, mask=m, type=pa.list_(t())) + + assert pa.Array.from_pandas(expected, + type=pa.list_(t())).equals(result) + + def test_empty_list_roundtrip(self): + empty_list_array = np.empty((3,), dtype=object) + empty_list_array.fill([]) + + df = pd.DataFrame({'a': np.array(['1', '2', '3']), + 'b': empty_list_array}) + tbl = pa.Table.from_pandas(df) + + result = tbl.to_pandas() + + tm.assert_frame_equal(result, df) + + def test_array_from_nested_arrays(self): + df, schema = dataframe_with_arrays() + for field in schema: + arr = df[field.name].values + expected = pa.array(list(arr), type=field.type) + result = pa.array(arr) + assert result.type == field.type # == list + assert result.equals(expected) + + def test_nested_large_list(self): + s = (pa.array([[[1, 2, 3], [4]], None], + type=pa.large_list(pa.large_list(pa.int64()))) + .to_pandas()) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", + "Creating an ndarray from ragged nested", + _np_VisibleDeprecationWarning) + warnings.filterwarnings("ignore", "elementwise comparison failed", + DeprecationWarning) + tm.assert_series_equal( + s, pd.Series([[[1, 2, 3], [4]], None], dtype=object), + check_names=False) + + def test_large_binary_list(self): + for list_type_factory in (pa.list_, pa.large_list): + s = (pa.array([["aa", "bb"], None, ["cc"], []], + type=list_type_factory(pa.large_binary())) + .to_pandas()) + tm.assert_series_equal( + s, pd.Series([[b"aa", b"bb"], None, [b"cc"], []]), + check_names=False) + s = (pa.array([["aa", "bb"], None, ["cc"], []], + type=list_type_factory(pa.large_string())) + .to_pandas()) + tm.assert_series_equal( + s, pd.Series([["aa", "bb"], None, ["cc"], []]), + check_names=False) + + def test_list_of_dictionary(self): + child = pa.array(["foo", "bar", None, "foo"]).dictionary_encode() + arr = pa.ListArray.from_arrays([0, 1, 3, 3, 4], child) + + # Expected a Series of lists + expected = pd.Series(arr.to_pylist()) + tm.assert_series_equal(arr.to_pandas(), expected) + + # Same but with nulls + arr = arr.take([0, 1, None, 3]) + expected[2] = None + tm.assert_series_equal(arr.to_pandas(), expected) + + @pytest.mark.large_memory + def test_auto_chunking_on_list_overflow(self): + # ARROW-9976 + n = 2**21 + df = pd.DataFrame.from_dict({ + "a": list(np.zeros((n, 2**10), dtype='uint8')), + "b": range(n) + }) + table = pa.Table.from_pandas(df) + table.validate(full=True) + + column_a = table[0] + assert column_a.num_chunks == 2 + assert len(column_a.chunk(0)) == 2**21 - 1 + assert len(column_a.chunk(1)) == 1 + + def test_map_array_roundtrip(self): + data = [[(b'a', 1), (b'b', 2)], + [(b'c', 3)], + [(b'd', 4), (b'e', 5), (b'f', 6)], + [(b'g', 7)]] + + df = pd.DataFrame({"map": data}) + schema = pa.schema([("map", pa.map_(pa.binary(), pa.int32()))]) + + _check_pandas_roundtrip(df, schema=schema) + + def test_map_array_chunked(self): + data1 = [[(b'a', 1), (b'b', 2)], + [(b'c', 3)], + [(b'd', 4), (b'e', 5), (b'f', 6)], + [(b'g', 7)]] + data2 = [[(k, v * 2) for k, v in row] for row in data1] + + arr1 = pa.array(data1, type=pa.map_(pa.binary(), pa.int32())) + arr2 = pa.array(data2, type=pa.map_(pa.binary(), pa.int32())) + arr = pa.chunked_array([arr1, arr2]) + + expected = pd.Series(data1 + data2) + actual = arr.to_pandas() + tm.assert_series_equal(actual, expected, check_names=False) + + def test_map_array_with_nulls(self): + data = [[(b'a', 1), (b'b', 2)], + None, + [(b'd', 4), (b'e', 5), (b'f', None)], + [(b'g', 7)]] + + # None value in item array causes upcast to float + expected = [[(k, 
float(v) if v is not None else None) for k, v in row] + if row is not None else None for row in data] + expected = pd.Series(expected) + + arr = pa.array(data, type=pa.map_(pa.binary(), pa.int32())) + actual = arr.to_pandas() + tm.assert_series_equal(actual, expected, check_names=False) + + def test_map_array_dictionary_encoded(self): + offsets = pa.array([0, 3, 5]) + items = pa.array(['a', 'b', 'c', 'a', 'd']).dictionary_encode() + keys = pa.array(list(range(len(items)))) + arr = pa.MapArray.from_arrays(offsets, keys, items) + + # Dictionary encoded values converted to dense + expected = pd.Series( + [[(0, 'a'), (1, 'b'), (2, 'c')], [(3, 'a'), (4, 'd')]]) + + actual = arr.to_pandas() + tm.assert_series_equal(actual, expected, check_names=False) + + def test_list_no_duplicate_base(self): + # ARROW-18400 + arr = pa.array([[1, 2], [3, 4, 5], None, [6, None], [7, 8]]) + chunked_arr = pa.chunked_array([arr.slice(0, 3), arr.slice(3, 1)]) + + np_arr = chunked_arr.to_numpy() + + expected = np.array([[1., 2.], [3., 4., 5.], None, + [6., np.nan]], dtype="object") + for left, right in zip(np_arr, expected): + if right is None: + assert left == right + else: + npt.assert_array_equal(left, right) + + expected_base = np.array([[1., 2., 3., 4., 5., 6., np.nan]]) + npt.assert_array_equal(np_arr[0].base, expected_base) + + np_arr_sliced = chunked_arr.slice(1, 3).to_numpy() + + expected = np.array([[3, 4, 5], None, [6, np.nan]], dtype="object") + for left, right in zip(np_arr_sliced, expected): + if right is None: + assert left == right + else: + npt.assert_array_equal(left, right) + + expected_base = np.array([[3., 4., 5., 6., np.nan]]) + npt.assert_array_equal(np_arr_sliced[0].base, expected_base) + + def test_list_values_behind_null(self): + arr = pa.ListArray.from_arrays( + offsets=pa.array([0, 2, 4, 6]), + values=pa.array([1, 2, 99, 99, 3, None]), + mask=pa.array([False, True, False]) + ) + np_arr = arr.to_numpy(zero_copy_only=False) + + expected = np.array([[1., 2.], None, [3., np.nan]], dtype="object") + for left, right in zip(np_arr, expected): + if right is None: + assert left == right + else: + npt.assert_array_equal(left, right) + + @pytest.mark.parametrize("klass", [pa.ListViewArray, pa.LargeListViewArray]) + def test_list_view_to_pandas_with_in_order_offsets(self, klass): + arr = klass.from_arrays( + offsets=pa.array([0, 2, 4]), + sizes=pa.array([2, 2, 2]), + values=pa.array([1, 2, 3, 4, 5, 6]), + ) + + actual = arr.to_pandas() + expected = pd.Series([[1, 2], [3, 4], [5, 6]]) + + tm.assert_series_equal(actual, expected) + + @pytest.mark.parametrize("klass", [pa.ListViewArray, pa.LargeListViewArray]) + def test_list_view_to_pandas_with_out_of_order_offsets(self, klass): + arr = klass.from_arrays( + offsets=pa.array([2, 4, 0]), + sizes=pa.array([2, 2, 2]), + values=pa.array([1, 2, 3, 4, 5, 6]), + ) + + actual = arr.to_pandas() + expected = pd.Series([[3, 4], [5, 6], [1, 2]]) + + tm.assert_series_equal(actual, expected) + + @pytest.mark.parametrize("klass", [pa.ListViewArray, pa.LargeListViewArray]) + def test_list_view_to_pandas_with_overlapping_offsets(self, klass): + arr = klass.from_arrays( + offsets=pa.array([0, 1, 2]), + sizes=pa.array([4, 4, 4]), + values=pa.array([1, 2, 3, 4, 5, 6]), + ) + + actual = arr.to_pandas() + expected = pd.Series([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]]) + + tm.assert_series_equal(actual, expected) + + @pytest.mark.parametrize("klass", [pa.ListViewArray, pa.LargeListViewArray]) + def test_list_view_to_pandas_with_null_values(self, klass): + arr = 
klass.from_arrays( + offsets=pa.array([0, 2, 2]), + sizes=pa.array([2, 0, 0]), + values=pa.array([1, None]), + mask=pa.array([False, False, True]) + ) + + actual = arr.to_pandas() + expected = pd.Series([[1, np.nan], [], None]) + + tm.assert_series_equal(actual, expected) + + @pytest.mark.parametrize("klass", [pa.ListViewArray, pa.LargeListViewArray]) + def test_list_view_to_pandas_multiple_chunks(self, klass): + gc.collect() + bytes_start = pa.total_allocated_bytes() + arr1 = klass.from_arrays( + offsets=pa.array([2, 1, 0]), + sizes=pa.array([2, 2, 2]), + values=pa.array([1, 2, 3, 4]) + ) + arr2 = klass.from_arrays( + offsets=pa.array([0, 1, 1]), + sizes=pa.array([3, 3, 0]), + values=pa.array([5, 6, 7, None]), + mask=pa.array([False, False, True]) + ) + arr = pa.chunked_array([arr1, arr2]) + + actual = arr.to_pandas() + expected = pd.Series([[3, 4], [2, 3], [1, 2], [5, 6, 7], [6, 7, np.nan], None]) + + tm.assert_series_equal(actual, expected) + + del actual + del arr + del arr1 + del arr2 + bytes_end = pa.total_allocated_bytes() + assert bytes_end == bytes_start + + +class TestConvertStructTypes: + """ + Conversion tests for struct types. + """ + + def test_pandas_roundtrip(self): + df = pd.DataFrame({'dicts': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]}) + + expected_schema = pa.schema([ + ('dicts', pa.struct([('a', pa.int64()), ('b', pa.int64())])), + ]) + + _check_pandas_roundtrip(df, expected_schema=expected_schema) + + # specifying schema explicitly in from_pandas + _check_pandas_roundtrip( + df, schema=expected_schema, expected_schema=expected_schema) + + def test_to_pandas(self): + ints = pa.array([None, 2, 3], type=pa.int64()) + strs = pa.array(['a', None, 'c'], type=pa.string()) + bools = pa.array([True, False, None], type=pa.bool_()) + arr = pa.StructArray.from_arrays( + [ints, strs, bools], + ['ints', 'strs', 'bools']) + + expected = pd.Series([ + {'ints': None, 'strs': 'a', 'bools': True}, + {'ints': 2, 'strs': None, 'bools': False}, + {'ints': 3, 'strs': 'c', 'bools': None}, + ]) + + series = pd.Series(arr.to_pandas()) + tm.assert_series_equal(series, expected) + + def test_to_pandas_multiple_chunks(self): + # ARROW-11855 + gc.collect() + bytes_start = pa.total_allocated_bytes() + ints1 = pa.array([1], type=pa.int64()) + ints2 = pa.array([2], type=pa.int64()) + arr1 = pa.StructArray.from_arrays([ints1], ['ints']) + arr2 = pa.StructArray.from_arrays([ints2], ['ints']) + arr = pa.chunked_array([arr1, arr2]) + + expected = pd.Series([ + {'ints': 1}, + {'ints': 2} + ]) + + series = pd.Series(arr.to_pandas()) + tm.assert_series_equal(series, expected) + + del series + del arr + del arr1 + del arr2 + del ints1 + del ints2 + bytes_end = pa.total_allocated_bytes() + assert bytes_end == bytes_start + + def test_from_numpy(self): + dt = np.dtype([('x', np.int32), + (('y_title', 'y'), np.bool_)]) + ty = pa.struct([pa.field('x', pa.int32()), + pa.field('y', pa.bool_())]) + + data = np.array([], dtype=dt) + arr = pa.array(data, type=ty) + assert arr.to_pylist() == [] + + data = np.array([(42, True), (43, False)], dtype=dt) + arr = pa.array(data, type=ty) + assert arr.to_pylist() == [{'x': 42, 'y': True}, + {'x': 43, 'y': False}] + + # With mask + arr = pa.array(data, mask=np.bool_([False, True]), type=ty) + assert arr.to_pylist() == [{'x': 42, 'y': True}, None] + + # Trivial struct type + dt = np.dtype([]) + ty = pa.struct([]) + + data = np.array([], dtype=dt) + arr = pa.array(data, type=ty) + assert arr.to_pylist() == [] + + data = np.array([(), ()], dtype=dt) + arr = pa.array(data, type=ty) + 
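+ # With a zero-field struct type, each record converts to an empty dict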
assert arr.to_pylist() == [{}, {}] + + def test_from_numpy_nested(self): + # Note: an object field inside a struct + dt = np.dtype([('x', np.dtype([('xx', np.int8), + ('yy', np.bool_)])), + ('y', np.int16), + ('z', np.object_)]) + # Note: itemsize is not necessarily a multiple of sizeof(object) + # object_ is 8 bytes on 64-bit systems, 4 bytes on 32-bit systems + assert dt.itemsize == (12 if sys.maxsize > 2**32 else 8) + ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()), + pa.field('yy', pa.bool_())])), + pa.field('y', pa.int16()), + pa.field('z', pa.string())]) + + data = np.array([], dtype=dt) + arr = pa.array(data, type=ty) + assert arr.to_pylist() == [] + + data = np.array([ + ((1, True), 2, 'foo'), + ((3, False), 4, 'bar')], dtype=dt) + arr = pa.array(data, type=ty) + assert arr.to_pylist() == [ + {'x': {'xx': 1, 'yy': True}, 'y': 2, 'z': 'foo'}, + {'x': {'xx': 3, 'yy': False}, 'y': 4, 'z': 'bar'}] + + @pytest.mark.slow + @pytest.mark.large_memory + def test_from_numpy_large(self): + # Exercise rechunking + nulls + target_size = 3 * 1024**3 # 4GB + dt = np.dtype([('x', np.float64), ('y', 'object')]) + bs = 65536 - dt.itemsize + block = b'.' * bs + n = target_size // (bs + dt.itemsize) + data = np.zeros(n, dtype=dt) + data['x'] = np.random.random_sample(n) + data['y'] = block + # Add implicit nulls + data['x'][data['x'] < 0.2] = np.nan + + ty = pa.struct([pa.field('x', pa.float64()), + pa.field('y', pa.binary())]) + arr = pa.array(data, type=ty, from_pandas=True) + arr.validate(full=True) + assert arr.num_chunks == 2 + + def iter_chunked_array(arr): + for chunk in arr.iterchunks(): + yield from chunk + + def check(arr, data, mask=None): + assert len(arr) == len(data) + xs = data['x'] + ys = data['y'] + for i, obj in enumerate(iter_chunked_array(arr)): + try: + d = obj.as_py() + if mask is not None and mask[i]: + assert d is None + else: + x = xs[i] + if np.isnan(x): + assert d['x'] is None + else: + assert d['x'] == x + assert d['y'] == ys[i] + except Exception: + print("Failed at index", i) + raise + + check(arr, data) + del arr + + # Now with explicit mask + mask = np.random.random_sample(n) < 0.2 + arr = pa.array(data, type=ty, mask=mask, from_pandas=True) + arr.validate(full=True) + assert arr.num_chunks == 2 + + check(arr, data, mask) + del arr + + def test_from_numpy_bad_input(self): + ty = pa.struct([pa.field('x', pa.int32()), + pa.field('y', pa.bool_())]) + dt = np.dtype([('x', np.int32), + ('z', np.bool_)]) + + data = np.array([], dtype=dt) + with pytest.raises(ValueError, + match="Missing field 'y'"): + pa.array(data, type=ty) + data = np.int32([]) + with pytest.raises(TypeError, + match="Expected struct array"): + pa.array(data, type=ty) + + def test_from_tuples(self): + df = pd.DataFrame({'tuples': [(1, 2), (3, 4)]}) + expected_df = pd.DataFrame( + {'tuples': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]}) + + # conversion from tuples works when specifying expected struct type + struct_type = pa.struct([('a', pa.int64()), ('b', pa.int64())]) + + arr = np.asarray(df['tuples']) + _check_array_roundtrip( + arr, expected=expected_df['tuples'], type=struct_type) + + expected_schema = pa.schema([('tuples', struct_type)]) + _check_pandas_roundtrip( + df, expected=expected_df, schema=expected_schema, + expected_schema=expected_schema) + + def test_struct_of_dictionary(self): + names = ['ints', 'strs'] + children = [pa.array([456, 789, 456]).dictionary_encode(), + pa.array(["foo", "foo", None]).dictionary_encode()] + arr = pa.StructArray.from_arrays(children, names=names) + 
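+ # to_pandas decodes the dictionary-encoded children back to plain Python values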
+ # Expected a Series of {field name: field value} dicts + rows_as_tuples = zip(*(child.to_pylist() for child in children)) + rows_as_dicts = [dict(zip(names, row)) for row in rows_as_tuples] + + expected = pd.Series(rows_as_dicts) + tm.assert_series_equal(arr.to_pandas(), expected) + + # Same but with nulls + arr = arr.take([0, None, 2]) + expected[1] = None + tm.assert_series_equal(arr.to_pandas(), expected) + + +class TestZeroCopyConversion: + """ + Tests that zero-copy conversion works with some types. + """ + + def test_zero_copy_success(self): + result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True) + npt.assert_array_equal(result, [0, 1, 2]) + + def test_zero_copy_dictionaries(self): + arr = pa.DictionaryArray.from_arrays( + np.array([0, 0]), + np.array([5], dtype="int64"), + ) + + result = arr.to_pandas(zero_copy_only=True) + values = pd.Categorical([5, 5]) + + tm.assert_series_equal(pd.Series(result), pd.Series(values), + check_names=False) + + def test_zero_copy_timestamp(self): + arr = np.array(['2007-07-13'], dtype='datetime64[ns]') + result = pa.array(arr).to_pandas(zero_copy_only=True) + npt.assert_array_equal(result, arr) + + def test_zero_copy_duration(self): + arr = np.array([1], dtype='timedelta64[ns]') + result = pa.array(arr).to_pandas(zero_copy_only=True) + npt.assert_array_equal(result, arr) + + def check_zero_copy_failure(self, arr): + with pytest.raises(pa.ArrowInvalid): + arr.to_pandas(zero_copy_only=True) + + def test_zero_copy_failure_on_object_types(self): + self.check_zero_copy_failure(pa.array(['A', 'B', 'C'])) + + def test_zero_copy_failure_with_int_when_nulls(self): + self.check_zero_copy_failure(pa.array([0, 1, None])) + + def test_zero_copy_failure_with_float_when_nulls(self): + self.check_zero_copy_failure(pa.array([0.0, 1.0, None])) + + def test_zero_copy_failure_on_bool_types(self): + self.check_zero_copy_failure(pa.array([True, False])) + + def test_zero_copy_failure_on_list_types(self): + arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64())) + self.check_zero_copy_failure(arr) + + def test_zero_copy_failure_on_timestamp_with_nulls(self): + arr = np.array([1, None], dtype='datetime64[ns]') + self.check_zero_copy_failure(pa.array(arr)) + + def test_zero_copy_failure_on_duration_with_nulls(self): + arr = np.array([1, None], dtype='timedelta64[ns]') + self.check_zero_copy_failure(pa.array(arr)) + + +def _non_threaded_conversion(): + df = _alltypes_example() + _check_pandas_roundtrip(df, use_threads=False) + _check_pandas_roundtrip(df, use_threads=False, as_batch=True) + + +def _threaded_conversion(): + df = _alltypes_example() + _check_pandas_roundtrip(df, use_threads=True) + _check_pandas_roundtrip(df, use_threads=True, as_batch=True) + + +class TestConvertMisc: + """ + Miscellaneous conversion tests. 
+ """ + + type_pairs = [ + (np.int8, pa.int8()), + (np.int16, pa.int16()), + (np.int32, pa.int32()), + (np.int64, pa.int64()), + (np.uint8, pa.uint8()), + (np.uint16, pa.uint16()), + (np.uint32, pa.uint32()), + (np.uint64, pa.uint64()), + (np.float16, pa.float16()), + (np.float32, pa.float32()), + (np.float64, pa.float64()), + # XXX unsupported + # (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])), + (np.object_, pa.string()), + (np.object_, pa.binary()), + (np.object_, pa.binary(10)), + (np.object_, pa.list_(pa.int64())), + ] + + def test_all_none_objects(self): + df = pd.DataFrame({'a': [None, None, None]}) + _check_pandas_roundtrip(df) + + def test_all_none_category(self): + df = pd.DataFrame({'a': [None, None, None]}) + df['a'] = df['a'].astype('category') + _check_pandas_roundtrip(df) + + def test_empty_arrays(self): + for dtype, pa_type in self.type_pairs: + arr = np.array([], dtype=dtype) + _check_array_roundtrip(arr, type=pa_type) + + def test_non_threaded_conversion(self): + _non_threaded_conversion() + + def test_threaded_conversion_multiprocess(self): + # Parallel conversion should work from child processes too (ARROW-2963) + pool = mp.Pool(2) + try: + pool.apply(_threaded_conversion) + finally: + pool.close() + pool.join() + + def test_category(self): + repeats = 5 + v1 = ['foo', None, 'bar', 'qux', np.nan] + v2 = [4, 5, 6, 7, 8] + v3 = [b'foo', None, b'bar', b'qux', np.nan] + + arrays = { + 'cat_strings': pd.Categorical(v1 * repeats), + 'cat_strings_with_na': pd.Categorical(v1 * repeats, + categories=['foo', 'bar']), + 'cat_ints': pd.Categorical(v2 * repeats), + 'cat_binary': pd.Categorical(v3 * repeats), + 'cat_strings_ordered': pd.Categorical( + v1 * repeats, categories=['bar', 'qux', 'foo'], + ordered=True), + 'ints': v2 * repeats, + 'ints2': v2 * repeats, + 'strings': v1 * repeats, + 'strings2': v1 * repeats, + 'strings3': v3 * repeats} + df = pd.DataFrame(arrays) + _check_pandas_roundtrip(df) + + for k in arrays: + _check_array_roundtrip(arrays[k]) + + def test_category_implicit_from_pandas(self): + # ARROW-3374 + def _check(v): + arr = pa.array(v) + result = arr.to_pandas() + tm.assert_series_equal(pd.Series(result), pd.Series(v)) + + arrays = [ + pd.Categorical(['a', 'b', 'c'], categories=['a', 'b']), + pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'], + ordered=True) + ] + for arr in arrays: + _check(arr) + + def test_empty_category(self): + # ARROW-2443 + df = pd.DataFrame({'cat': pd.Categorical([])}) + _check_pandas_roundtrip(df) + + def test_category_zero_chunks(self): + # ARROW-5952 + for pa_type, dtype in [(pa.string(), 'object'), (pa.int64(), 'int64')]: + a = pa.chunked_array([], pa.dictionary(pa.int8(), pa_type)) + result = a.to_pandas() + expected = pd.Categorical([], categories=np.array([], dtype=dtype)) + tm.assert_series_equal(pd.Series(result), pd.Series(expected)) + + table = pa.table({'a': a}) + result = table.to_pandas() + expected = pd.DataFrame({'a': expected}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "data,error_type", + [ + ({"a": ["a", 1, 2.0]}, pa.ArrowTypeError), + ({"a": ["a", 1, 2.0]}, pa.ArrowTypeError), + ({"a": [1, True]}, pa.ArrowTypeError), + ({"a": [True, "a"]}, pa.ArrowInvalid), + ({"a": [1, "a"]}, pa.ArrowInvalid), + ({"a": [1.0, "a"]}, pa.ArrowInvalid), + ], + ) + def test_mixed_types_fails(self, data, error_type): + df = pd.DataFrame(data) + msg = "Conversion failed for column a with type object" + with pytest.raises(error_type, match=msg): + pa.Table.from_pandas(df) + + def 
test_strided_data_import(self): + cases = [] + + columns = ['a', 'b', 'c'] + N, K = 100, 3 + random_numbers = np.random.randn(N, K).copy() * 100 + + numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', + 'f4', 'f8'] + + for type_name in numeric_dtypes: + # Casting np.float64 -> uint32 or uint64 throws a RuntimeWarning + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + cases.append(random_numbers.astype(type_name)) + + # strings + cases.append(np.array([random_ascii(10) for i in range(N * K)], + dtype=object) + .reshape(N, K).copy()) + + # booleans + boolean_objects = (np.array([True, False, True] * N, dtype=object) + .reshape(N, K).copy()) + + # add some nulls, so dtype comes back as objects + boolean_objects[5] = None + cases.append(boolean_objects) + + cases.append(np.arange("2016-01-01T00:00:00.001", N * K, + dtype='datetime64[ms]') + .reshape(N, K).copy()) + + strided_mask = (random_numbers > 0).astype(bool)[:, 0] + + for case in cases: + df = pd.DataFrame(case, columns=columns) + col = df['a'] + + _check_pandas_roundtrip(df) + _check_array_roundtrip(col) + _check_array_roundtrip(col, mask=strided_mask) + + def test_all_nones(self): + def _check_series(s): + converted = pa.array(s) + assert isinstance(converted, pa.NullArray) + assert len(converted) == 3 + assert converted.null_count == 3 + for item in converted: + assert item is pa.NA + + _check_series(pd.Series([None] * 3, dtype=object)) + _check_series(pd.Series([np.nan] * 3, dtype=object)) + _check_series(pd.Series([None, np.nan, None], dtype=object)) + + def test_partial_schema(self): + data = OrderedDict([ + ('a', [0, 1, 2, 3, 4]), + ('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)), + ('c', [-10, -5, 0, 5, 10]) + ]) + df = pd.DataFrame(data) + + partial_schema = pa.schema([ + pa.field('c', pa.int64()), + pa.field('a', pa.int64()) + ]) + + _check_pandas_roundtrip(df, schema=partial_schema, + expected=df[['c', 'a']], + expected_schema=partial_schema) + + def test_table_batch_empty_dataframe(self): + df = pd.DataFrame({}) + _check_pandas_roundtrip(df, preserve_index=None) + _check_pandas_roundtrip(df, preserve_index=None, as_batch=True) + + expected = pd.DataFrame(columns=pd.Index([])) + _check_pandas_roundtrip(df, expected, preserve_index=False) + _check_pandas_roundtrip(df, expected, preserve_index=False, as_batch=True) + + df2 = pd.DataFrame({}, index=[0, 1, 2]) + _check_pandas_roundtrip(df2, preserve_index=True) + _check_pandas_roundtrip(df2, as_batch=True, preserve_index=True) + + def test_convert_empty_table(self): + arr = pa.array([], type=pa.int64()) + empty_objects = pd.Series(np.array([], dtype=object)) + tm.assert_series_equal(arr.to_pandas(), + pd.Series(np.array([], dtype=np.int64))) + arr = pa.array([], type=pa.string()) + tm.assert_series_equal(arr.to_pandas(), empty_objects) + arr = pa.array([], type=pa.list_(pa.int64())) + tm.assert_series_equal(arr.to_pandas(), empty_objects) + arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())])) + tm.assert_series_equal(arr.to_pandas(), empty_objects) + + def test_non_natural_stride(self): + """ + ARROW-2172: converting from a Numpy array with a stride that's + not a multiple of itemsize. 
+ """ + dtype = np.dtype([('x', np.int32), ('y', np.int16)]) + data = np.array([(42, -1), (-43, 2)], dtype=dtype) + assert data.strides == (6,) + arr = pa.array(data['x'], type=pa.int32()) + assert arr.to_pylist() == [42, -43] + arr = pa.array(data['y'], type=pa.int16()) + assert arr.to_pylist() == [-1, 2] + + def test_array_from_strided_numpy_array(self): + # ARROW-5651 + np_arr = np.arange(0, 10, dtype=np.float32)[1:-1:2] + pa_arr = pa.array(np_arr, type=pa.float64()) + expected = pa.array([1.0, 3.0, 5.0, 7.0], type=pa.float64()) + pa_arr.equals(expected) + + def test_safe_unsafe_casts(self): + # ARROW-2799 + df = pd.DataFrame({ + 'A': list('abc'), + 'B': np.linspace(0, 1, 3) + }) + + schema = pa.schema([ + pa.field('A', pa.string()), + pa.field('B', pa.int32()) + ]) + + with pytest.raises(ValueError): + pa.Table.from_pandas(df, schema=schema) + + table = pa.Table.from_pandas(df, schema=schema, safe=False) + assert table.column('B').type == pa.int32() + + def test_error_sparse(self): + # ARROW-2818 + try: + df = pd.DataFrame({'a': pd.arrays.SparseArray([1, np.nan, 3])}) + except AttributeError: + # pandas.arrays module introduced in pandas 0.24 + df = pd.DataFrame({'a': pd.SparseArray([1, np.nan, 3])}) + with pytest.raises(TypeError, match="Sparse pandas data"): + pa.Table.from_pandas(df) + + +def test_safe_cast_from_float_with_nans_to_int(): + # TODO(kszucs): write tests for creating Date32 and Date64 arrays, see + # ARROW-4258 and https://github.com/apache/arrow/pull/3395 + values = pd.Series([1, 2, None, 4]) + arr = pa.Array.from_pandas(values, type=pa.int32(), safe=True) + expected = pa.array([1, 2, None, 4], type=pa.int32()) + assert arr.equals(expected) + + +def _fully_loaded_dataframe_example(): + index = pd.MultiIndex.from_arrays([ + pd.date_range('2000-01-01', periods=5).repeat(2), + np.tile(np.array(['foo', 'bar'], dtype=object), 5) + ]) + + c1 = pd.date_range('2000-01-01', periods=10) + data = { + 0: c1, + 1: c1.tz_localize('utc'), + 2: c1.tz_localize('US/Eastern'), + 3: c1[::2].tz_localize('utc').repeat(2).astype('category'), + 4: ['foo', 'bar'] * 5, + 5: pd.Series(['foo', 'bar'] * 5).astype('category').values, + 6: [True, False] * 5, + 7: np.random.randn(10), + 8: np.random.randint(0, 100, size=10), + 9: pd.period_range('2013', periods=10, freq='M'), + 10: pd.interval_range(start=1, freq=1, periods=10), + } + return pd.DataFrame(data, index=index) + + +@pytest.mark.parametrize('columns', ([b'foo'], ['foo'])) +def test_roundtrip_with_bytes_unicode(columns): + if Version("2.0.0") <= Version(pd.__version__) < Version("3.0.0"): + # TODO: regression in pandas, hopefully fixed in next version + # https://issues.apache.org/jira/browse/ARROW-18394 + # https://github.com/pandas-dev/pandas/issues/50127 + pytest.skip("Regression in pandas 2.0.0") + + df = pd.DataFrame(columns=columns) + table1 = pa.Table.from_pandas(df) + table2 = pa.Table.from_pandas(table1.to_pandas()) + assert table1.equals(table2) + assert table1.schema.equals(table2.schema) + assert table1.schema.metadata == table2.schema.metadata + + +def _pytime_from_micros(val): + microseconds = val % 1000000 + val //= 1000000 + seconds = val % 60 + val //= 60 + minutes = val % 60 + hours = val // 60 + return time(hours, minutes, seconds, microseconds) + + +def _pytime_to_micros(pytime): + return (pytime.hour * 3600000000 + + pytime.minute * 60000000 + + pytime.second * 1000000 + + pytime.microsecond) + + +def test_convert_unsupported_type_error_message(): + # ARROW-1454 + + # custom python objects + class A: + pass + + df = 
pd.DataFrame({'a': [A(), A()]}) + + msg = 'Conversion failed for column a with type object' + with pytest.raises(ValueError, match=msg): + pa.Table.from_pandas(df) + + +# ---------------------------------------------------------------------- +# Hypothesis tests + + +@h.given(past.arrays(past.pandas_compatible_types)) +def test_array_to_pandas_roundtrip(arr): + s = arr.to_pandas() + restored = pa.array(s, type=arr.type, from_pandas=True) + assert restored.equals(arr) + + +# ---------------------------------------------------------------------- +# Test object deduplication in to_pandas + + +def _generate_dedup_example(nunique, repeats): + unique_values = [rands(10) for i in range(nunique)] + return unique_values * repeats + + +def _assert_nunique(obj, expected): + assert len({id(x) for x in obj}) == expected + + +def test_to_pandas_deduplicate_strings_array_types(): + nunique = 100 + repeats = 10 + values = _generate_dedup_example(nunique, repeats) + + for arr in [pa.array(values, type=pa.binary()), + pa.array(values, type=pa.utf8()), + pa.chunked_array([values, values])]: + _assert_nunique(arr.to_pandas(), nunique) + _assert_nunique(arr.to_pandas(deduplicate_objects=False), len(arr)) + + +def test_to_pandas_deduplicate_strings_table_types(): + nunique = 100 + repeats = 10 + values = _generate_dedup_example(nunique, repeats) + + arr = pa.array(values) + rb = pa.RecordBatch.from_arrays([arr], ['foo']) + tbl = pa.Table.from_batches([rb]) + + for obj in [rb, tbl]: + _assert_nunique(obj.to_pandas()['foo'], nunique) + _assert_nunique(obj.to_pandas(deduplicate_objects=False)['foo'], + len(obj)) + + +def test_to_pandas_deduplicate_integers_as_objects(): + nunique = 100 + repeats = 10 + + # Python automatically interns smaller integers + unique_values = list(np.random.randint(10000000, 1000000000, size=nunique)) + unique_values[nunique // 2] = None + + arr = pa.array(unique_values * repeats) + + _assert_nunique(arr.to_pandas(integer_object_nulls=True), nunique) + _assert_nunique(arr.to_pandas(integer_object_nulls=True, + deduplicate_objects=False), + # Account for None + (nunique - 1) * repeats + 1) + + +def test_to_pandas_deduplicate_date_time(): + nunique = 100 + repeats = 10 + + unique_values = list(range(nunique)) + + cases = [ + # raw type, array type, to_pandas options + ('int32', 'date32', {'date_as_object': True}), + ('int64', 'date64', {'date_as_object': True}), + ('int32', 'time32[ms]', {}), + ('int64', 'time64[us]', {}) + ] + + for raw_type, array_type, pandas_options in cases: + raw_arr = pa.array(unique_values * repeats, type=raw_type) + casted_arr = raw_arr.cast(array_type) + + _assert_nunique(casted_arr.to_pandas(**pandas_options), + nunique) + _assert_nunique(casted_arr.to_pandas(deduplicate_objects=False, + **pandas_options), + len(casted_arr)) + + +# --------------------------------------------------------------------- + +def test_table_from_pandas_checks_field_nullability(): + # ARROW-2136 + df = pd.DataFrame({'a': [1.2, 2.1, 3.1], + 'b': [np.nan, 'string', 'foo']}) + schema = pa.schema([pa.field('a', pa.float64(), nullable=False), + pa.field('b', pa.utf8(), nullable=False)]) + + with pytest.raises(ValueError): + pa.Table.from_pandas(df, schema=schema) + + +def test_table_from_pandas_keeps_column_order_of_dataframe(): + df1 = pd.DataFrame(OrderedDict([ + ('partition', [0, 0, 1, 1]), + ('arrays', [[0, 1, 2], [3, 4], None, None]), + ('floats', [None, None, 1.1, 3.3]) + ])) + df2 = df1[['floats', 'partition', 'arrays']] + + schema1 = pa.schema([ + ('partition', pa.int64()), + 
('arrays', pa.list_(pa.int64())), + ('floats', pa.float64()), + ]) + schema2 = pa.schema([ + ('floats', pa.float64()), + ('partition', pa.int64()), + ('arrays', pa.list_(pa.int64())) + ]) + + table1 = pa.Table.from_pandas(df1, preserve_index=False) + table2 = pa.Table.from_pandas(df2, preserve_index=False) + + assert table1.schema.equals(schema1) + assert table2.schema.equals(schema2) + + +def test_table_from_pandas_keeps_column_order_of_schema(): + # ARROW-3766 + df = pd.DataFrame(OrderedDict([ + ('partition', [0, 0, 1, 1]), + ('arrays', [[0, 1, 2], [3, 4], None, None]), + ('floats', [None, None, 1.1, 3.3]) + ])) + + schema = pa.schema([ + ('floats', pa.float64()), + ('arrays', pa.list_(pa.int32())), + ('partition', pa.int32()) + ]) + + df1 = df[df.partition == 0] + df2 = df[df.partition == 1][['floats', 'partition', 'arrays']] + + table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False) + table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False) + + assert table1.schema.equals(schema) + assert table1.schema.equals(table2.schema) + + +def test_table_from_pandas_columns_argument_only_does_filtering(): + df = pd.DataFrame(OrderedDict([ + ('partition', [0, 0, 1, 1]), + ('arrays', [[0, 1, 2], [3, 4], None, None]), + ('floats', [None, None, 1.1, 3.3]) + ])) + + columns1 = ['arrays', 'floats', 'partition'] + schema1 = pa.schema([ + ('arrays', pa.list_(pa.int64())), + ('floats', pa.float64()), + ('partition', pa.int64()) + ]) + + columns2 = ['floats', 'partition'] + schema2 = pa.schema([ + ('floats', pa.float64()), + ('partition', pa.int64()) + ]) + + table1 = pa.Table.from_pandas(df, columns=columns1, preserve_index=False) + table2 = pa.Table.from_pandas(df, columns=columns2, preserve_index=False) + + assert table1.schema.equals(schema1) + assert table2.schema.equals(schema2) + + +def test_table_from_pandas_columns_and_schema_are_mutually_exclusive(): + df = pd.DataFrame(OrderedDict([ + ('partition', [0, 0, 1, 1]), + ('arrays', [[0, 1, 2], [3, 4], None, None]), + ('floats', [None, None, 1.1, 3.3]) + ])) + schema = pa.schema([ + ('partition', pa.int32()), + ('arrays', pa.list_(pa.int32())), + ('floats', pa.float64()), + ]) + columns = ['arrays', 'floats'] + + with pytest.raises(ValueError): + pa.Table.from_pandas(df, schema=schema, columns=columns) + + +def test_table_from_pandas_keeps_schema_nullability(): + # ARROW-5169 + df = pd.DataFrame({'a': [1, 2, 3, 4]}) + + schema = pa.schema([ + pa.field('a', pa.int64(), nullable=False), + ]) + + table = pa.Table.from_pandas(df) + assert table.schema.field('a').nullable is True + table = pa.Table.from_pandas(df, schema=schema) + assert table.schema.field('a').nullable is False + + +def test_table_from_pandas_schema_index_columns(): + # ARROW-5220 + df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]}) + + schema = pa.schema([ + ('a', pa.int64()), + ('b', pa.float64()), + ('index', pa.int64()), + ]) + + # schema includes index with name not in dataframe + with pytest.raises(KeyError, match="name 'index' present in the"): + pa.Table.from_pandas(df, schema=schema) + + df.index.name = 'index' + + # schema includes correct index name -> roundtrip works + _check_pandas_roundtrip(df, schema=schema, preserve_index=True, + expected_schema=schema) + + # schema includes correct index name but preserve_index=False + with pytest.raises(ValueError, match="'preserve_index=False' was"): + pa.Table.from_pandas(df, schema=schema, preserve_index=False) + + # in case of preserve_index=None -> RangeIndex serialized as metadata + # clashes 
with the index in the schema + with pytest.raises(ValueError, match="name 'index' is present in the " + "schema, but it is a RangeIndex"): + pa.Table.from_pandas(df, schema=schema, preserve_index=None) + + df.index = pd.Index([0, 1, 2], name='index') + + # for non-RangeIndex, both preserve_index=None and True work + _check_pandas_roundtrip(df, schema=schema, preserve_index=None, + expected_schema=schema) + _check_pandas_roundtrip(df, schema=schema, preserve_index=True, + expected_schema=schema) + + # schema has different order (index column not at the end) + schema = pa.schema([ + ('index', pa.int64()), + ('a', pa.int64()), + ('b', pa.float64()), + ]) + _check_pandas_roundtrip(df, schema=schema, preserve_index=None, + expected_schema=schema) + _check_pandas_roundtrip(df, schema=schema, preserve_index=True, + expected_schema=schema) + + # schema does not include the index -> index is not included as column + # even though preserve_index=True/None + schema = pa.schema([ + ('a', pa.int64()), + ('b', pa.float64()), + ]) + expected = df.copy() + expected = expected.reset_index(drop=True) + _check_pandas_roundtrip(df, schema=schema, preserve_index=None, + expected_schema=schema, expected=expected) + _check_pandas_roundtrip(df, schema=schema, preserve_index=True, + expected_schema=schema, expected=expected) + + # dataframe with a MultiIndex + df.index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)], + names=['level1', 'level2']) + schema = pa.schema([ + ('level1', pa.string()), + ('level2', pa.int64()), + ('a', pa.int64()), + ('b', pa.float64()), + ]) + _check_pandas_roundtrip(df, schema=schema, preserve_index=True, + expected_schema=schema) + _check_pandas_roundtrip(df, schema=schema, preserve_index=None, + expected_schema=schema) + + # only one of the levels of the MultiIndex is included + schema = pa.schema([ + ('level2', pa.int64()), + ('a', pa.int64()), + ('b', pa.float64()), + ]) + expected = df.copy() + expected = expected.reset_index('level1', drop=True) + _check_pandas_roundtrip(df, schema=schema, preserve_index=True, + expected_schema=schema, expected=expected) + _check_pandas_roundtrip(df, schema=schema, preserve_index=None, + expected_schema=schema, expected=expected) + + +def test_table_from_pandas_schema_index_columns__unnamed_index(): + # ARROW-6999 - unnamed indices in specified schema + df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]}) + + expected_schema = pa.schema([ + ('a', pa.int64()), + ('b', pa.float64()), + ('__index_level_0__', pa.int64()), + ]) + + schema = pa.Schema.from_pandas(df, preserve_index=True) + table = pa.Table.from_pandas(df, preserve_index=True, schema=schema) + assert table.schema.remove_metadata().equals(expected_schema) + + # non-RangeIndex (preserved by default) + df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]}, index=[0, 1, 2]) + schema = pa.Schema.from_pandas(df) + table = pa.Table.from_pandas(df, schema=schema) + assert table.schema.remove_metadata().equals(expected_schema) + + +def test_table_from_pandas_schema_with_custom_metadata(): + # ARROW-7087 - metadata disappear from pandas + df = pd.DataFrame() + schema = pa.Schema.from_pandas(df).with_metadata({'meta': 'True'}) + table = pa.Table.from_pandas(df, schema=schema) + assert table.schema.metadata.get(b'meta') == b'True' + + +def test_table_from_pandas_schema_field_order_metadata(): + # ARROW-10532 + # ensure that a different field order in specified schema doesn't + # mangle metadata + df = pd.DataFrame({ + "datetime": pd.date_range("2020-01-01T00:00:00Z", freq="h", 
periods=2), + "float": np.random.randn(2) + }) + + schema = pa.schema([ + pa.field("float", pa.float32(), nullable=True), + pa.field("datetime", pa.timestamp("s", tz="UTC"), nullable=False) + ]) + + table = pa.Table.from_pandas(df, schema=schema) + assert table.schema.equals(schema) + metadata_float = table.schema.pandas_metadata["columns"][0] + assert metadata_float["name"] == "float" + assert metadata_float["metadata"] is None + metadata_datetime = table.schema.pandas_metadata["columns"][1] + assert metadata_datetime["name"] == "datetime" + assert metadata_datetime["metadata"] == {'timezone': 'UTC'} + + result = table.to_pandas() + coerce_cols_to_types = {"float": "float32"} + if Version(pd.__version__) >= Version("2.0.0"): + # Pandas v2 now support non-nanosecond time units + coerce_cols_to_types["datetime"] = "datetime64[s, UTC]" + expected = df[["float", "datetime"]].astype(coerce_cols_to_types) + + tm.assert_frame_equal(result, expected) + + +# ---------------------------------------------------------------------- +# RecordBatch, Table + + +def test_recordbatch_from_to_pandas(): + data = pd.DataFrame({ + 'c1': np.array([1, 2, 3, 4, 5], dtype='int64'), + 'c2': np.array([1, 2, 3, 4, 5], dtype='uint32'), + 'c3': np.random.randn(5), + 'c4': ['foo', 'bar', None, 'baz', 'qux'], + 'c5': [False, True, False, True, False] + }) + + batch = pa.RecordBatch.from_pandas(data) + result = batch.to_pandas() + tm.assert_frame_equal(data, result) + + +def test_recordbatchlist_to_pandas(): + data1 = pd.DataFrame({ + 'c1': np.array([1, 1, 2], dtype='uint32'), + 'c2': np.array([1.0, 2.0, 3.0], dtype='float64'), + 'c3': [True, None, False], + 'c4': ['foo', 'bar', None] + }) + + data2 = pd.DataFrame({ + 'c1': np.array([3, 5], dtype='uint32'), + 'c2': np.array([4.0, 5.0], dtype='float64'), + 'c3': [True, True], + 'c4': ['baz', 'qux'] + }) + + batch1 = pa.RecordBatch.from_pandas(data1) + batch2 = pa.RecordBatch.from_pandas(data2) + + table = pa.Table.from_batches([batch1, batch2]) + result = table.to_pandas() + data = pd.concat([data1, data2]).reset_index(drop=True) + tm.assert_frame_equal(data, result) + + +def test_recordbatch_table_pass_name_to_pandas(): + rb = pa.record_batch([pa.array([1, 2, 3, 4])], names=['a0']) + t = pa.table([pa.array([1, 2, 3, 4])], names=['a0']) + assert rb[0].to_pandas().name == 'a0' + assert t[0].to_pandas().name == 'a0' + + +# ---------------------------------------------------------------------- +# Metadata serialization + + +@pytest.mark.parametrize( + ('type', 'expected'), + [ + (pa.null(), 'empty'), + (pa.bool_(), 'bool'), + (pa.int8(), 'int8'), + (pa.int16(), 'int16'), + (pa.int32(), 'int32'), + (pa.int64(), 'int64'), + (pa.uint8(), 'uint8'), + (pa.uint16(), 'uint16'), + (pa.uint32(), 'uint32'), + (pa.uint64(), 'uint64'), + (pa.float16(), 'float16'), + (pa.float32(), 'float32'), + (pa.float64(), 'float64'), + (pa.date32(), 'date'), + (pa.date64(), 'date'), + (pa.binary(), 'bytes'), + (pa.binary(length=4), 'bytes'), + (pa.string(), 'unicode'), + (pa.list_(pa.list_(pa.int16())), 'list[list[int16]]'), + (pa.decimal128(18, 3), 'decimal'), + (pa.timestamp('ms'), 'datetime'), + (pa.timestamp('us', 'UTC'), 'datetimetz'), + (pa.time32('s'), 'time'), + (pa.time64('us'), 'time') + ] +) +def test_logical_type(type, expected): + assert get_logical_type(type) == expected + + +# ---------------------------------------------------------------------- +# to_pandas uses MemoryPool + +def test_array_uses_memory_pool(): + # ARROW-6570 + N = 10000 + arr = pa.array(np.arange(N, dtype=np.int64), 
+ mask=np.random.randint(0, 2, size=N).astype(np.bool_)) + + # In the case the gc is caught loading + gc.collect() + + prior_allocation = pa.total_allocated_bytes() + + x = arr.to_pandas() + assert pa.total_allocated_bytes() == (prior_allocation + N * 8) + x = None # noqa + gc.collect() + + assert pa.total_allocated_bytes() == prior_allocation + + # zero copy does not allocate memory + arr = pa.array(np.arange(N, dtype=np.int64)) + + prior_allocation = pa.total_allocated_bytes() + x = arr.to_pandas() # noqa + assert pa.total_allocated_bytes() == prior_allocation + + +def test_singleton_blocks_zero_copy(): + # Part of ARROW-3789 + t = pa.table([pa.array(np.arange(1000, dtype=np.int64))], ['f0']) + + # Zero copy if split_blocks=True + _check_to_pandas_memory_unchanged(t, split_blocks=True) + + prior_allocation = pa.total_allocated_bytes() + result = t.to_pandas() + # access private `_values` because the public `values` is made read-only by pandas + assert result['f0']._values.flags.writeable + assert pa.total_allocated_bytes() > prior_allocation + + +def _check_to_pandas_memory_unchanged(obj, **kwargs): + prior_allocation = pa.total_allocated_bytes() + x = obj.to_pandas(**kwargs) # noqa + + # Memory allocation unchanged -- either zero copy or self-destructing + assert pa.total_allocated_bytes() == prior_allocation + + +def test_to_pandas_split_blocks(): + # ARROW-3789 + t = pa.table([ + pa.array([1, 2, 3, 4, 5], type='i1'), + pa.array([1, 2, 3, 4, 5], type='i4'), + pa.array([1, 2, 3, 4, 5], type='i8'), + pa.array([1, 2, 3, 4, 5], type='f4'), + pa.array([1, 2, 3, 4, 5], type='f8'), + pa.array([1, 2, 3, 4, 5], type='f8'), + pa.array([1, 2, 3, 4, 5], type='f8'), + pa.array([1, 2, 3, 4, 5], type='f8'), + ], ['f{}'.format(i) for i in range(8)]) + + _check_blocks_created(t, 8) + _check_to_pandas_memory_unchanged(t, split_blocks=True) + + +def _get_mgr(df): + if Version(pd.__version__) < Version("1.1.0"): + return df._data + else: + return df._mgr + + +def _check_blocks_created(t, number): + x = t.to_pandas(split_blocks=True) + assert len(_get_mgr(x).blocks) == number + + +def test_to_pandas_self_destruct(): + K = 50 + + def _make_table(): + return pa.table([ + # Slice to force a copy + pa.array(np.random.randn(10000)[::2]) + for i in range(K) + ], ['f{}'.format(i) for i in range(K)]) + + t = _make_table() + _check_to_pandas_memory_unchanged(t, split_blocks=True, self_destruct=True) + + # Check non-split-block behavior + t = _make_table() + _check_to_pandas_memory_unchanged(t, self_destruct=True) + + +def test_table_uses_memory_pool(): + N = 10000 + arr = pa.array(np.arange(N, dtype=np.int64)) + t = pa.table([arr, arr, arr], ['f0', 'f1', 'f2']) + + prior_allocation = pa.total_allocated_bytes() + x = t.to_pandas() + + assert pa.total_allocated_bytes() == (prior_allocation + 3 * N * 8) + + # Check successful garbage collection + x = None # noqa + gc.collect() + assert pa.total_allocated_bytes() == prior_allocation + + +def test_object_leak_in_numpy_array(): + # ARROW-6876 + arr = pa.array([{'a': 1}]) + np_arr = arr.to_pandas() + assert np_arr.dtype == np.dtype('object') + obj = np_arr[0] + refcount = sys.getrefcount(obj) + assert sys.getrefcount(obj) == refcount + del np_arr + assert sys.getrefcount(obj) == refcount - 1 + + +def test_object_leak_in_dataframe(): + # ARROW-6876 + arr = pa.array([{'a': 1}]) + table = pa.table([arr], ['f0']) + col = table.to_pandas()['f0'] + assert col.dtype == np.dtype('object') + obj = col[0] + refcount = sys.getrefcount(obj) + assert sys.getrefcount(obj) == refcount + 
del col + assert sys.getrefcount(obj) == refcount - 1 + + +# ---------------------------------------------------------------------- +# Some nested array tests array tests + + +def test_array_from_py_float32(): + data = [[1.2, 3.4], [9.0, 42.0]] + + t = pa.float32() + + arr1 = pa.array(data[0], type=t) + arr2 = pa.array(data, type=pa.list_(t)) + + expected1 = np.array(data[0], dtype=np.float32) + expected2 = pd.Series([np.array(data[0], dtype=np.float32), + np.array(data[1], dtype=np.float32)]) + + assert arr1.type == t + assert arr1.equals(pa.array(expected1)) + assert arr2.equals(pa.array(expected2)) + + +# ---------------------------------------------------------------------- +# Timestamp tests + + +def test_cast_timestamp_unit(): + # ARROW-1680 + val = datetime.now() + s = pd.Series([val]) + s_nyc = s.dt.tz_localize('tzlocal()').dt.tz_convert('America/New_York') + + us_with_tz = pa.timestamp('us', tz='America/New_York') + + arr = pa.Array.from_pandas(s_nyc, type=us_with_tz) + + # ARROW-1906 + assert arr.type == us_with_tz + + arr2 = pa.Array.from_pandas(s, type=pa.timestamp('us')) + + assert arr[0].as_py() == s_nyc[0].to_pydatetime() + assert arr2[0].as_py() == s[0].to_pydatetime() + + # Disallow truncation + arr = pa.array([123123], type='int64').cast(pa.timestamp('ms')) + expected = pa.array([123], type='int64').cast(pa.timestamp('s')) + + # sanity check that the cast worked right + assert arr.type == pa.timestamp('ms') + + target = pa.timestamp('s') + with pytest.raises(ValueError): + arr.cast(target) + + result = arr.cast(target, safe=False) + assert result.equals(expected) + + # ARROW-1949 + series = pd.Series([pd.Timestamp(1), pd.Timestamp(10), pd.Timestamp(1000)]) + expected = pa.array([0, 0, 1], type=pa.timestamp('us')) + + with pytest.raises(ValueError): + pa.array(series, type=pa.timestamp('us')) + + with pytest.raises(ValueError): + pa.Array.from_pandas(series, type=pa.timestamp('us')) + + result = pa.Array.from_pandas(series, type=pa.timestamp('us'), safe=False) + assert result.equals(expected) + + result = pa.array(series, type=pa.timestamp('us'), safe=False) + assert result.equals(expected) + + +def test_nested_with_timestamp_tz_round_trip(): + ts = pd.Timestamp.now() + ts_dt = ts.to_pydatetime() + arr = pa.array([ts_dt], type=pa.timestamp('us', tz='America/New_York')) + struct = pa.StructArray.from_arrays([arr, arr], ['start', 'stop']) + + result = struct.to_pandas() + restored = pa.array(result) + assert restored.equals(struct) + + +def test_nested_with_timestamp_tz(): + # ARROW-7723 + ts = pd.Timestamp.now() + ts_dt = ts.to_pydatetime() + + # XXX: Ensure that this data does not get promoted to nanoseconds (and thus + # integers) to preserve behavior in 0.15.1 + for unit in ['s', 'ms', 'us']: + if unit in ['s', 'ms']: + # This is used for verifying timezone conversion to micros are not + # important + def truncate(x): return x.replace(microsecond=0) + else: + def truncate(x): return x + arr = pa.array([ts], type=pa.timestamp(unit)) + arr2 = pa.array([ts], type=pa.timestamp(unit, tz='America/New_York')) + + arr3 = pa.StructArray.from_arrays([arr, arr], ['start', 'stop']) + arr4 = pa.StructArray.from_arrays([arr2, arr2], ['start', 'stop']) + + result = arr3.to_pandas() + assert isinstance(result[0]['start'], datetime) + assert result[0]['start'].tzinfo is None + assert isinstance(result[0]['stop'], datetime) + assert result[0]['stop'].tzinfo is None + + result = arr4.to_pandas() + assert isinstance(result[0]['start'], datetime) + assert result[0]['start'].tzinfo is not 
None + utc_dt = result[0]['start'].astimezone(timezone.utc) + assert truncate(utc_dt).replace(tzinfo=None) == truncate(ts_dt) + assert isinstance(result[0]['stop'], datetime) + assert result[0]['stop'].tzinfo is not None + + # same conversion for table + result = pa.table({'a': arr3}).to_pandas() + assert isinstance(result['a'][0]['start'], datetime) + assert result['a'][0]['start'].tzinfo is None + assert isinstance(result['a'][0]['stop'], datetime) + assert result['a'][0]['stop'].tzinfo is None + + result = pa.table({'a': arr4}).to_pandas() + assert isinstance(result['a'][0]['start'], datetime) + assert result['a'][0]['start'].tzinfo is not None + assert isinstance(result['a'][0]['stop'], datetime) + assert result['a'][0]['stop'].tzinfo is not None + + +# ---------------------------------------------------------------------- +# DictionaryArray tests + + +def test_dictionary_with_pandas(): + src_indices = np.repeat([0, 1, 2], 2) + dictionary = np.array(['foo', 'bar', 'baz'], dtype=object) + mask = np.array([False, False, True, False, False, False]) + + for index_type in ['uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', + 'uint64', 'int64']: + indices = src_indices.astype(index_type) + d1 = pa.DictionaryArray.from_arrays(indices, dictionary) + d2 = pa.DictionaryArray.from_arrays(indices, dictionary, mask=mask) + + if index_type[0] == 'u': + # TODO: unsigned dictionary indices to pandas + with pytest.raises(TypeError): + d1.to_pandas() + continue + + pandas1 = d1.to_pandas() + ex_pandas1 = pd.Categorical.from_codes(indices, categories=dictionary) + + tm.assert_series_equal(pd.Series(pandas1), pd.Series(ex_pandas1)) + + pandas2 = d2.to_pandas() + assert pandas2.isnull().sum() == 1 + + # Unsigned integers converted to signed + signed_indices = indices + if index_type[0] == 'u': + signed_indices = indices.astype(index_type[1:]) + ex_pandas2 = pd.Categorical.from_codes(np.where(mask, -1, + signed_indices), + categories=dictionary) + + tm.assert_series_equal(pd.Series(pandas2), pd.Series(ex_pandas2)) + + +def random_strings(n, item_size, pct_null=0, dictionary=None): + if dictionary is not None: + result = dictionary[np.random.randint(0, len(dictionary), size=n)] + else: + result = np.array([random_ascii(item_size) for i in range(n)], + dtype=object) + + if pct_null > 0: + result[np.random.rand(n) < pct_null] = None + + return result + + +def test_variable_dictionary_to_pandas(): + np.random.seed(12345) + + d1 = pa.array(random_strings(100, 32), type='string') + d2 = pa.array(random_strings(100, 16), type='string') + d3 = pa.array(random_strings(10000, 10), type='string') + + a1 = pa.DictionaryArray.from_arrays( + np.random.randint(0, len(d1), size=1000, dtype='i4'), + d1 + ) + a2 = pa.DictionaryArray.from_arrays( + np.random.randint(0, len(d2), size=1000, dtype='i4'), + d2 + ) + + # With some nulls + a3 = pa.DictionaryArray.from_arrays( + np.random.randint(0, len(d3), size=1000, dtype='i4'), d3) + + i4 = pa.array( + np.random.randint(0, len(d3), size=1000, dtype='i4'), + mask=np.random.rand(1000) < 0.1 + ) + a4 = pa.DictionaryArray.from_arrays(i4, d3) + + expected_dict = pa.concat_arrays([d1, d2, d3]) + + a = pa.chunked_array([a1, a2, a3, a4]) + a_dense = pa.chunked_array([a1.cast('string'), + a2.cast('string'), + a3.cast('string'), + a4.cast('string')]) + + result = a.to_pandas() + result_dense = a_dense.to_pandas() + + assert (result.cat.categories == expected_dict.to_pandas()).all() + + expected_dense = result.astype('str') + expected_dense[result_dense.isnull()] = None + 
tm.assert_series_equal(result_dense, expected_dense) + + +def test_dictionary_encoded_nested_to_pandas(): + # ARROW-6899 + child = pa.array(['a', 'a', 'a', 'b', 'b']).dictionary_encode() + + arr = pa.ListArray.from_arrays([0, 3, 5], child) + + result = arr.to_pandas() + expected = pd.Series([np.array(['a', 'a', 'a'], dtype=object), + np.array(['b', 'b'], dtype=object)]) + + tm.assert_series_equal(result, expected) + + +def test_dictionary_from_pandas(): + cat = pd.Categorical(['a', 'b', 'a']) + expected_type = pa.dictionary(pa.int8(), pa.string()) + + result = pa.array(cat) + assert result.to_pylist() == ['a', 'b', 'a'] + assert result.type.equals(expected_type) + + # with missing values in categorical + cat = pd.Categorical(['a', 'b', None, 'a']) + + result = pa.array(cat) + assert result.to_pylist() == ['a', 'b', None, 'a'] + assert result.type.equals(expected_type) + + # with additional mask + result = pa.array(cat, mask=np.array([False, False, False, True])) + assert result.to_pylist() == ['a', 'b', None, None] + assert result.type.equals(expected_type) + + +def test_dictionary_from_pandas_specified_type(): + # ARROW-7168 - ensure specified type is always respected + + # the same as cat = pd.Categorical(['a', 'b']) but explicit about dtypes + cat = pd.Categorical.from_codes( + np.array([0, 1], dtype='int8'), np.array(['a', 'b'], dtype=object)) + + # different index type -> allow this + # (the type of the 'codes' in pandas is not part of the data type) + typ = pa.dictionary(index_type=pa.int16(), value_type=pa.string()) + result = pa.array(cat, type=typ) + assert result.type.equals(typ) + assert result.to_pylist() == ['a', 'b'] + + # mismatching values type -> raise error + typ = pa.dictionary(index_type=pa.int8(), value_type=pa.int64()) + with pytest.raises(pa.ArrowInvalid): + result = pa.array(cat, type=typ) + + # mismatching order -> raise error + typ = pa.dictionary( + index_type=pa.int8(), value_type=pa.string(), ordered=True) + msg = "The 'ordered' flag of the passed categorical values " + with pytest.raises(ValueError, match=msg): + result = pa.array(cat, type=typ) + assert result.to_pylist() == ['a', 'b'] + + # with mask + typ = pa.dictionary(index_type=pa.int16(), value_type=pa.string()) + result = pa.array(cat, type=typ, mask=np.array([False, True])) + assert result.type.equals(typ) + assert result.to_pylist() == ['a', None] + + # empty categorical -> be flexible in values type to allow + cat = pd.Categorical([]) + + typ = pa.dictionary(index_type=pa.int8(), value_type=pa.string()) + result = pa.array(cat, type=typ) + assert result.type.equals(typ) + assert result.to_pylist() == [] + typ = pa.dictionary(index_type=pa.int8(), value_type=pa.int64()) + result = pa.array(cat, type=typ) + assert result.type.equals(typ) + assert result.to_pylist() == [] + + # passing non-dictionary type + cat = pd.Categorical(['a', 'b']) + result = pa.array(cat, type=pa.string()) + expected = pa.array(['a', 'b'], type=pa.string()) + assert result.equals(expected) + assert result.to_pylist() == ['a', 'b'] + + +def test_convert_categories_to_array_with_string_pyarrow_dtype(): + # gh-33727: categories should be converted to pa.Array + if Version(pd.__version__) < Version("1.3.0"): + pytest.skip("PyArrow backed string data type introduced in pandas 1.3.0") + + df = pd.DataFrame({"x": ["foo", "bar", "foo"]}, dtype="string[pyarrow]") + df = df.astype("category") + indices = pa.array(df['x'].cat.codes) + dictionary = pa.array(df["x"].cat.categories.values) + assert isinstance(dictionary, pa.Array) + + 
expected = pa.Array.from_pandas(df['x']) + result = pa.DictionaryArray.from_arrays(indices, dictionary) + assert result == expected + + +# ---------------------------------------------------------------------- +# Array protocol in pandas conversions tests + + +def test_array_protocol(): + df = pd.DataFrame({'a': pd.Series([1, 2, None], dtype='Int64')}) + + # __arrow_array__ added to pandas IntegerArray in 0.26.0.dev + + # default conversion + result = pa.table(df) + expected = pa.array([1, 2, None], pa.int64()) + assert result[0].chunk(0).equals(expected) + + # with specifying schema + schema = pa.schema([('a', pa.float64())]) + result = pa.table(df, schema=schema) + expected2 = pa.array([1, 2, None], pa.float64()) + assert result[0].chunk(0).equals(expected2) + + # pass Series to pa.array + result = pa.array(df['a']) + assert result.equals(expected) + result = pa.array(df['a'], type=pa.float64()) + assert result.equals(expected2) + + # pass actual ExtensionArray to pa.array + result = pa.array(df['a'].values) + assert result.equals(expected) + result = pa.array(df['a'].values, type=pa.float64()) + assert result.equals(expected2) + + +class DummyExtensionType(pa.ExtensionType): + + def __init__(self): + super().__init__(pa.int64(), + 'pyarrow.tests.test_pandas.DummyExtensionType') + + def __arrow_ext_serialize__(self): + return b'' + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + assert serialized == b'' + assert storage_type == pa.int64() + return cls() + + +def PandasArray__arrow_array__(self, type=None): + # hardcode dummy return regardless of self - we only want to check that + # this method is correctly called + storage = pa.array([1, 2, 3], type=pa.int64()) + return pa.ExtensionArray.from_storage(DummyExtensionType(), storage) + + +def test_array_protocol_pandas_extension_types(monkeypatch): + # ARROW-7022 - ensure protocol works for Period / Interval extension dtypes + + storage = pa.array([1, 2, 3], type=pa.int64()) + expected = pa.ExtensionArray.from_storage(DummyExtensionType(), storage) + + monkeypatch.setattr(pd.arrays.PeriodArray, "__arrow_array__", + PandasArray__arrow_array__, raising=False) + monkeypatch.setattr(pd.arrays.IntervalArray, "__arrow_array__", + PandasArray__arrow_array__, raising=False) + for arr in [pd.period_range("2012-01-01", periods=3, freq="D").array, + pd.interval_range(1, 4).array]: + result = pa.array(arr) + assert result.equals(expected) + result = pa.array(pd.Series(arr)) + assert result.equals(expected) + result = pa.array(pd.Index(arr)) + assert result.equals(expected) + result = pa.table(pd.DataFrame({'a': arr})).column('a').chunk(0) + assert result.equals(expected) + + +# ---------------------------------------------------------------------- +# Pandas ExtensionArray support + + +def _Int64Dtype__from_arrow__(self, array): + # for test only deal with single chunk for now + # TODO: do we require handling of chunked arrays in the protocol? 
+ if isinstance(array, pa.Array): + arr = array + else: + # ChunkedArray - here only deal with a single chunk for the test + arr = array.chunk(0) + buflist = arr.buffers() + data = np.frombuffer(buflist[-1], dtype='int64')[ + arr.offset:arr.offset + len(arr)] + bitmask = buflist[0] + if bitmask is not None: + mask = pa.BooleanArray.from_buffers( + pa.bool_(), len(arr), [None, bitmask]) + mask = np.asarray(mask) + else: + mask = np.ones(len(arr), dtype=bool) + int_arr = pd.arrays.IntegerArray(data.copy(), ~mask, copy=False) + return int_arr + + +def test_convert_to_extension_array(monkeypatch): + # table converted from dataframe with extension types (so pandas_metadata + # has this information) + df = pd.DataFrame( + {'a': [1, 2, 3], 'b': pd.array([2, 3, 4], dtype='Int64'), + 'c': [4, 5, 6]}) + table = pa.table(df) + + # Int64Dtype is recognized -> convert to extension block by default + # for a proper roundtrip + result = table.to_pandas() + assert _get_mgr(result).blocks[0].values.dtype == np.dtype("int64") + assert _get_mgr(result).blocks[1].values.dtype == pd.Int64Dtype() + tm.assert_frame_equal(result, df) + + # test with missing values + df2 = pd.DataFrame({'a': pd.array([1, 2, None], dtype='Int64')}) + table2 = pa.table(df2) + result = table2.to_pandas() + assert _get_mgr(result).blocks[0].values.dtype == pd.Int64Dtype() + tm.assert_frame_equal(result, df2) + + # monkeypatch pandas Int64Dtype to *not* have the protocol method + if Version(pd.__version__) < Version("1.3.0.dev"): + monkeypatch.delattr( + pd.core.arrays.integer._IntegerDtype, "__from_arrow__") + else: + monkeypatch.delattr( + pd.core.arrays.integer.NumericDtype, "__from_arrow__") + # Int64Dtype has no __from_arrow__ -> use normal conversion + result = table.to_pandas() + assert len(_get_mgr(result).blocks) == 1 + assert _get_mgr(result).blocks[0].values.dtype == np.dtype("int64") + + +class MyCustomIntegerType(pa.ExtensionType): + + def __init__(self): + super().__init__(pa.int64(), + 'pyarrow.tests.test_pandas.MyCustomIntegerType') + + def __arrow_ext_serialize__(self): + return b'' + + def to_pandas_dtype(self): + return pd.Int64Dtype() + + +def test_conversion_extensiontype_to_extensionarray(monkeypatch): + # converting extension type to linked pandas ExtensionDtype/Array + storage = pa.array([1, 2, 3, 4], pa.int64()) + arr = pa.ExtensionArray.from_storage(MyCustomIntegerType(), storage) + table = pa.table({'a': arr}) + + # extension type points to Int64Dtype, which knows how to create a + # pandas ExtensionArray + result = arr.to_pandas() + assert _get_mgr(result).blocks[0].values.dtype == pd.Int64Dtype() + expected = pd.Series([1, 2, 3, 4], dtype='Int64') + tm.assert_series_equal(result, expected) + + result = table.to_pandas() + assert _get_mgr(result).blocks[0].values.dtype == pd.Int64Dtype() + expected = pd.DataFrame({'a': pd.array([1, 2, 3, 4], dtype='Int64')}) + tm.assert_frame_equal(result, expected) + + # monkeypatch pandas Int64Dtype to *not* have the protocol method + # (remove the version added above and the actual version for recent pandas) + if Version(pd.__version__) < Version("1.3.0.dev"): + monkeypatch.delattr( + pd.core.arrays.integer._IntegerDtype, "__from_arrow__") + else: + monkeypatch.delattr( + pd.core.arrays.integer.NumericDtype, "__from_arrow__") + + result = arr.to_pandas() + assert _get_mgr(result).blocks[0].values.dtype == np.dtype("int64") + expected = pd.Series([1, 2, 3, 4]) + tm.assert_series_equal(result, expected) + + with pytest.raises(ValueError): + table.to_pandas() + + +def 
test_to_pandas_extension_dtypes_mapping(): + table = pa.table({'a': pa.array([1, 2, 3], pa.int64())}) + + # default use numpy dtype + result = table.to_pandas() + assert result['a'].dtype == np.dtype('int64') + + # specify to override the default + result = table.to_pandas(types_mapper={pa.int64(): pd.Int64Dtype()}.get) + assert isinstance(result['a'].dtype, pd.Int64Dtype) + + # types that return None in function get normal conversion + table = pa.table({'a': pa.array([1, 2, 3], pa.int32())}) + result = table.to_pandas(types_mapper={pa.int64(): pd.Int64Dtype()}.get) + assert result['a'].dtype == np.dtype('int32') + + # `types_mapper` overrules the pandas metadata + table = pa.table(pd.DataFrame({'a': pd.array([1, 2, 3], dtype="Int64")})) + result = table.to_pandas() + assert isinstance(result['a'].dtype, pd.Int64Dtype) + result = table.to_pandas( + types_mapper={pa.int64(): pd.PeriodDtype('D')}.get) + assert isinstance(result['a'].dtype, pd.PeriodDtype) + + +def test_array_to_pandas(): + if Version(pd.__version__) < Version("1.1"): + pytest.skip("ExtensionDtype to_pandas method missing") + + for arr in [pd.period_range("2012-01-01", periods=3, freq="D").array, + pd.interval_range(1, 4).array]: + result = pa.array(arr).to_pandas() + expected = pd.Series(arr) + tm.assert_series_equal(result, expected) + + result = pa.table({"col": arr})["col"].to_pandas() + expected = pd.Series(arr, name="col") + tm.assert_series_equal(result, expected) + + +def test_roundtrip_empty_table_with_extension_dtype_index(): + df = pd.DataFrame(index=pd.interval_range(start=0, end=3)) + table = pa.table(df) + if Version(pd.__version__) > Version("1.0"): + tm.assert_index_equal(table.to_pandas().index, df.index) + else: + tm.assert_index_equal(table.to_pandas().index, + pd.Index([{'left': 0, 'right': 1}, + {'left': 1, 'right': 2}, + {'left': 2, 'right': 3}], + dtype='object')) + + +@pytest.mark.parametrize("index", ["a", ["a", "b"]]) +def test_to_pandas_types_mapper_index(index): + if Version(pd.__version__) < Version("1.5.0"): + pytest.skip("ArrowDtype missing") + df = pd.DataFrame( + { + "a": [1, 2], + "b": [3, 4], + "c": [5, 6], + }, + dtype=pd.ArrowDtype(pa.int64()), + ).set_index(index) + expected = df.copy() + table = pa.table(df) + result = table.to_pandas(types_mapper=pd.ArrowDtype) + tm.assert_frame_equal(result, expected) + + +def test_array_to_pandas_types_mapper(): + # https://issues.apache.org/jira/browse/ARROW-9664 + if Version(pd.__version__) < Version("1.2.0"): + pytest.skip("Float64Dtype extension dtype missing") + + data = pa.array([1, 2, 3], pa.int64()) + + # Test with mapper function + types_mapper = {pa.int64(): pd.Int64Dtype()}.get + result = data.to_pandas(types_mapper=types_mapper) + assert result.dtype == pd.Int64Dtype() + + # Test mapper function returning None + types_mapper = {pa.int64(): None}.get + result = data.to_pandas(types_mapper=types_mapper) + assert result.dtype == np.dtype("int64") + + # Test mapper function not containing the dtype + types_mapper = {pa.float64(): pd.Float64Dtype()}.get + result = data.to_pandas(types_mapper=types_mapper) + assert result.dtype == np.dtype("int64") + + +@pytest.mark.pandas +def test_chunked_array_to_pandas_types_mapper(): + # https://issues.apache.org/jira/browse/ARROW-9664 + if Version(pd.__version__) < Version("1.2.0"): + pytest.skip("Float64Dtype extension dtype missing") + + data = pa.chunked_array([pa.array([1, 2, 3], pa.int64())]) + assert isinstance(data, pa.ChunkedArray) + + # Test with mapper function + types_mapper = {pa.int64(): 
pd.Int64Dtype()}.get + result = data.to_pandas(types_mapper=types_mapper) + assert result.dtype == pd.Int64Dtype() + + # Test mapper function returning None + types_mapper = {pa.int64(): None}.get + result = data.to_pandas(types_mapper=types_mapper) + assert result.dtype == np.dtype("int64") + + # Test mapper function not containing the dtype + types_mapper = {pa.float64(): pd.Float64Dtype()}.get + result = data.to_pandas(types_mapper=types_mapper) + assert result.dtype == np.dtype("int64") + + +# ---------------------------------------------------------------------- +# Legacy metadata compatibility tests + + +def test_metadata_compat_range_index_pre_0_12(): + # Forward compatibility for metadata created from pandas.RangeIndex + # prior to pyarrow 0.13.0 + a_values = ['foo', 'bar', None, 'baz'] + b_values = ['a', 'a', 'b', 'b'] + a_arrow = pa.array(a_values, type='utf8') + b_arrow = pa.array(b_values, type='utf8') + + rng_index_arrow = pa.array([0, 2, 4, 6], type='int64') + + gen_name_0 = '__index_level_0__' + gen_name_1 = '__index_level_1__' + + # Case 1: named RangeIndex + e1 = pd.DataFrame({ + 'a': a_values + }, index=pd.RangeIndex(0, 8, step=2, name='qux')) + t1 = pa.Table.from_arrays([a_arrow, rng_index_arrow], + names=['a', 'qux']) + t1 = t1.replace_schema_metadata({ + b'pandas': json.dumps( + {'index_columns': ['qux'], + 'column_indexes': [{'name': None, + 'field_name': None, + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': {'encoding': 'UTF-8'}}], + 'columns': [{'name': 'a', + 'field_name': 'a', + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': None}, + {'name': 'qux', + 'field_name': 'qux', + 'pandas_type': 'int64', + 'numpy_type': 'int64', + 'metadata': None}], + 'pandas_version': '0.23.4'} + )}) + r1 = t1.to_pandas() + tm.assert_frame_equal(r1, e1) + + # Case 2: named RangeIndex, but conflicts with an actual column + e2 = pd.DataFrame({ + 'qux': a_values + }, index=pd.RangeIndex(0, 8, step=2, name='qux')) + t2 = pa.Table.from_arrays([a_arrow, rng_index_arrow], + names=['qux', gen_name_0]) + t2 = t2.replace_schema_metadata({ + b'pandas': json.dumps( + {'index_columns': [gen_name_0], + 'column_indexes': [{'name': None, + 'field_name': None, + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': {'encoding': 'UTF-8'}}], + 'columns': [{'name': 'a', + 'field_name': 'a', + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': None}, + {'name': 'qux', + 'field_name': gen_name_0, + 'pandas_type': 'int64', + 'numpy_type': 'int64', + 'metadata': None}], + 'pandas_version': '0.23.4'} + )}) + r2 = t2.to_pandas() + tm.assert_frame_equal(r2, e2) + + # Case 3: unnamed RangeIndex + e3 = pd.DataFrame({ + 'a': a_values + }, index=pd.RangeIndex(0, 8, step=2, name=None)) + t3 = pa.Table.from_arrays([a_arrow, rng_index_arrow], + names=['a', gen_name_0]) + t3 = t3.replace_schema_metadata({ + b'pandas': json.dumps( + {'index_columns': [gen_name_0], + 'column_indexes': [{'name': None, + 'field_name': None, + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': {'encoding': 'UTF-8'}}], + 'columns': [{'name': 'a', + 'field_name': 'a', + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': None}, + {'name': None, + 'field_name': gen_name_0, + 'pandas_type': 'int64', + 'numpy_type': 'int64', + 'metadata': None}], + 'pandas_version': '0.23.4'} + )}) + r3 = t3.to_pandas() + tm.assert_frame_equal(r3, e3) + + # Case 4: MultiIndex with named RangeIndex + e4 = pd.DataFrame({ + 'a': a_values + }, index=[pd.RangeIndex(0, 8, 
step=2, name='qux'), b_values]) + t4 = pa.Table.from_arrays([a_arrow, rng_index_arrow, b_arrow], + names=['a', 'qux', gen_name_1]) + t4 = t4.replace_schema_metadata({ + b'pandas': json.dumps( + {'index_columns': ['qux', gen_name_1], + 'column_indexes': [{'name': None, + 'field_name': None, + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': {'encoding': 'UTF-8'}}], + 'columns': [{'name': 'a', + 'field_name': 'a', + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': None}, + {'name': 'qux', + 'field_name': 'qux', + 'pandas_type': 'int64', + 'numpy_type': 'int64', + 'metadata': None}, + {'name': None, + 'field_name': gen_name_1, + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': None}], + 'pandas_version': '0.23.4'} + )}) + r4 = t4.to_pandas() + tm.assert_frame_equal(r4, e4) + + # Case 4: MultiIndex with unnamed RangeIndex + e5 = pd.DataFrame({ + 'a': a_values + }, index=[pd.RangeIndex(0, 8, step=2, name=None), b_values]) + t5 = pa.Table.from_arrays([a_arrow, rng_index_arrow, b_arrow], + names=['a', gen_name_0, gen_name_1]) + t5 = t5.replace_schema_metadata({ + b'pandas': json.dumps( + {'index_columns': [gen_name_0, gen_name_1], + 'column_indexes': [{'name': None, + 'field_name': None, + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': {'encoding': 'UTF-8'}}], + 'columns': [{'name': 'a', + 'field_name': 'a', + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': None}, + {'name': None, + 'field_name': gen_name_0, + 'pandas_type': 'int64', + 'numpy_type': 'int64', + 'metadata': None}, + {'name': None, + 'field_name': gen_name_1, + 'pandas_type': 'unicode', + 'numpy_type': 'object', + 'metadata': None}], + 'pandas_version': '0.23.4'} + )}) + r5 = t5.to_pandas() + tm.assert_frame_equal(r5, e5) + + +def test_metadata_compat_missing_field_name(): + # Combination of missing field name but with index column as metadata. 
+ # This combo occurs in the latest versions of fastparquet (0.3.2), but not + # in pyarrow itself (since field_name was added in 0.8, index as metadata + # only added later) + + a_values = [1, 2, 3, 4] + b_values = ['a', 'b', 'c', 'd'] + a_arrow = pa.array(a_values, type='int64') + b_arrow = pa.array(b_values, type='utf8') + + expected = pd.DataFrame({ + 'a': a_values, + 'b': b_values, + }, index=pd.RangeIndex(0, 8, step=2, name='qux')) + table = pa.table({'a': a_arrow, 'b': b_arrow}) + + # metadata generated by fastparquet 0.3.2 with missing field_names + table = table.replace_schema_metadata({ + b'pandas': json.dumps({ + 'column_indexes': [ + {'field_name': None, + 'metadata': None, + 'name': None, + 'numpy_type': 'object', + 'pandas_type': 'mixed-integer'} + ], + 'columns': [ + {'metadata': None, + 'name': 'a', + 'numpy_type': 'int64', + 'pandas_type': 'int64'}, + {'metadata': None, + 'name': 'b', + 'numpy_type': 'object', + 'pandas_type': 'unicode'} + ], + 'index_columns': [ + {'kind': 'range', + 'name': 'qux', + 'start': 0, + 'step': 2, + 'stop': 8} + ], + 'pandas_version': '0.25.0'} + + )}) + result = table.to_pandas() + tm.assert_frame_equal(result, expected) + + +def test_metadata_index_name_not_json_serializable(): + name = np.int64(6) # not json serializable by default + table = pa.table(pd.DataFrame(index=pd.RangeIndex(0, 4, name=name))) + metadata = table.schema.pandas_metadata + assert metadata['index_columns'][0]['name'] == '6' + + +def test_metadata_index_name_is_json_serializable(): + name = 6 # json serializable by default + table = pa.table(pd.DataFrame(index=pd.RangeIndex(0, 4, name=name))) + metadata = table.schema.pandas_metadata + assert metadata['index_columns'][0]['name'] == 6 + + +def make_df_with_timestamps(): + # Some of the milliseconds timestamps deliberately don't fit in the range + # that is possible with nanosecond timestamps. + df = pd.DataFrame({ + 'dateTimeMs': [ + np.datetime64('0001-01-01 00:00', 'ms'), + np.datetime64('2012-05-02 12:35', 'ms'), + np.datetime64('2012-05-03 15:42', 'ms'), + np.datetime64('3000-05-03 15:42', 'ms'), + ], + 'dateTimeNs': [ + np.datetime64('1991-01-01 00:00', 'ns'), + np.datetime64('2012-05-02 12:35', 'ns'), + np.datetime64('2012-05-03 15:42', 'ns'), + np.datetime64('2050-05-03 15:42', 'ns'), + ], + }) + # Not part of what we're testing, just ensuring that the inputs are what we + # expect. + assert (df.dateTimeMs.dtype, df.dateTimeNs.dtype) == ( + # O == object, M8[ns] == timestamp64[ns] + np.dtype("O"), np.dtype("M8[ns]") + ) + return df + + +@pytest.mark.parquet +@pytest.mark.filterwarnings("ignore:Parquet format '2.0':FutureWarning") +def test_timestamp_as_object_parquet(tempdir): + # Timestamps can be stored as Parquet and reloaded into Pandas with no loss + # of information if the timestamp_as_object option is True. + df = make_df_with_timestamps() + table = pa.Table.from_pandas(df) + filename = tempdir / "timestamps_from_pandas.parquet" + pq.write_table(table, filename, version="2.0") + result = pq.read_table(filename) + df2 = result.to_pandas(timestamp_as_object=True) + tm.assert_frame_equal(df, df2) + + +def test_timestamp_as_object_out_of_range(): + # Out of range timestamps can be converted Arrow and reloaded into Pandas + # with no loss of information if the timestamp_as_object option is True. 
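For context, a minimal standalone sketch (assuming pyarrow and pandas are installed; the value is made up for illustration) of what the timestamp_as_object option does for timestamps outside the nanosecond range:

import datetime
import pyarrow as pa

# A millisecond timestamp far outside what datetime64[ns] can represent.
arr = pa.array([datetime.datetime(3000, 5, 3, 15, 42)], type=pa.timestamp("ms"))

# With timestamp_as_object=True the conversion returns Python datetime objects
# instead of coercing the values to pandas' nanosecond timestamps.
series = arr.to_pandas(timestamp_as_object=True)
assert series.dtype == object
assert isinstance(series[0], datetime.datetime)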
+ df = make_df_with_timestamps() + table = pa.Table.from_pandas(df) + df2 = table.to_pandas(timestamp_as_object=True) + tm.assert_frame_equal(df, df2) + + +@pytest.mark.parametrize("resolution", ["s", "ms", "us"]) +@pytest.mark.parametrize("tz", [None, "America/New_York"]) +# One datetime outside nanosecond range, one inside nanosecond range: +@pytest.mark.parametrize("dt", [datetime(1553, 1, 1), datetime(2020, 1, 1)]) +def test_timestamp_as_object_non_nanosecond(resolution, tz, dt): + # Timestamps can be converted Arrow and reloaded into Pandas with no loss + # of information if the timestamp_as_object option is True. + arr = pa.array([dt], type=pa.timestamp(resolution, tz=tz)) + table = pa.table({'a': arr}) + + for result in [ + arr.to_pandas(timestamp_as_object=True), + table.to_pandas(timestamp_as_object=True)['a'] + ]: + assert result.dtype == object + assert isinstance(result[0], datetime) + if tz: + assert result[0].tzinfo is not None + expected = result[0].tzinfo.fromutc(dt) + else: + assert result[0].tzinfo is None + expected = dt + assert result[0] == expected + + +def test_timestamp_as_object_fixed_offset(): + # ARROW-16547 to_pandas with timestamp_as_object=True and FixedOffset + pytz = pytest.importorskip("pytz") + import datetime + timezone = pytz.FixedOffset(120) + dt = timezone.localize(datetime.datetime(2022, 5, 12, 16, 57)) + + table = pa.table({"timestamp_col": pa.array([dt])}) + result = table.to_pandas(timestamp_as_object=True) + assert pa.table(result) == table + + +def test_threaded_pandas_import(): + invoke_script("pandas_threaded_import.py") + + +def test_does_not_mutate_timedelta_dtype(): + expected = np.dtype('m8') + + assert np.dtype(np.timedelta64) == expected + + df = pd.DataFrame({"a": [np.timedelta64()]}) + t = pa.Table.from_pandas(df) + t.to_pandas() + + assert np.dtype(np.timedelta64) == expected + + +def test_does_not_mutate_timedelta_nested(): + # ARROW-17893: dataframe with timedelta and a list of dictionary + # also with timedelta produces wrong result with to_pandas + + from datetime import timedelta + timedelta_1 = [{"timedelta_1": timedelta(seconds=12, microseconds=1)}] + timedelta_2 = [timedelta(hours=3, minutes=40, seconds=23)] + table = pa.table({"timedelta_1": timedelta_1, "timedelta_2": timedelta_2}) + df = table.to_pandas() + + assert df["timedelta_2"][0].to_pytimedelta() == timedelta_2[0] + + +def test_roundtrip_nested_map_table_with_pydicts(): + schema = pa.schema([ + pa.field( + "a", + pa.list_( + pa.map_(pa.int8(), pa.struct([pa.field("b", pa.binary())])) + ) + ) + ]) + table = pa.table([[ + [[(1, None)]], + None, + [ + [(2, {"b": b"abc"})], + [(3, {"b": None}), (4, {"b": b"def"})], + ] + ]], + schema=schema, + ) + + expected_default_df = pd.DataFrame( + {"a": [[[(1, None)]], None, [[(2, {"b": b"abc"})], + [(3, {"b": None}), (4, {"b": b"def"})]]]} + ) + expected_as_pydicts_df = pd.DataFrame( + {"a": [ + [{1: None}], + None, + [{2: {"b": b"abc"}}, {3: {"b": None}, 4: {"b": b"def"}}], + ]} + ) + + default_df = table.to_pandas() + as_pydicts_df = table.to_pandas(maps_as_pydicts="strict") + + tm.assert_frame_equal(default_df, expected_default_df) + tm.assert_frame_equal(as_pydicts_df, expected_as_pydicts_df) + + table_default_roundtrip = pa.Table.from_pandas(default_df, schema=schema) + assert table.equals(table_default_roundtrip) + + table_as_pydicts_roundtrip = pa.Table.from_pandas(as_pydicts_df, schema=schema) + assert table.equals(table_as_pydicts_roundtrip) + + +def test_roundtrip_nested_map_array_with_pydicts_sliced(): + """ + Slightly 
more robust test with chunking and slicing + """ + keys_1 = pa.array(['foo', 'bar']) + keys_2 = pa.array(['baz', 'qux', 'quux', 'quz']) + keys_3 = pa.array([], pa.string()) + + items_1 = pa.array( + [['a', 'b'], ['c', 'd']], + pa.list_(pa.string()), + ) + items_2 = pa.array( + [[], None, [None, 'e'], ['f', 'g']], + pa.list_(pa.string()), + ) + items_3 = pa.array( + [], + pa.list_(pa.string()), + ) + + map_chunk_1 = pa.MapArray.from_arrays([0, 2], keys_1, items_1) + map_chunk_2 = pa.MapArray.from_arrays([0, 3, 4], keys_2, items_2) + map_chunk_3 = pa.MapArray.from_arrays([0, 0], keys_3, items_3) + chunked_array = pa.chunked_array([ + pa.ListArray.from_arrays([0, 1], map_chunk_1).slice(0), + pa.ListArray.from_arrays([0, 1], map_chunk_2.slice(1)).slice(0), + pa.ListArray.from_arrays([0, 0], map_chunk_3).slice(0), + ]) + + series_default = chunked_array.to_pandas() + expected_series_default = pd.Series([ + [[('foo', ['a', 'b']), ('bar', ['c', 'd'])]], + [[('quz', ['f', 'g'])]], + [], + ]) + + series_pydicts = chunked_array.to_pandas(maps_as_pydicts="strict") + expected_series_pydicts = pd.Series([ + [{'foo': ['a', 'b'], 'bar': ['c', 'd']}], + [{'quz': ['f', 'g']}], + [], + ]) + + sliced = chunked_array.slice(1, 3) + series_default_sliced = sliced.to_pandas() + expected_series_default_sliced = pd.Series([ + [[('quz', ['f', 'g'])]], + [], + ]) + + series_pydicts_sliced = sliced.to_pandas(maps_as_pydicts="strict") + expected_series_pydicts_sliced = pd.Series([ + [{'quz': ['f', 'g']}], + [], + ]) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "elementwise comparison failed", + DeprecationWarning) + tm.assert_series_equal(series_default, expected_series_default) + tm.assert_series_equal(series_pydicts, expected_series_pydicts) + tm.assert_series_equal(series_default_sliced, expected_series_default_sliced) + tm.assert_series_equal(series_pydicts_sliced, expected_series_pydicts_sliced) + + ty = pa.list_(pa.map_(pa.string(), pa.list_(pa.string()))) + + def assert_roundtrip(series: pd.Series, data) -> None: + array_roundtrip = pa.chunked_array(pa.Array.from_pandas(series, type=ty)) + array_roundtrip.validate(full=True) + assert data.equals(array_roundtrip) + + assert_roundtrip(series_default, chunked_array) + assert_roundtrip(series_pydicts, chunked_array) + assert_roundtrip(series_default_sliced, sliced) + assert_roundtrip(series_pydicts_sliced, sliced) + + +def test_roundtrip_map_array_with_pydicts_duplicate_keys(): + keys = pa.array(['foo', 'bar', 'foo']) + items = pa.array( + [['a', 'b'], ['c', 'd'], ['1', '2']], + pa.list_(pa.string()), + ) + offsets = [0, 3] + maps = pa.MapArray.from_arrays(offsets, keys, items) + ty = pa.map_(pa.string(), pa.list_(pa.string())) + + # ------------------------ + # With maps as pydicts + with pytest.raises(pa.lib.ArrowException): + # raises because of duplicate keys + maps.to_pandas(maps_as_pydicts="strict") + series_pydicts = maps.to_pandas(maps_as_pydicts="lossy") + # some data loss occurs for duplicate keys + expected_series_pydicts = pd.Series([ + {'foo': ['1', '2'], 'bar': ['c', 'd']}, + ]) + # roundtrip is not possible because of data loss + assert not maps.equals(pa.Array.from_pandas(series_pydicts, type=ty)) + + # ------------------------ + # With default assoc list of tuples + series_default = maps.to_pandas() + expected_series_default = pd.Series([ + [('foo', ['a', 'b']), ('bar', ['c', 'd']), ('foo', ['1', '2'])], + ]) + assert maps.equals(pa.Array.from_pandas(series_default, type=ty)) + + # custom comparison for compatibility w/ 
Pandas 1.0.0 + # would otherwise run: + # tm.assert_series_equal(series_pydicts, expected_series_pydicts) + assert len(series_pydicts) == len(expected_series_pydicts) + for row1, row2 in zip(series_pydicts, expected_series_pydicts): + assert len(row1) == len(row2) + for tup1, tup2 in zip(row1.items(), row2.items()): + assert tup1[0] == tup2[0] + assert np.array_equal(tup1[1], tup2[1]) + + # custom comparison for compatibility w/ Pandas 1.0.0 + # would otherwise run: + # tm.assert_series_equal(series_default, expected_series_default) + assert len(series_default) == len(expected_series_default) + for row1, row2 in zip(series_default, expected_series_default): + assert len(row1) == len(row2) + for tup1, tup2 in zip(row1, row2): + assert tup1[0] == tup2[0] + assert np.array_equal(tup1[1], tup2[1]) + + +def test_unhashable_map_keys_with_pydicts(): + keys = pa.array( + [['a', 'b'], ['c', 'd'], [], ['e'], [None, 'f'], ['g', 'h']], + pa.list_(pa.string()), + ) + items = pa.array(['foo', 'bar', 'baz', 'qux', 'quux', 'quz']) + offsets = [0, 2, 6] + maps = pa.MapArray.from_arrays(offsets, keys, items) + + # ------------------------ + # With maps as pydicts + with pytest.raises(TypeError): + maps.to_pandas(maps_as_pydicts="lossy") + + # ------------------------ + # With default assoc list of tuples + series = maps.to_pandas() + expected_series_default = pd.Series([ + [(['a', 'b'], 'foo'), (['c', 'd'], 'bar')], + [([], 'baz'), (['e'], 'qux'), ([None, 'f'], 'quux'), (['g', 'h'], 'quz')], + ]) + + # custom comparison for compatibility w/ Pandas 1.0.0 + # would otherwise run: + # tm.assert_series_equal(series, expected_series_default) + assert len(series) == len(expected_series_default) + for row1, row2 in zip(series, expected_series_default): + assert len(row1) == len(row2) + for tup1, tup2 in zip(row1, row2): + assert np.array_equal(tup1[0], tup2[0]) + assert tup1[1] == tup2[1] + + +def test_table_column_conversion_for_datetime(): + # GH-35235 + # pandas implemented __from_arrow__ for DatetimeTZDtype, + # but we choose to do the conversion in Arrow instead. + # https://github.com/pandas-dev/pandas/pull/52201 + series = pd.Series(pd.date_range("2012", periods=2, tz="Europe/Brussels"), + name="datetime_column") + table = pa.table({"datetime_column": pa.array(series)}) + table_col = table.column("datetime_column") + + result = table_col.to_pandas() + assert result.name == "datetime_column" + tm.assert_series_equal(result, series) + + +def test_array_conversion_for_datetime(): + # GH-35235 + # pandas implemented __from_arrow__ for DatetimeTZDtype, + # but we choose to do the conversion in Arrow instead. + # https://github.com/pandas-dev/pandas/pull/52201 + series = pd.Series(pd.date_range("2012", periods=2, tz="Europe/Brussels")) + arr = pa.array(series) + + result = arr.to_pandas() + tm.assert_series_equal(result, series) + + +@pytest.mark.large_memory +def test_nested_chunking_valid(): + # GH-32439: Chunking can cause arrays to be in invalid state + # when nested types are involved. + # Here we simply ensure we validate correctly. 
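As a quick reference, a small standalone sketch (assuming a pyarrow build that supports the maps_as_pydicts option, plus pandas; the names and values are illustrative) of the two MapArray-to-pandas representations exercised above and reused in the round-trip below:

import pyarrow as pa

# One map with two entries: {"x": 1, "y": 2}.
maps = pa.MapArray.from_arrays([0, 2], pa.array(["x", "y"]), pa.array([1, 2]))

# Default conversion keeps an association list of (key, value) tuples per row.
as_tuples = maps.to_pandas()
assert list(as_tuples[0]) == [("x", 1), ("y", 2)]

# maps_as_pydicts returns a dict per row; "strict" raises on duplicate keys,
# while "lossy" silently keeps the last value seen for a repeated key.
as_dicts = maps.to_pandas(maps_as_pydicts="strict")
assert as_dicts[0] == {"x": 1, "y": 2}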
+ + def roundtrip(df, schema=None): + tab = pa.Table.from_pandas(df, schema=schema) + tab.validate(full=True) + # we expect to trigger chunking internally + # an assertion failure here may just mean this threshold has changed + num_chunks = tab.column(0).num_chunks + assert num_chunks > 1 + tm.assert_frame_equal(tab.to_pandas(self_destruct=True, + maps_as_pydicts="strict"), df) + + x = b"0" * 720000000 + roundtrip(pd.DataFrame({"strings": [x, x, x]})) + + struct = {"struct_field": x} + roundtrip(pd.DataFrame({"structs": [struct, struct, struct]})) + + lists = [x] + roundtrip(pd.DataFrame({"lists": [lists, lists, lists]})) + + los = [struct] + roundtrip(pd.DataFrame({"los": [los, los, los]})) + + sol = {"struct_field": lists} + roundtrip(pd.DataFrame({"sol": [sol, sol, sol]})) + + map_of_los = {"a": los} + map_type = pa.map_(pa.string(), + pa.list_(pa.struct([("struct_field", pa.binary())]))) + schema = pa.schema([("maps", map_type)]) + roundtrip(pd.DataFrame({"maps": [map_of_los, map_of_los, map_of_los]}), + schema=schema) + + +def test_is_data_frame_race_condition(): + # See https://github.com/apache/arrow/issues/39313 + test_util.invoke_script('arrow_39313.py') diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_scalars.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_scalars.py new file mode 100644 index 0000000000000000000000000000000000000000..6a814111898b781f66edfb14c1eff46ce7ca19ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_scalars.py @@ -0,0 +1,865 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
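Before the individual test groups, a compact standalone sketch (assuming only pyarrow; the values are arbitrary) of the scalar behaviour these tests cover: type inference, explicit types, and as_py() round-trips:

import datetime
import pyarrow as pa

s = pa.scalar(1)                          # a Python int infers int64
assert isinstance(s, pa.Int64Scalar) and s.as_py() == 1

s = pa.scalar(1, type=pa.int8())          # an explicit type picks the matching scalar class
assert isinstance(s, pa.Int8Scalar)

s = pa.scalar(datetime.date(2020, 1, 1))  # dates infer date32 by default
assert s.type == pa.date32()

assert pa.scalar(None).is_valid is False  # a null scalar reports is_valid == False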
+ +import datetime +import decimal +import pytest +import sys +import weakref + +import numpy as np + +import pyarrow as pa +import pyarrow.compute as pc +from pyarrow.tests import util + + +@pytest.mark.parametrize(['value', 'ty', 'klass'], [ + (False, None, pa.BooleanScalar), + (True, None, pa.BooleanScalar), + (1, None, pa.Int64Scalar), + (-1, None, pa.Int64Scalar), + (1, pa.int8(), pa.Int8Scalar), + (1, pa.uint8(), pa.UInt8Scalar), + (1, pa.int16(), pa.Int16Scalar), + (1, pa.uint16(), pa.UInt16Scalar), + (1, pa.int32(), pa.Int32Scalar), + (1, pa.uint32(), pa.UInt32Scalar), + (1, pa.int64(), pa.Int64Scalar), + (1, pa.uint64(), pa.UInt64Scalar), + (1.0, None, pa.DoubleScalar), + (np.float16(1.0), pa.float16(), pa.HalfFloatScalar), + (1.0, pa.float32(), pa.FloatScalar), + (decimal.Decimal("1.123"), None, pa.Decimal128Scalar), + (decimal.Decimal("1.1234567890123456789012345678901234567890"), + None, pa.Decimal256Scalar), + ("string", None, pa.StringScalar), + (b"bytes", None, pa.BinaryScalar), + ("largestring", pa.large_string(), pa.LargeStringScalar), + (b"largebytes", pa.large_binary(), pa.LargeBinaryScalar), + ("string_view", pa.string_view(), pa.StringViewScalar), + (b"bytes_view", pa.binary_view(), pa.BinaryViewScalar), + (b"abc", pa.binary(3), pa.FixedSizeBinaryScalar), + ([1, 2, 3], None, pa.ListScalar), + ([1, 2, 3, 4], pa.large_list(pa.int8()), pa.LargeListScalar), + ([1, 2, 3, 4, 5], pa.list_(pa.int8(), 5), pa.FixedSizeListScalar), + ([1, 2, 3], pa.list_view(pa.int8()), pa.ListViewScalar), + ([1, 2, 3, 4], pa.large_list_view(pa.int8()), pa.LargeListViewScalar), + (datetime.date.today(), None, pa.Date32Scalar), + (datetime.date.today(), pa.date64(), pa.Date64Scalar), + (datetime.datetime.now(), None, pa.TimestampScalar), + (datetime.datetime.now().time().replace(microsecond=0), pa.time32('s'), + pa.Time32Scalar), + (datetime.datetime.now().time(), None, pa.Time64Scalar), + (datetime.timedelta(days=1), None, pa.DurationScalar), + (pa.MonthDayNano([1, -1, -10100]), None, + pa.MonthDayNanoIntervalScalar), + ({'a': 1, 'b': [1, 2]}, None, pa.StructScalar), + ([('a', 1), ('b', 2)], pa.map_(pa.string(), pa.int8()), pa.MapScalar), +]) +def test_basics(value, ty, klass, pickle_module): + s = pa.scalar(value, type=ty) + s.validate() + s.validate(full=True) + assert isinstance(s, klass) + assert s.as_py() == value + assert s == pa.scalar(value, type=ty) + assert s != value + assert s != "else" + assert hash(s) == hash(s) + assert s.is_valid is True + assert s != None # noqa: E711 + + s = pa.scalar(None, type=s.type) + assert s.is_valid is False + assert s.as_py() is None + assert s != pa.scalar(value, type=ty) + + # test pickle roundtrip + restored = pickle_module.loads(pickle_module.dumps(s)) + assert s.equals(restored) + + # test that scalars are weak-referenceable + wr = weakref.ref(s) + assert wr() is not None + del s + assert wr() is None + + +def test_invalid_scalar(): + s = pc.cast(pa.scalar(b"\xff"), pa.string(), safe=False) + s.validate() + with pytest.raises(ValueError, + match="string scalar contains invalid UTF8 data"): + s.validate(full=True) + + +def test_null_singleton(): + with pytest.raises(RuntimeError): + pa.NullScalar() + + +def test_nulls(pickle_module): + null = pa.scalar(None) + assert null is pa.NA + assert null.as_py() is None + assert null != "something" + assert (null == pa.scalar(None)) is True + assert (null == 0) is False + assert pa.NA == pa.NA + assert pa.NA not in [5] + + arr = pa.array([None, None]) + for v in arr: + assert v is pa.NA + assert v.as_py() is 
None + + # test pickle roundtrip + restored = pickle_module.loads(pickle_module.dumps(null)) + assert restored.equals(null) + + # test that scalars are weak-referenceable + wr = weakref.ref(null) + assert wr() is not None + del null + assert wr() is not None # singleton + + +def test_hashing(): + # ARROW-640 + values = list(range(500)) + arr = pa.array(values + values) + set_from_array = set(arr) + assert isinstance(set_from_array, set) + assert len(set_from_array) == 500 + + +def test_hashing_struct_scalar(): + # GH-35360 + a = pa.array([[{'a': 5}, {'a': 6}], [{'a': 7}, None]]) + b = pa.array([[{'a': 7}, None]]) + hash1 = hash(a[1]) + hash2 = hash(b[0]) + assert hash1 == hash2 + + +@pytest.mark.skipif(sys.platform == "win32" and not util.windows_has_tzdata(), + reason="Timezone database is not installed on Windows") +def test_timestamp_scalar(): + a = repr(pa.scalar("0000-01-01").cast(pa.timestamp("s"))) + assert a == "" + b = repr(pa.scalar(datetime.datetime(2015, 1, 1), type=pa.timestamp('s', tz='UTC'))) + assert b == "" + c = repr(pa.scalar(datetime.datetime(2015, 1, 1), type=pa.timestamp('us'))) + assert c == "" + d = repr(pc.assume_timezone( + pa.scalar("2000-01-01").cast(pa.timestamp("s")), "America/New_York")) + assert d == "" + + +def test_bool(): + false = pa.scalar(False) + true = pa.scalar(True) + + assert isinstance(false, pa.BooleanScalar) + assert isinstance(true, pa.BooleanScalar) + + assert repr(true) == "" + assert str(true) == "True" + assert repr(false) == "" + assert str(false) == "False" + + assert true.as_py() is True + assert false.as_py() is False + + +def test_numerics(): + # int64 + s = pa.scalar(1) + assert isinstance(s, pa.Int64Scalar) + assert repr(s) == "" + assert str(s) == "1" + assert s.as_py() == 1 + + with pytest.raises(OverflowError): + pa.scalar(-1, type='uint8') + + # float64 + s = pa.scalar(1.5) + assert isinstance(s, pa.DoubleScalar) + assert repr(s) == "" + assert str(s) == "1.5" + assert s.as_py() == 1.5 + + # float16 + s = pa.scalar(np.float16(0.5), type='float16') + assert isinstance(s, pa.HalfFloatScalar) + # on numpy2 repr(np.float16(0.5)) == "np.float16(0.5)" + # on numpy1 repr(np.float16(0.5)) == "0.5" + assert repr(s) == f"" + assert str(s) == "0.5" + assert s.as_py() == 0.5 + + +def test_decimal128(): + v = decimal.Decimal("1.123") + s = pa.scalar(v) + assert isinstance(s, pa.Decimal128Scalar) + assert s.as_py() == v + assert s.type == pa.decimal128(4, 3) + + v = decimal.Decimal("1.1234") + with pytest.raises(pa.ArrowInvalid): + pa.scalar(v, type=pa.decimal128(4, scale=3)) + with pytest.raises(pa.ArrowInvalid): + pa.scalar(v, type=pa.decimal128(5, scale=3)) + + s = pa.scalar(v, type=pa.decimal128(5, scale=4)) + assert isinstance(s, pa.Decimal128Scalar) + assert s.as_py() == v + + +def test_decimal256(): + v = decimal.Decimal("1234567890123456789012345678901234567890.123") + s = pa.scalar(v) + assert isinstance(s, pa.Decimal256Scalar) + assert s.as_py() == v + assert s.type == pa.decimal256(43, 3) + + v = decimal.Decimal("1.1234") + with pytest.raises(pa.ArrowInvalid): + pa.scalar(v, type=pa.decimal256(4, scale=3)) + with pytest.raises(pa.ArrowInvalid): + pa.scalar(v, type=pa.decimal256(5, scale=3)) + + s = pa.scalar(v, type=pa.decimal256(5, scale=4)) + assert isinstance(s, pa.Decimal256Scalar) + assert s.as_py() == v + + +def test_date(): + # ARROW-5125 + d1 = datetime.date(3200, 1, 1) + d2 = datetime.date(1960, 1, 1) + + for ty in [pa.date32(), pa.date64()]: + for d in [d1, d2]: + s = pa.scalar(d, type=ty) + assert s.as_py() == d + + +def 
test_date_cast(): + # ARROW-10472 - casting fo scalars doesn't segfault + scalar = pa.scalar(datetime.datetime(2012, 1, 1), type=pa.timestamp("us")) + expected = datetime.date(2012, 1, 1) + for ty in [pa.date32(), pa.date64()]: + result = scalar.cast(ty) + assert result.as_py() == expected + + +def test_time_from_datetime_time(): + t1 = datetime.time(18, 0) + t2 = datetime.time(21, 0) + + types = [pa.time32('s'), pa.time32('ms'), pa.time64('us'), pa.time64('ns')] + for ty in types: + for t in [t1, t2]: + s = pa.scalar(t, type=ty) + assert s.as_py() == t + + +@pytest.mark.parametrize(['value', 'time_type'], [ + (1, pa.time32("s")), + (2**30, pa.time32("s")), + (None, pa.time32("s")), + (1, pa.time32("ms")), + (2**30, pa.time32("ms")), + (None, pa.time32("ms")), + (1, pa.time64("us")), + (2**62, pa.time64("us")), + (None, pa.time64("us")), + (1, pa.time64("ns")), + (2**62, pa.time64("ns")), + (None, pa.time64("ns")), + (1, pa.date32()), + (2**30, pa.date32()), + (None, pa.date32()), + (1, pa.date64()), + (2**62, pa.date64()), + (None, pa.date64()), + (1, pa.timestamp("ns")), + (2**62, pa.timestamp("ns")), + (None, pa.timestamp("ns")), + (1, pa.duration("ns")), + (2**62, pa.duration("ns")), + (None, pa.duration("ns")), + ((1, 2, -3), pa.month_day_nano_interval()), + (None, pa.month_day_nano_interval()), +]) +def test_temporal_values(value, time_type: pa.DataType): + time_scalar = pa.scalar(value, type=time_type) + time_scalar.validate(full=True) + assert time_scalar.value == value + + +def test_cast(): + val = pa.scalar(5, type='int8') + assert val.cast('int64') == pa.scalar(5, type='int64') + assert val.cast('uint32') == pa.scalar(5, type='uint32') + assert val.cast('string') == pa.scalar('5', type='string') + with pytest.raises(ValueError): + pa.scalar('foo').cast('int32') + + +@pytest.mark.skipif(sys.platform == "win32" and not util.windows_has_tzdata(), + reason="Timezone database is not installed on Windows") +def test_cast_timestamp_to_string(): + # GH-35370 + pytest.importorskip("pytz") + import pytz + dt = datetime.datetime(2000, 1, 1, 0, 0, 0, tzinfo=pytz.utc) + ts = pa.scalar(dt, type=pa.timestamp("ns", tz="UTC")) + assert ts.cast(pa.string()) == pa.scalar('2000-01-01 00:00:00.000000000Z') + + +def test_cast_float_to_int(): + # GH-35040 + float_scalar = pa.scalar(1.5, type=pa.float64()) + unsafe_cast = float_scalar.cast(pa.int64(), safe=False) + expected_unsafe_cast = pa.scalar(1, type=pa.int64()) + assert unsafe_cast == expected_unsafe_cast + with pytest.raises(pa.ArrowInvalid): + float_scalar.cast(pa.int64()) # verify default is safe cast + + +def test_cast_int_to_float(): + # GH-34901 + int_scalar = pa.scalar(18014398509481983, type=pa.int64()) + unsafe_cast = int_scalar.cast(pa.float64(), safe=False) + expected_unsafe_cast = pa.scalar(18014398509481983.0, type=pa.float64()) + assert unsafe_cast == expected_unsafe_cast + with pytest.raises(pa.ArrowInvalid): + int_scalar.cast(pa.float64()) # verify default is safe cast + + +@pytest.mark.parametrize("typ", [pa.date32(), pa.date64()]) +def test_cast_string_to_date(typ): + scalar = pa.scalar('2021-01-01') + result = scalar.cast(typ) + assert result == pa.scalar(datetime.date(2021, 1, 1), type=typ) + + +@pytest.mark.pandas +def test_timestamp(): + import pandas as pd + arr = pd.date_range('2000-01-01 12:34:56', periods=10).values + + units = ['ns', 'us', 'ms', 's'] + + for i, unit in enumerate(units): + dtype = 'datetime64[{}]'.format(unit) + arrow_arr = pa.Array.from_pandas(arr.astype(dtype)) + expected = pd.Timestamp('2000-01-01 
12:34:56') + + assert arrow_arr[0].as_py() == expected + assert arrow_arr[0].value * 1000**i == expected.value + + tz = 'America/New_York' + arrow_type = pa.timestamp(unit, tz=tz) + + dtype = 'datetime64[{}]'.format(unit) + arrow_arr = pa.Array.from_pandas(arr.astype(dtype), type=arrow_type) + expected = (pd.Timestamp('2000-01-01 12:34:56') + .tz_localize('utc') + .tz_convert(tz)) + + assert arrow_arr[0].as_py() == expected + assert arrow_arr[0].value * 1000**i == expected.value + + +@pytest.mark.nopandas +def test_timestamp_nanos_nopandas(): + # ARROW-5450 + pytest.importorskip("pytz") + import pytz + tz = 'America/New_York' + ty = pa.timestamp('ns', tz=tz) + + # 2000-01-01 00:00:00 + 1 microsecond + s = pa.scalar(946684800000000000 + 1000, type=ty) + + tzinfo = pytz.timezone(tz) + expected = datetime.datetime(2000, 1, 1, microsecond=1, tzinfo=tzinfo) + expected = tzinfo.fromutc(expected) + result = s.as_py() + assert result == expected + assert result.year == 1999 + assert result.hour == 19 + + # Non-zero nanos yields ValueError + s = pa.scalar(946684800000000001, type=ty) + with pytest.raises(ValueError): + s.as_py() + + +def test_timestamp_no_overflow(): + # ARROW-5450 + pytest.importorskip("pytz") + import pytz + + timestamps = [ + datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=pytz.utc), + datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=pytz.utc), + datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc), + ] + for ts in timestamps: + s = pa.scalar(ts, type=pa.timestamp("us", tz="UTC")) + assert s.as_py() == ts + + +def test_timestamp_fixed_offset_print(): + # ARROW-13896 + pytest.importorskip("pytz") + arr = pa.array([0], pa.timestamp('s', tz='+02:00')) + assert str(arr[0]) == "1970-01-01 02:00:00+02:00" + + +def test_duration(): + arr = np.array([0, 3600000000000], dtype='timedelta64[ns]') + + units = ['us', 'ms', 's'] + + for i, unit in enumerate(units): + dtype = 'timedelta64[{}]'.format(unit) + arrow_arr = pa.array(arr.astype(dtype)) + expected = datetime.timedelta(seconds=60*60) + assert isinstance(arrow_arr[1].as_py(), datetime.timedelta) + assert arrow_arr[1].as_py() == expected + assert (arrow_arr[1].value * 1000**(i+1) == + expected.total_seconds() * 1e9) + + +@pytest.mark.pandas +def test_duration_nanos_pandas(): + import pandas as pd + arr = pa.array([0, 3600000000000], type=pa.duration('ns')) + expected = pd.Timedelta('1 hour') + assert isinstance(arr[1].as_py(), pd.Timedelta) + assert arr[1].as_py() == expected + assert arr[1].value == expected.value + + # Non-zero nanos work fine + arr = pa.array([946684800000000001], type=pa.duration('ns')) + assert arr[0].as_py() == pd.Timedelta(946684800000000001, unit='ns') + + +@pytest.mark.nopandas +def test_duration_nanos_nopandas(): + arr = pa.array([0, 3600000000000], pa.duration('ns')) + expected = datetime.timedelta(seconds=60*60) + assert isinstance(arr[1].as_py(), datetime.timedelta) + assert arr[1].as_py() == expected + assert arr[1].value == expected.total_seconds() * 1e9 + + # Non-zero nanos yields ValueError + arr = pa.array([946684800000000001], type=pa.duration('ns')) + with pytest.raises(ValueError): + arr[0].as_py() + + +def test_month_day_nano_interval(): + triple = pa.MonthDayNano([-3600, 1800, -50]) + arr = pa.array([triple]) + assert isinstance(arr[0].as_py(), pa.MonthDayNano) + assert arr[0].as_py() == triple + assert arr[0].value == triple + + +@pytest.mark.parametrize('value', ['foo', 'mañana']) +@pytest.mark.parametrize(('ty', 'scalar_typ'), [ + (pa.string(), pa.StringScalar), + (pa.large_string(), 
pa.LargeStringScalar), + (pa.string_view(), pa.StringViewScalar), +]) +def test_string(value, ty, scalar_typ): + s = pa.scalar(value, type=ty) + assert isinstance(s, scalar_typ) + assert s.as_py() == value + assert s.as_py() != 'something' + assert repr(value) in repr(s) + assert str(s) == str(value) + + buf = s.as_buffer() + assert isinstance(buf, pa.Buffer) + assert buf.to_pybytes() == value.encode() + + +@pytest.mark.parametrize('value', [b'foo', b'bar']) +@pytest.mark.parametrize(('ty', 'scalar_typ'), [ + (pa.binary(), pa.BinaryScalar), + (pa.large_binary(), pa.LargeBinaryScalar), + (pa.binary_view(), pa.BinaryViewScalar), +]) +def test_binary(value, ty, scalar_typ): + s = pa.scalar(value, type=ty) + assert isinstance(s, scalar_typ) + assert s.as_py() == value + assert str(s) == str(value) + assert repr(value) in repr(s) + assert s.as_py() == value + assert s != b'xxxxx' + + buf = s.as_buffer() + assert isinstance(buf, pa.Buffer) + assert buf.to_pybytes() == value + + +def test_fixed_size_binary(): + s = pa.scalar(b'foof', type=pa.binary(4)) + assert isinstance(s, pa.FixedSizeBinaryScalar) + assert s.as_py() == b'foof' + + with pytest.raises(pa.ArrowInvalid): + pa.scalar(b'foof5', type=pa.binary(4)) + + +@pytest.mark.parametrize(('ty', 'klass'), [ + (pa.list_(pa.string()), pa.ListScalar), + (pa.large_list(pa.string()), pa.LargeListScalar), + (pa.list_view(pa.string()), pa.ListViewScalar), + (pa.large_list_view(pa.string()), pa.LargeListViewScalar) +]) +def test_list(ty, klass): + v = ['foo', None] + s = pa.scalar(v, type=ty) + assert s.type == ty + assert len(s) == 2 + assert isinstance(s.values, pa.Array) + assert s.values.to_pylist() == v + assert isinstance(s, klass) + assert repr(v) in repr(s) + assert s.as_py() == v + assert s[0].as_py() == 'foo' + assert s[1].as_py() is None + assert s[-1] == s[1] + assert s[-2] == s[0] + with pytest.raises(IndexError): + s[-3] + with pytest.raises(IndexError): + s[2] + + +@pytest.mark.parametrize('ty', [ + pa.list_(pa.int64()), + pa.large_list(pa.int64()), + pa.list_view(pa.int64()), + pa.large_list_view(pa.int64()), + None +]) +def test_list_from_numpy(ty): + s = pa.scalar(np.array([1, 2, 3], dtype=np.int64()), type=ty) + if ty is None: + ty = pa.list_(pa.int64()) # expected inferred type + assert s.type == ty + assert s.as_py() == [1, 2, 3] + + +@pytest.mark.pandas +@pytest.mark.parametrize('factory', [ + pa.list_, + pa.large_list, + pa.list_view, + pa.large_list_view +]) +def test_list_from_pandas(factory): + import pandas as pd + + s = pa.scalar(pd.Series([1, 2, 3])) + assert s.as_py() == [1, 2, 3] + + cases = [ + (np.nan, 'null'), + (['string', np.nan], factory(pa.binary())), + (['string', np.nan], factory(pa.utf8())), + ([b'string', np.nan], factory(pa.binary(6))), + ([True, np.nan], factory(pa.bool_())), + ([decimal.Decimal('0'), np.nan], factory(pa.decimal128(12, 2))), + ] + for case, ty in cases: + # Both types of exceptions are raised. 
May want to clean that up + with pytest.raises((ValueError, TypeError)): + pa.scalar(case, type=ty) + + # from_pandas option suppresses failure + s = pa.scalar(case, type=ty, from_pandas=True) + + +def test_fixed_size_list(): + s = pa.scalar([1, None, 3], type=pa.list_(pa.int64(), 3)) + + assert len(s) == 3 + assert isinstance(s, pa.FixedSizeListScalar) + assert repr(s) == "<pyarrow.FixedSizeListScalar: [1, None, 3]>" + assert s.as_py() == [1, None, 3] + assert s[0].as_py() == 1 + assert s[1].as_py() is None + assert s[-1] == s[2] + with pytest.raises(IndexError): + s[-4] + with pytest.raises(IndexError): + s[3] + + +def test_struct(): + ty = pa.struct([ + pa.field('x', pa.int16()), + pa.field('y', pa.float32()) + ]) + + v = {'x': 2, 'y': 3.5} + s = pa.scalar(v, type=ty) + assert list(s) == list(s.keys()) == ['x', 'y'] + assert list(s.values()) == [ + pa.scalar(2, type=pa.int16()), + pa.scalar(3.5, type=pa.float32()) + ] + assert list(s.items()) == [ + ('x', pa.scalar(2, type=pa.int16())), + ('y', pa.scalar(3.5, type=pa.float32())) + ] + assert 'x' in s + assert 'y' in s + assert 'z' not in s + assert 0 not in s + + assert s.as_py() == v + assert repr(s) != repr(v) + assert repr(s.as_py()) == repr(v) + assert len(s) == 2 + assert isinstance(s['x'], pa.Int16Scalar) + assert isinstance(s['y'], pa.FloatScalar) + assert s['x'].as_py() == 2 + assert s['y'].as_py() == 3.5 + + with pytest.raises(KeyError): + s['nonexistent'] + + s = pa.scalar(None, type=ty) + assert list(s) == list(s.keys()) == ['x', 'y'] + assert s.as_py() is None + assert 'x' in s + assert 'y' in s + assert isinstance(s['x'], pa.Int16Scalar) + assert isinstance(s['y'], pa.FloatScalar) + assert s['x'].is_valid is False + assert s['y'].is_valid is False + assert s['x'].as_py() is None + assert s['y'].as_py() is None + + +def test_struct_duplicate_fields(): + ty = pa.struct([ + pa.field('x', pa.int16()), + pa.field('y', pa.float32()), + pa.field('x', pa.int64()), + ]) + s = pa.scalar([('x', 1), ('y', 2.0), ('x', 3)], type=ty) + + assert list(s) == list(s.keys()) == ['x', 'y', 'x'] + assert len(s) == 3 + assert s == s + assert list(s.items()) == [ + ('x', pa.scalar(1, pa.int16())), + ('y', pa.scalar(2.0, pa.float32())), + ('x', pa.scalar(3, pa.int64())) + ] + + assert 'x' in s + assert 'y' in s + assert 'z' not in s + assert 0 not in s + + # getitem with field names fails for duplicate fields, works for others + with pytest.raises(KeyError): + s['x'] + + assert isinstance(s['y'], pa.FloatScalar) + assert s['y'].as_py() == 2.0 + + # getitem with integer index works for all fields + assert isinstance(s[0], pa.Int16Scalar) + assert s[0].as_py() == 1 + assert isinstance(s[1], pa.FloatScalar) + assert s[1].as_py() == 2.0 + assert isinstance(s[2], pa.Int64Scalar) + assert s[2].as_py() == 3 + + assert "pyarrow.StructScalar" in repr(s) + + with pytest.raises(ValueError, match="duplicate field names"): + s.as_py() + + +def test_map(pickle_module): + ty = pa.map_(pa.string(), pa.int8()) + v = [('a', 1), ('b', 2)] + s = pa.scalar(v, type=ty) + + assert len(s) == 2 + assert isinstance(s, pa.MapScalar) + assert isinstance(s.values, pa.Array) + assert repr(s) == "<pyarrow.MapScalar: [('a', 1), ('b', 2)]>" + assert s.values.to_pylist() == [ + {'key': 'a', 'value': 1}, + {'key': 'b', 'value': 2} + ] + + # test iteration + for i, j in zip(s, v): + assert i == j + + # test iteration with missing values + for _ in pa.scalar(None, type=ty): + pass + + assert s.as_py() == v + assert s[1] == ( + pa.scalar('b', type=pa.string()), + pa.scalar(2, type=pa.int8()) + ) + assert s[-1] == s[1] + assert s[-2] == s[0] + with 
pytest.raises(IndexError): + s[-3] + with pytest.raises(IndexError): + s[2] + + restored = pickle_module.loads(pickle_module.dumps(s)) + assert restored.equals(s) + + +def test_dictionary(pickle_module): + indices = pa.array([2, None, 1, 2, 0, None]) + dictionary = pa.array(['foo', 'bar', 'baz']) + + arr = pa.DictionaryArray.from_arrays(indices, dictionary) + expected = ['baz', None, 'bar', 'baz', 'foo', None] + assert arr.to_pylist() == expected + + for j, (i, v) in enumerate(zip(indices, expected)): + s = arr[j] + + assert s.as_py() == v + assert s.value.as_py() == v + assert s.index.equals(i) + assert s.dictionary.equals(dictionary) + + restored = pickle_module.loads(pickle_module.dumps(s)) + assert restored.equals(s) + + +def test_run_end_encoded(): + run_ends = [3, 5, 10, 12, 19] + values = [1, 2, 1, None, 3] + arr = pa.RunEndEncodedArray.from_arrays(run_ends, values) + + scalar = arr[0] + assert isinstance(scalar, pa.RunEndEncodedScalar) + assert isinstance(scalar.value, pa.Int64Scalar) + assert scalar.value == pa.array(values)[0] + assert scalar.as_py() == 1 + + # null -> .value is still a scalar, as_py returns None + scalar = arr[10] + assert isinstance(scalar.value, pa.Int64Scalar) + assert scalar.as_py() is None + + # constructing a scalar directly doesn't work yet + with pytest.raises(NotImplementedError): + pa.scalar(1, pa.run_end_encoded(pa.int64(), pa.int64())) + + +def test_union(pickle_module): + # sparse + arr = pa.UnionArray.from_sparse( + pa.array([0, 0, 1, 1], type=pa.int8()), + [ + pa.array(["a", "b", "c", "d"]), + pa.array([1, 2, 3, 4]) + ] + ) + for s in arr: + s.validate(full=True) + assert isinstance(s, pa.UnionScalar) + assert s.type.equals(arr.type) + assert s.is_valid is True + with pytest.raises(pa.ArrowNotImplementedError): + pickle_module.loads(pickle_module.dumps(s)) + + assert arr[0].type_code == 0 + assert arr[0].as_py() == "a" + assert arr[1].type_code == 0 + assert arr[1].as_py() == "b" + assert arr[2].type_code == 1 + assert arr[2].as_py() == 3 + assert arr[3].type_code == 1 + assert arr[3].as_py() == 4 + + # dense + arr = pa.UnionArray.from_dense( + types=pa.array([0, 1, 0, 0, 1, 1, 0], type='int8'), + value_offsets=pa.array([0, 0, 2, 1, 1, 2, 3], type='int32'), + children=[ + pa.array([b'a', b'b', b'c', b'd'], type='binary'), + pa.array([1, 2, 3], type='int64') + ] + ) + for s in arr: + s.validate(full=True) + assert isinstance(s, pa.UnionScalar) + assert s.type.equals(arr.type) + assert s.is_valid is True + with pytest.raises(pa.ArrowNotImplementedError): + pickle_module.loads(pickle_module.dumps(s)) + + assert arr[0].type_code == 0 + assert arr[0].as_py() == b'a' + assert arr[5].type_code == 1 + assert arr[5].as_py() == 3 + + +def test_map_scalar_as_py_with_custom_field_name(): + """ + Check we can call `MapScalar.as_py` with custom field names + + See https://github.com/apache/arrow/issues/36809 + """ + assert pa.scalar( + [("foo", "bar")], + pa.map_( + pa.string(), + pa.string() + ), + ).as_py() == [("foo", "bar")] + + assert pa.scalar( + [("foo", "bar")], + pa.map_( + pa.field("custom_key", pa.string(), nullable=False), + pa.field("custom_value", pa.string()), + ), + ).as_py() == [("foo", "bar")] diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_schema.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..8793c9e773c1d91faff4c5b9ed723454c381eb85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_schema.py @@ -0,0 +1,744 @@ +# 
Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from collections import OrderedDict +import sys +import weakref + +import pytest +import numpy as np +import pyarrow as pa + +import pyarrow.tests.util as test_util +from pyarrow.vendored.version import Version + +try: + import pandas as pd +except ImportError: + pass + + +def test_schema_constructor_errors(): + msg = ("Do not call Schema's constructor directly, use `pyarrow.schema` " + "instead") + with pytest.raises(TypeError, match=msg): + pa.Schema() + + +def test_type_integers(): + dtypes = ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64'] + + for name in dtypes: + factory = getattr(pa, name) + t = factory() + assert str(t) == name + + +@pytest.mark.pandas +def test_type_to_pandas_dtype(): + M8 = np.dtype('datetime64[ms]') + if Version(pd.__version__) < Version("2.0.0"): + M8 = np.dtype('datetime64[ns]') + cases = [ + (pa.null(), np.object_), + (pa.bool_(), np.bool_), + (pa.int8(), np.int8), + (pa.int16(), np.int16), + (pa.int32(), np.int32), + (pa.int64(), np.int64), + (pa.uint8(), np.uint8), + (pa.uint16(), np.uint16), + (pa.uint32(), np.uint32), + (pa.uint64(), np.uint64), + (pa.float16(), np.float16), + (pa.float32(), np.float32), + (pa.float64(), np.float64), + (pa.date32(), M8), + (pa.date64(), M8), + (pa.timestamp('ms'), M8), + (pa.binary(), np.object_), + (pa.binary(12), np.object_), + (pa.string(), np.object_), + (pa.list_(pa.int8()), np.object_), + # (pa.list_(pa.int8(), 2), np.object_), # TODO needs pandas conversion + (pa.map_(pa.int64(), pa.float64()), np.object_), + ] + for arrow_type, numpy_type in cases: + assert arrow_type.to_pandas_dtype() == numpy_type + + +@pytest.mark.pandas +def test_type_to_pandas_dtype_check_import(): + # ARROW-7980 + test_util.invoke_script('arrow_7980.py') + + +def test_type_list(): + value_type = pa.int32() + list_type = pa.list_(value_type) + assert str(list_type) == 'list<item: int32>' + + field = pa.field('my_item', pa.string()) + l2 = pa.list_(field) + assert str(l2) == 'list<my_item: string>' + + +def test_type_comparisons(): + val = pa.int32() + assert val == pa.int32() + assert val == 'int32' + assert val != 5 + + +def test_type_for_alias(): + cases = [ + ('i1', pa.int8()), + ('int8', pa.int8()), + ('i2', pa.int16()), + ('int16', pa.int16()), + ('i4', pa.int32()), + ('int32', pa.int32()), + ('i8', pa.int64()), + ('int64', pa.int64()), + ('u1', pa.uint8()), + ('uint8', pa.uint8()), + ('u2', pa.uint16()), + ('uint16', pa.uint16()), + ('u4', pa.uint32()), + ('uint32', pa.uint32()), + ('u8', pa.uint64()), + ('uint64', pa.uint64()), + ('f4', pa.float32()), + ('float32', pa.float32()), + ('f8', pa.float64()), + ('float64', pa.float64()), + ('date32', pa.date32()), + ('date64', pa.date64()), + ('string', pa.string()), + ('str', pa.string()), + ('binary', 
pa.binary()), + ('time32[s]', pa.time32('s')), + ('time32[ms]', pa.time32('ms')), + ('time64[us]', pa.time64('us')), + ('time64[ns]', pa.time64('ns')), + ('timestamp[s]', pa.timestamp('s')), + ('timestamp[ms]', pa.timestamp('ms')), + ('timestamp[us]', pa.timestamp('us')), + ('timestamp[ns]', pa.timestamp('ns')), + ('duration[s]', pa.duration('s')), + ('duration[ms]', pa.duration('ms')), + ('duration[us]', pa.duration('us')), + ('duration[ns]', pa.duration('ns')), + ('month_day_nano_interval', pa.month_day_nano_interval()), + ] + + for val, expected in cases: + assert pa.type_for_alias(val) == expected + + +def test_type_string(): + t = pa.string() + assert str(t) == 'string' + + +def test_type_timestamp_with_tz(): + tz = 'America/Los_Angeles' + t = pa.timestamp('ns', tz=tz) + assert t.unit == 'ns' + assert t.tz == tz + + +def test_time_types(): + t1 = pa.time32('s') + t2 = pa.time32('ms') + t3 = pa.time64('us') + t4 = pa.time64('ns') + + assert t1.unit == 's' + assert t2.unit == 'ms' + assert t3.unit == 'us' + assert t4.unit == 'ns' + + assert str(t1) == 'time32[s]' + assert str(t4) == 'time64[ns]' + + with pytest.raises(ValueError): + pa.time32('us') + + with pytest.raises(ValueError): + pa.time64('s') + + +def test_from_numpy_dtype(): + cases = [ + (np.dtype('bool'), pa.bool_()), + (np.dtype('int8'), pa.int8()), + (np.dtype('int16'), pa.int16()), + (np.dtype('int32'), pa.int32()), + (np.dtype('int64'), pa.int64()), + (np.dtype('uint8'), pa.uint8()), + (np.dtype('uint16'), pa.uint16()), + (np.dtype('uint32'), pa.uint32()), + (np.dtype('float16'), pa.float16()), + (np.dtype('float32'), pa.float32()), + (np.dtype('float64'), pa.float64()), + (np.dtype('U'), pa.string()), + (np.dtype('S'), pa.binary()), + (np.dtype('datetime64[s]'), pa.timestamp('s')), + (np.dtype('datetime64[ms]'), pa.timestamp('ms')), + (np.dtype('datetime64[us]'), pa.timestamp('us')), + (np.dtype('datetime64[ns]'), pa.timestamp('ns')), + (np.dtype('timedelta64[s]'), pa.duration('s')), + (np.dtype('timedelta64[ms]'), pa.duration('ms')), + (np.dtype('timedelta64[us]'), pa.duration('us')), + (np.dtype('timedelta64[ns]'), pa.duration('ns')), + ] + + for dt, pt in cases: + result = pa.from_numpy_dtype(dt) + assert result == pt + + # Things convertible to numpy dtypes work + assert pa.from_numpy_dtype('U') == pa.string() + assert pa.from_numpy_dtype(np.str_) == pa.string() + assert pa.from_numpy_dtype('int32') == pa.int32() + assert pa.from_numpy_dtype(bool) == pa.bool_() + + with pytest.raises(NotImplementedError): + pa.from_numpy_dtype(np.dtype('O')) + + with pytest.raises(TypeError): + pa.from_numpy_dtype('not_convertible_to_dtype') + + +def test_schema(): + fields = [ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())) + ] + sch = pa.schema(fields) + + assert sch.names == ['foo', 'bar', 'baz'] + assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())] + + assert len(sch) == 3 + assert sch[0].name == 'foo' + assert sch[0].type == fields[0].type + assert sch.field('foo').name == 'foo' + assert sch.field('foo').type == fields[0].type + + assert repr(sch) == """\ +foo: int32 +bar: string +baz: list<item: int8> + child 0, item: int8""" + + with pytest.raises(TypeError): + pa.schema([None]) + + +def test_schema_weakref(): + fields = [ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())) + ] + schema = pa.schema(fields) + wr = weakref.ref(schema) + assert wr() is not None + del schema + assert wr() is None + + +def 
test_schema_to_string_with_metadata(): + lorem = """\ +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla accumsan vel +turpis et mollis. Aliquam tincidunt arcu id tortor blandit blandit. Donec +eget leo quis lectus scelerisque varius. Class aptent taciti sociosqu ad +litora torquent per conubia nostra, per inceptos himenaeos. Praesent +faucibus, diam eu volutpat iaculis, tellus est porta ligula, a efficitur +turpis nulla facilisis quam. Aliquam vitae lorem erat. Proin a dolor ac libero +dignissim mollis vitae eu mauris. Quisque posuere tellus vitae massa +pellentesque sagittis. Aenean feugiat, diam ac dignissim fermentum, lorem +sapien commodo massa, vel volutpat orci nisi eu justo. Nulla non blandit +sapien. Quisque pretium vestibulum urna eu vehicula.""" + # ARROW-7063 + my_schema = pa.schema([pa.field("foo", "int32", False, + metadata={"key1": "value1"}), + pa.field("bar", "string", True, + metadata={"key3": "value3"})], + metadata={"lorem": lorem}) + + assert my_schema.to_string() == """\ +foo: int32 not null + -- field metadata -- + key1: 'value1' +bar: string + -- field metadata -- + key3: 'value3' +-- schema metadata -- +lorem: '""" + lorem[:65] + "' + " + str(len(lorem) - 65) + + # Metadata that exactly fits + result = pa.schema([('f0', 'int32')], + metadata={'key': 'value' + 'x' * 62}).to_string() + assert result == """\ +f0: int32 +-- schema metadata -- +key: 'valuexxxxxxxxxxxxxxxxxxxxxxxxxxxxx\ +xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'""" + + assert my_schema.to_string(truncate_metadata=False) == """\ +foo: int32 not null + -- field metadata -- + key1: 'value1' +bar: string + -- field metadata -- + key3: 'value3' +-- schema metadata -- +lorem: '{}'""".format(lorem) + + assert my_schema.to_string(truncate_metadata=False, + show_field_metadata=False) == """\ +foo: int32 not null +bar: string +-- schema metadata -- +lorem: '{}'""".format(lorem) + + assert my_schema.to_string(truncate_metadata=False, + show_schema_metadata=False) == """\ +foo: int32 not null + -- field metadata -- + key1: 'value1' +bar: string + -- field metadata -- + key3: 'value3'""" + + assert my_schema.to_string(truncate_metadata=False, + show_field_metadata=False, + show_schema_metadata=False) == """\ +foo: int32 not null +bar: string""" + + +def test_schema_from_tuples(): + fields = [ + ('foo', pa.int32()), + ('bar', pa.string()), + ('baz', pa.list_(pa.int8())), + ] + sch = pa.schema(fields) + assert sch.names == ['foo', 'bar', 'baz'] + assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())] + assert len(sch) == 3 + assert repr(sch) == """\ +foo: int32 +bar: string +baz: list<item: int8> + child 0, item: int8""" + + with pytest.raises(TypeError): + pa.schema([('foo', None)]) + + +def test_schema_from_mapping(): + fields = OrderedDict([ + ('foo', pa.int32()), + ('bar', pa.string()), + ('baz', pa.list_(pa.int8())), + ]) + sch = pa.schema(fields) + assert sch.names == ['foo', 'bar', 'baz'] + assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())] + assert len(sch) == 3 + assert repr(sch) == """\ +foo: int32 +bar: string +baz: list<item: int8> + child 0, item: int8""" + + fields = OrderedDict([('foo', None)]) + with pytest.raises(TypeError): + pa.schema(fields) + + +def test_schema_duplicate_fields(): + fields = [ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('foo', pa.list_(pa.int8())), + ] + sch = pa.schema(fields) + assert sch.names == ['foo', 'bar', 'foo'] + assert sch.types == [pa.int32(), pa.string(), pa.list_(pa.int8())] + assert len(sch) == 3 + assert repr(sch) == """\ 
+foo: int32 +bar: string +foo: list<item: int8> + child 0, item: int8""" + + assert sch[0].name == 'foo' + assert sch[0].type == fields[0].type + with pytest.warns(FutureWarning): + assert sch.field_by_name('bar') == fields[1] + with pytest.warns(FutureWarning): + assert sch.field_by_name('xxx') is None + with pytest.warns((UserWarning, FutureWarning)): + assert sch.field_by_name('foo') is None + + # Schema::GetFieldIndex + assert sch.get_field_index('foo') == -1 + + # Schema::GetAllFieldIndices + assert sch.get_all_field_indices('foo') == [0, 2] + + +def test_field_flatten(): + f0 = pa.field('foo', pa.int32()).with_metadata({b'foo': b'bar'}) + assert f0.flatten() == [f0] + + f1 = pa.field('bar', pa.float64(), nullable=False) + ff = pa.field('ff', pa.struct([f0, f1]), nullable=False) + assert ff.flatten() == [ + pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}), + pa.field('ff.bar', pa.float64(), nullable=False)] # XXX + + # Nullable parent makes flattened child nullable + ff = pa.field('ff', pa.struct([f0, f1])) + assert ff.flatten() == [ + pa.field('ff.foo', pa.int32()).with_metadata({b'foo': b'bar'}), + pa.field('ff.bar', pa.float64())] + + fff = pa.field('fff', pa.struct([ff])) + assert fff.flatten() == [pa.field('fff.ff', pa.struct([f0, f1]))] + + +def test_schema_add_remove_metadata(): + fields = [ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())) + ] + + s1 = pa.schema(fields) + + assert s1.metadata is None + + metadata = {b'foo': b'bar', b'pandas': b'badger'} + + s2 = s1.with_metadata(metadata) + assert s2.metadata == metadata + + s3 = s2.remove_metadata() + assert s3.metadata is None + + # idempotent + s4 = s3.remove_metadata() + assert s4.metadata is None + + +def test_schema_equals(): + fields = [ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())) + ] + metadata = {b'foo': b'bar', b'pandas': b'badger'} + + sch1 = pa.schema(fields) + sch2 = pa.schema(fields) + sch3 = pa.schema(fields, metadata=metadata) + sch4 = pa.schema(fields, metadata=metadata) + + assert sch1.equals(sch2, check_metadata=True) + assert sch3.equals(sch4, check_metadata=True) + assert sch1.equals(sch3) + assert not sch1.equals(sch3, check_metadata=True) + + del fields[-1] + sch3 = pa.schema(fields) + assert not sch1.equals(sch3) + + +def test_schema_equals_propagates_check_metadata(): + # ARROW-4088 + schema1 = pa.schema([ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()) + ]) + schema2 = pa.schema([ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string(), metadata={'a': 'alpha'}), + ]) + assert not schema1.equals(schema2, check_metadata=True) + assert schema1.equals(schema2) + + +def test_schema_equals_invalid_type(): + # ARROW-5873 + schema = pa.schema([pa.field("a", pa.int64())]) + + for val in [None, 'string', pa.array([1, 2])]: + with pytest.raises(TypeError): + schema.equals(val) + + +def test_schema_equality_operators(): + fields = [ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())) + ] + metadata = {b'foo': b'bar', b'pandas': b'badger'} + + sch1 = pa.schema(fields) + sch2 = pa.schema(fields) + sch3 = pa.schema(fields, metadata=metadata) + sch4 = pa.schema(fields, metadata=metadata) + + assert sch1 == sch2 + assert sch3 == sch4 + + # __eq__ and __ne__ do not check metadata + assert sch1 == sch3 + assert not sch1 != sch3 + + assert sch2 == sch4 + + # comparison with other types 
doesn't raise + assert sch1 != [] + assert sch3 != 'foo' + + +def test_schema_get_fields(): + fields = [ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())) + ] + + schema = pa.schema(fields) + + assert schema.field('foo').name == 'foo' + assert schema.field(0).name == 'foo' + assert schema.field(-1).name == 'baz' + + with pytest.raises(KeyError): + schema.field('other') + with pytest.raises(TypeError): + schema.field(0.0) + with pytest.raises(IndexError): + schema.field(4) + + +def test_schema_negative_indexing(): + fields = [ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())) + ] + + schema = pa.schema(fields) + + assert schema[-1].equals(schema[2]) + assert schema[-2].equals(schema[1]) + assert schema[-3].equals(schema[0]) + + with pytest.raises(IndexError): + schema[-4] + + with pytest.raises(IndexError): + schema[3] + + +def test_schema_repr_with_dictionaries(): + fields = [ + pa.field('one', pa.dictionary(pa.int16(), pa.string())), + pa.field('two', pa.int32()) + ] + sch = pa.schema(fields) + + expected = ( + """\ +one: dictionary<values=string, indices=int16, ordered=0> +two: int32""") + + assert repr(sch) == expected + + +def test_type_schema_pickling(pickle_module): + cases = [ + pa.int8(), + pa.string(), + pa.binary(), + pa.binary(10), + pa.list_(pa.string()), + pa.map_(pa.string(), pa.int8()), + pa.struct([ + pa.field('a', 'int8'), + pa.field('b', 'string') + ]), + pa.union([ + pa.field('a', pa.int8()), + pa.field('b', pa.int16()) + ], pa.lib.UnionMode_SPARSE), + pa.union([ + pa.field('a', pa.int8()), + pa.field('b', pa.int16()) + ], pa.lib.UnionMode_DENSE), + pa.time32('s'), + pa.time64('us'), + pa.date32(), + pa.date64(), + pa.timestamp('ms'), + pa.timestamp('ns'), + pa.decimal128(12, 2), + pa.decimal256(76, 38), + pa.field('a', 'string', metadata={b'foo': b'bar'}), + pa.list_(pa.field("element", pa.int64())), + pa.large_list(pa.field("element", pa.int64())), + pa.map_(pa.field("key", pa.string(), nullable=False), + pa.field("value", pa.int8())) + ] + + for val in cases: + roundtripped = pickle_module.loads(pickle_module.dumps(val)) + assert val == roundtripped + + fields = [] + for i, f in enumerate(cases): + if isinstance(f, pa.Field): + fields.append(f) + else: + fields.append(pa.field('_f{}'.format(i), f)) + + schema = pa.schema(fields, metadata={b'foo': b'bar'}) + roundtripped = pickle_module.loads(pickle_module.dumps(schema)) + assert schema == roundtripped + + +def test_empty_table(): + schema1 = pa.schema([ + pa.field('f0', pa.int64()), + pa.field('f1', pa.dictionary(pa.int32(), pa.string())), + pa.field('f2', pa.list_(pa.list_(pa.int64()))), + ]) + # test it preserves field nullability + schema2 = pa.schema([ + pa.field('a', pa.int64(), nullable=False), + pa.field('b', pa.int64()) + ]) + + for schema in [schema1, schema2]: + table = schema.empty_table() + assert isinstance(table, pa.Table) + assert table.num_rows == 0 + assert table.schema == schema + + +@pytest.mark.pandas +def test_schema_from_pandas(): + import pandas as pd + inputs = [ + list(range(10)), + pd.Categorical(list(range(10))), + ['foo', 'bar', None, 'baz', 'qux'], + np.array([ + '2007-07-13T01:23:34.123456789', + '2006-01-13T12:34:56.432539784', + '2010-08-13T05:46:57.437699912' + ], dtype='datetime64[ns]'), + pd.array([1, 2, None], dtype=pd.Int32Dtype()), + ] + for data in inputs: + df = pd.DataFrame({'a': data}, index=data) + schema = pa.Schema.from_pandas(df) + expected = pa.Table.from_pandas(df).schema + assert schema == expected + + +def 
test_schema_sizeof(): + schema = pa.schema([ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + ]) + + # Note: pa.schema is twice as large on 64-bit systems + assert sys.getsizeof(schema) > (30 if sys.maxsize > 2**32 else 15) + + schema2 = schema.with_metadata({"key": "some metadata"}) + assert sys.getsizeof(schema2) > sys.getsizeof(schema) + schema3 = schema.with_metadata({"key": "some more metadata"}) + assert sys.getsizeof(schema3) > sys.getsizeof(schema2) + + +def test_schema_merge(): + a = pa.schema([ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())) + ]) + b = pa.schema([ + pa.field('foo', pa.int32()), + pa.field('qux', pa.bool_()) + ]) + c = pa.schema([ + pa.field('quux', pa.dictionary(pa.int32(), pa.string())) + ]) + d = pa.schema([ + pa.field('foo', pa.int64()), + pa.field('qux', pa.bool_()) + ]) + + result = pa.unify_schemas([a, b, c]) + expected = pa.schema([ + pa.field('foo', pa.int32()), + pa.field('bar', pa.string()), + pa.field('baz', pa.list_(pa.int8())), + pa.field('qux', pa.bool_()), + pa.field('quux', pa.dictionary(pa.int32(), pa.string())) + ]) + assert result.equals(expected) + + with pytest.raises(pa.ArrowTypeError): + pa.unify_schemas([b, d]) + + # ARROW-14002: Try with tuple instead of list + result = pa.unify_schemas((a, b, c)) + assert result.equals(expected) + + result = pa.unify_schemas([b, d], promote_options="permissive") + assert result.equals(d) + + # raise proper error when passing a non-Schema value + with pytest.raises(TypeError): + pa.unify_schemas([a, 1]) + + +def test_undecodable_metadata(): + # ARROW-10214: undecodable metadata shouldn't fail repr() + data1 = b'abcdef\xff\x00' + data2 = b'ghijkl\xff\x00' + schema = pa.schema( + [pa.field('ints', pa.int16(), metadata={'key': data1})], + metadata={'key': data2}) + assert 'abcdef' in str(schema) + assert 'ghijkl' in str(schema) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_sparse_tensor.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_sparse_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..aa7da0a7420867ec7c4486db20d1046199828c19 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_sparse_tensor.py @@ -0,0 +1,491 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import pytest +import sys +import weakref + +import numpy as np +import pyarrow as pa + +try: + from scipy.sparse import csr_matrix, coo_matrix +except ImportError: + coo_matrix = None + csr_matrix = None + +try: + import sparse +except ImportError: + sparse = None + + +tensor_type_pairs = [ + ('i1', pa.int8()), + ('i2', pa.int16()), + ('i4', pa.int32()), + ('i8', pa.int64()), + ('u1', pa.uint8()), + ('u2', pa.uint16()), + ('u4', pa.uint32()), + ('u8', pa.uint64()), + ('f2', pa.float16()), + ('f4', pa.float32()), + ('f8', pa.float64()) +] + + +@pytest.mark.parametrize('sparse_tensor_type', [ + pa.SparseCSRMatrix, + pa.SparseCSCMatrix, + pa.SparseCOOTensor, + pa.SparseCSFTensor, +]) +def test_sparse_tensor_attrs(sparse_tensor_type): + data = np.array([ + [8, 0, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 5], + [3, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 6], + ]) + dim_names = ('x', 'y') + sparse_tensor = sparse_tensor_type.from_dense_numpy(data, dim_names) + + assert sparse_tensor.ndim == 2 + assert sparse_tensor.size == 24 + assert sparse_tensor.shape == data.shape + assert sparse_tensor.is_mutable + assert sparse_tensor.dim_name(0) == dim_names[0] + assert sparse_tensor.dim_names == dim_names + assert sparse_tensor.non_zero_length == 6 + + wr = weakref.ref(sparse_tensor) + assert wr() is not None + del sparse_tensor + assert wr() is None + + +def test_sparse_coo_tensor_base_object(): + expected_data = np.array([[8, 2, 5, 3, 4, 6]]).T + expected_coords = np.array([ + [0, 0, 1, 2, 3, 3], + [0, 2, 5, 0, 4, 5], + ]).T + array = np.array([ + [8, 0, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 5], + [3, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 6], + ]) + sparse_tensor = pa.SparseCOOTensor.from_dense_numpy(array) + n = sys.getrefcount(sparse_tensor) + result_data, result_coords = sparse_tensor.to_numpy() + assert sparse_tensor.has_canonical_format + assert sys.getrefcount(sparse_tensor) == n + 2 + + sparse_tensor = None + assert np.array_equal(expected_data, result_data) + assert np.array_equal(expected_coords, result_coords) + assert result_coords.flags.c_contiguous # row-major + + +def test_sparse_csr_matrix_base_object(): + data = np.array([[8, 2, 5, 3, 4, 6]]).T + indptr = np.array([0, 2, 3, 4, 6]) + indices = np.array([0, 2, 5, 0, 4, 5]) + array = np.array([ + [8, 0, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 5], + [3, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 6], + ]) + sparse_tensor = pa.SparseCSRMatrix.from_dense_numpy(array) + n = sys.getrefcount(sparse_tensor) + result_data, result_indptr, result_indices = sparse_tensor.to_numpy() + assert sys.getrefcount(sparse_tensor) == n + 3 + + sparse_tensor = None + assert np.array_equal(data, result_data) + assert np.array_equal(indptr, result_indptr) + assert np.array_equal(indices, result_indices) + + +def test_sparse_csf_tensor_base_object(): + data = np.array([[8, 2, 5, 3, 4, 6]]).T + indptr = [np.array([0, 2, 3, 4, 6])] + indices = [ + np.array([0, 1, 2, 3]), + np.array([0, 2, 5, 0, 4, 5]) + ] + array = np.array([ + [8, 0, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 5], + [3, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 6], + ]) + sparse_tensor = pa.SparseCSFTensor.from_dense_numpy(array) + n = sys.getrefcount(sparse_tensor) + result_data, result_indptr, result_indices = sparse_tensor.to_numpy() + assert sys.getrefcount(sparse_tensor) == n + 4 + + sparse_tensor = None + assert np.array_equal(data, result_data) + assert np.array_equal(indptr[0], result_indptr[0]) + assert np.array_equal(indices[0], result_indices[0]) + assert np.array_equal(indices[1], result_indices[1]) + + 
+@pytest.mark.parametrize('sparse_tensor_type', [ + pa.SparseCSRMatrix, + pa.SparseCSCMatrix, + pa.SparseCOOTensor, + pa.SparseCSFTensor, +]) +def test_sparse_tensor_equals(sparse_tensor_type): + def eq(a, b): + assert a.equals(b) + assert a == b + assert not (a != b) + + def ne(a, b): + assert not a.equals(b) + assert not (a == b) + assert a != b + + data = np.random.randn(10, 6)[::, ::2] + sparse_tensor1 = sparse_tensor_type.from_dense_numpy(data) + sparse_tensor2 = sparse_tensor_type.from_dense_numpy( + np.ascontiguousarray(data)) + eq(sparse_tensor1, sparse_tensor2) + data = data.copy() + data[9, 0] = 1.0 + sparse_tensor2 = sparse_tensor_type.from_dense_numpy( + np.ascontiguousarray(data)) + ne(sparse_tensor1, sparse_tensor2) + + +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_coo_tensor_from_dense(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + expected_data = np.array([[8, 2, 5, 3, 4, 6]]).T.astype(dtype) + expected_coords = np.array([ + [0, 0, 1, 2, 3, 3], + [0, 2, 5, 0, 4, 5], + ]).T + array = np.array([ + [8, 0, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 5], + [3, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 6], + ]).astype(dtype) + tensor = pa.Tensor.from_numpy(array) + + # Test from numpy array + sparse_tensor = pa.SparseCOOTensor.from_dense_numpy(array) + repr(sparse_tensor) + result_data, result_coords = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(expected_data, result_data) + assert np.array_equal(expected_coords, result_coords) + + # Test from Tensor + sparse_tensor = pa.SparseCOOTensor.from_tensor(tensor) + repr(sparse_tensor) + result_data, result_coords = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(expected_data, result_data) + assert np.array_equal(expected_coords, result_coords) + + +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_csr_matrix_from_dense(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([[8, 2, 5, 3, 4, 6]]).T.astype(dtype) + indptr = np.array([0, 2, 3, 4, 6]) + indices = np.array([0, 2, 5, 0, 4, 5]) + array = np.array([ + [8, 0, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 5], + [3, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 6], + ]).astype(dtype) + tensor = pa.Tensor.from_numpy(array) + + # Test from numpy array + sparse_tensor = pa.SparseCSRMatrix.from_dense_numpy(array) + repr(sparse_tensor) + result_data, result_indptr, result_indices = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(data, result_data) + assert np.array_equal(indptr, result_indptr) + assert np.array_equal(indices, result_indices) + + # Test from Tensor + sparse_tensor = pa.SparseCSRMatrix.from_tensor(tensor) + repr(sparse_tensor) + result_data, result_indptr, result_indices = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(data, result_data) + assert np.array_equal(indptr, result_indptr) + assert np.array_equal(indices, result_indices) + + +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_csf_tensor_from_dense_numpy(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([[8, 2, 5, 3, 4, 6]]).T.astype(dtype) + indptr = [np.array([0, 2, 3, 4, 6])] + indices = [ + np.array([0, 1, 2, 3]), + np.array([0, 2, 5, 0, 4, 5]) + ] + array = np.array([ + [8, 0, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 5], + [3, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 6], + ]).astype(dtype) + + # Test from numpy array + sparse_tensor = 
pa.SparseCSFTensor.from_dense_numpy(array) + repr(sparse_tensor) + result_data, result_indptr, result_indices = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(data, result_data) + assert np.array_equal(indptr[0], result_indptr[0]) + assert np.array_equal(indices[0], result_indices[0]) + assert np.array_equal(indices[1], result_indices[1]) + + +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_csf_tensor_from_dense_tensor(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([[8, 2, 5, 3, 4, 6]]).T.astype(dtype) + indptr = [np.array([0, 2, 3, 4, 6])] + indices = [ + np.array([0, 1, 2, 3]), + np.array([0, 2, 5, 0, 4, 5]) + ] + array = np.array([ + [8, 0, 2, 0, 0, 0], + [0, 0, 0, 0, 0, 5], + [3, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 4, 6], + ]).astype(dtype) + tensor = pa.Tensor.from_numpy(array) + + # Test from Tensor + sparse_tensor = pa.SparseCSFTensor.from_tensor(tensor) + repr(sparse_tensor) + result_data, result_indptr, result_indices = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(data, result_data) + assert np.array_equal(indptr[0], result_indptr[0]) + assert np.array_equal(indices[0], result_indices[0]) + assert np.array_equal(indices[1], result_indices[1]) + + +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_coo_tensor_numpy_roundtrip(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([[1, 2, 3, 4, 5, 6]]).T.astype(dtype) + coords = np.array([ + [0, 0, 2, 3, 1, 3], + [0, 2, 0, 4, 5, 5], + ]).T + shape = (4, 6) + dim_names = ('x', 'y') + + sparse_tensor = pa.SparseCOOTensor.from_numpy(data, coords, shape, + dim_names) + repr(sparse_tensor) + result_data, result_coords = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(data, result_data) + assert np.array_equal(coords, result_coords) + assert sparse_tensor.dim_names == dim_names + + +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_csr_matrix_numpy_roundtrip(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([[8, 2, 5, 3, 4, 6]]).T.astype(dtype) + indptr = np.array([0, 2, 3, 4, 6]) + indices = np.array([0, 2, 5, 0, 4, 5]) + shape = (4, 6) + dim_names = ('x', 'y') + + sparse_tensor = pa.SparseCSRMatrix.from_numpy(data, indptr, indices, + shape, dim_names) + repr(sparse_tensor) + result_data, result_indptr, result_indices = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(data, result_data) + assert np.array_equal(indptr, result_indptr) + assert np.array_equal(indices, result_indices) + assert sparse_tensor.dim_names == dim_names + + +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_csf_tensor_numpy_roundtrip(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([[8, 2, 5, 3, 4, 6]]).T.astype(dtype) + indptr = [np.array([0, 2, 3, 4, 6])] + indices = [ + np.array([0, 1, 2, 3]), + np.array([0, 2, 5, 0, 4, 5]) + ] + axis_order = (0, 1) + shape = (4, 6) + dim_names = ('x', 'y') + + sparse_tensor = pa.SparseCSFTensor.from_numpy(data, indptr, indices, + shape, axis_order, + dim_names) + repr(sparse_tensor) + result_data, result_indptr, result_indices = sparse_tensor.to_numpy() + assert sparse_tensor.type == arrow_type + assert np.array_equal(data, result_data) + assert np.array_equal(indptr[0], result_indptr[0]) + assert np.array_equal(indices[0], 
result_indices[0]) + assert np.array_equal(indices[1], result_indices[1]) + assert sparse_tensor.dim_names == dim_names + + +@pytest.mark.parametrize('sparse_tensor_type', [ + pa.SparseCSRMatrix, + pa.SparseCSCMatrix, + pa.SparseCOOTensor, + pa.SparseCSFTensor, +]) +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_dense_to_sparse_tensor(dtype_str, arrow_type, sparse_tensor_type): + dtype = np.dtype(dtype_str) + array = np.array([[4, 0, 9, 0], + [0, 7, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 5]]).astype(dtype) + dim_names = ('x', 'y') + + sparse_tensor = sparse_tensor_type.from_dense_numpy(array, dim_names) + tensor = sparse_tensor.to_tensor() + result_array = tensor.to_numpy() + + assert sparse_tensor.type == arrow_type + assert tensor.type == arrow_type + assert sparse_tensor.dim_names == dim_names + assert np.array_equal(array, result_array) + + +@pytest.mark.skipif(not coo_matrix, reason="requires scipy") +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_coo_tensor_scipy_roundtrip(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([1, 2, 3, 4, 5, 6]).astype(dtype) + row = np.array([0, 0, 2, 3, 1, 3]) + col = np.array([0, 2, 0, 4, 5, 5]) + shape = (4, 6) + dim_names = ('x', 'y') + + # non-canonical sparse coo matrix + scipy_matrix = coo_matrix((data, (row, col)), shape=shape) + sparse_tensor = pa.SparseCOOTensor.from_scipy(scipy_matrix, + dim_names=dim_names) + out_scipy_matrix = sparse_tensor.to_scipy() + + assert not scipy_matrix.has_canonical_format + assert not sparse_tensor.has_canonical_format + assert not out_scipy_matrix.has_canonical_format + assert sparse_tensor.type == arrow_type + assert sparse_tensor.dim_names == dim_names + assert scipy_matrix.dtype == out_scipy_matrix.dtype + assert np.array_equal(scipy_matrix.data, out_scipy_matrix.data) + assert np.array_equal(scipy_matrix.row, out_scipy_matrix.row) + assert np.array_equal(scipy_matrix.col, out_scipy_matrix.col) + + if dtype_str == 'f2': + dense_array = \ + scipy_matrix.astype(np.float32).toarray().astype(np.float16) + else: + dense_array = scipy_matrix.toarray() + assert np.array_equal(dense_array, sparse_tensor.to_tensor().to_numpy()) + + # canonical sparse coo matrix + scipy_matrix.sum_duplicates() + sparse_tensor = pa.SparseCOOTensor.from_scipy(scipy_matrix, + dim_names=dim_names) + out_scipy_matrix = sparse_tensor.to_scipy() + + assert scipy_matrix.has_canonical_format + assert sparse_tensor.has_canonical_format + assert out_scipy_matrix.has_canonical_format + + +@pytest.mark.skipif(not csr_matrix, reason="requires scipy") +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_sparse_csr_matrix_scipy_roundtrip(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([8, 2, 5, 3, 4, 6]).astype(dtype) + indptr = np.array([0, 2, 3, 4, 6]) + indices = np.array([0, 2, 5, 0, 4, 5]) + shape = (4, 6) + dim_names = ('x', 'y') + + sparse_array = csr_matrix((data, indices, indptr), shape=shape) + sparse_tensor = pa.SparseCSRMatrix.from_scipy(sparse_array, + dim_names=dim_names) + out_sparse_array = sparse_tensor.to_scipy() + + assert sparse_tensor.type == arrow_type + assert sparse_tensor.dim_names == dim_names + assert sparse_array.dtype == out_sparse_array.dtype + assert np.array_equal(sparse_array.data, out_sparse_array.data) + assert np.array_equal(sparse_array.indptr, out_sparse_array.indptr) + assert np.array_equal(sparse_array.indices, out_sparse_array.indices) + + if dtype_str == 'f2': + dense_array = 
\ + sparse_array.astype(np.float32).toarray().astype(np.float16) + else: + dense_array = sparse_array.toarray() + assert np.array_equal(dense_array, sparse_tensor.to_tensor().to_numpy()) + + +@pytest.mark.skipif(not sparse, reason="requires pydata/sparse") +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_pydata_sparse_sparse_coo_tensor_roundtrip(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + data = np.array([1, 2, 3, 4, 5, 6]).astype(dtype) + coords = np.array([ + [0, 0, 2, 3, 1, 3], + [0, 2, 0, 4, 5, 5], + ]) + shape = (4, 6) + dim_names = ("x", "y") + + sparse_array = sparse.COO(data=data, coords=coords, shape=shape) + sparse_tensor = pa.SparseCOOTensor.from_pydata_sparse(sparse_array, + dim_names=dim_names) + out_sparse_array = sparse_tensor.to_pydata_sparse() + + assert sparse_tensor.type == arrow_type + assert sparse_tensor.dim_names == dim_names + assert sparse_array.dtype == out_sparse_array.dtype + assert np.array_equal(sparse_array.data, out_sparse_array.data) + assert np.array_equal(sparse_array.coords, out_sparse_array.coords) + assert np.array_equal(sparse_array.todense(), + sparse_tensor.to_tensor().to_numpy()) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_strategies.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_strategies.py new file mode 100644 index 0000000000000000000000000000000000000000..14fc949928c33af1e13e59022e04b4a022f11646 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_strategies.py @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import hypothesis as h + +import pyarrow as pa +import pyarrow.tests.strategies as past + + +@h.given(past.all_types) +def test_types(ty): + assert isinstance(ty, pa.lib.DataType) + + +@h.given(past.all_fields) +def test_fields(field): + assert isinstance(field, pa.lib.Field) + + +@h.given(past.all_schemas) +def test_schemas(schema): + assert isinstance(schema, pa.lib.Schema) + + +@h.given(past.all_arrays) +def test_arrays(array): + assert isinstance(array, pa.lib.Array) + + +@h.given(past.arrays(past.primitive_types, nullable=False)) +def test_array_nullability(array): + assert array.null_count == 0 + + +@h.given(past.all_chunked_arrays) +def test_chunked_arrays(chunked_array): + assert isinstance(chunked_array, pa.lib.ChunkedArray) + + +@h.given(past.all_record_batches) +def test_record_batches(record_bath): + assert isinstance(record_bath, pa.lib.RecordBatch) + + +@h.given(past.all_tables) +def test_tables(table): + assert isinstance(table, pa.lib.Table) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_substrait.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_substrait.py new file mode 100644 index 0000000000000000000000000000000000000000..40700e4741321fd45e66de8032d6cd4b8e372c2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_substrait.py @@ -0,0 +1,1077 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import pathlib + +import pytest + +import pyarrow as pa +import pyarrow.compute as pc +from pyarrow.lib import tobytes +from pyarrow.lib import ArrowInvalid, ArrowNotImplementedError + +try: + import pyarrow.substrait as substrait +except ImportError: + substrait = None + +# Marks all of the tests in this module +# Ignore these with pytest ... 
-m 'not substrait' +pytestmark = pytest.mark.substrait + + +def mock_udf_context(batch_length=10): + from pyarrow._compute import _get_udf_context + return _get_udf_context(pa.default_memory_pool(), batch_length) + + +def _write_dummy_data_to_disk(tmpdir, file_name, table): + path = os.path.join(str(tmpdir), file_name) + with pa.ipc.RecordBatchFileWriter(path, schema=table.schema) as writer: + writer.write_table(table) + return path + + +@pytest.mark.parametrize("use_threads", [True, False]) +def test_run_serialized_query(tmpdir, use_threads): + substrait_query = """ + { + "version": { "major": 9999 }, + "relations": [ + {"rel": { + "read": { + "base_schema": { + "struct": { + "types": [ + {"i64": {}} + ] + }, + "names": [ + "foo" + ] + }, + "local_files": { + "items": [ + { + "uri_file": "FILENAME_PLACEHOLDER", + "arrow": {} + } + ] + } + } + }} + ] + } + """ + + file_name = "read_data.arrow" + table = pa.table([[1, 2, 3, 4, 5]], names=['foo']) + path = _write_dummy_data_to_disk(tmpdir, file_name, table) + query = tobytes(substrait_query.replace( + "FILENAME_PLACEHOLDER", pathlib.Path(path).as_uri())) + + buf = pa._substrait._parse_json_plan(query) + + reader = substrait.run_query(buf, use_threads=use_threads) + res_tb = reader.read_all() + + assert table.select(["foo"]) == res_tb.select(["foo"]) + + +@pytest.mark.parametrize("query", (pa.py_buffer(b'buffer'), b"bytes", 1)) +def test_run_query_input_types(tmpdir, query): + + # Passing unsupported type, like int, will not segfault. + if not isinstance(query, (pa.Buffer, bytes)): + msg = f"Expected 'pyarrow.Buffer' or bytes, got '{type(query)}'" + with pytest.raises(TypeError, match=msg): + substrait.run_query(query) + return + + # Otherwise error for invalid query + msg = "ParseFromZeroCopyStream failed for substrait.Plan" + with pytest.raises(OSError, match=msg): + substrait.run_query(query) + + +def test_invalid_plan(): + query = """ + { + "relations": [ + ] + } + """ + buf = pa._substrait._parse_json_plan(tobytes(query)) + exec_message = "Plan has no relations" + with pytest.raises(ArrowInvalid, match=exec_message): + substrait.run_query(buf) + + +@pytest.mark.parametrize("use_threads", [True, False]) +def test_binary_conversion_with_json_options(tmpdir, use_threads): + substrait_query = """ + { + "version": { "major": 9999 }, + "relations": [ + {"rel": { + "read": { + "base_schema": { + "struct": { + "types": [ + {"i64": {}} + ] + }, + "names": [ + "bar" + ] + }, + "local_files": { + "items": [ + { + "uri_file": "FILENAME_PLACEHOLDER", + "arrow": {}, + "metadata" : { + "created_by" : {}, + } + } + ] + } + } + }} + ] + } + """ + + file_name = "binary_json_data.arrow" + table = pa.table([[1, 2, 3, 4, 5]], names=['bar']) + path = _write_dummy_data_to_disk(tmpdir, file_name, table) + query = tobytes(substrait_query.replace( + "FILENAME_PLACEHOLDER", pathlib.Path(path).as_uri())) + buf = pa._substrait._parse_json_plan(tobytes(query)) + + reader = substrait.run_query(buf, use_threads=use_threads) + res_tb = reader.read_all() + + assert table.select(["bar"]) == res_tb.select(["bar"]) + + +# Substrait has not finalized what the URI should be for standard functions +# In the meantime, lets just check the suffix +def has_function(fns, ext_file, fn_name): + suffix = f'{ext_file}#{fn_name}' + for fn in fns: + if fn.endswith(suffix): + return True + return False + + +def test_get_supported_functions(): + supported_functions = pa._substrait.get_supported_functions() + # It probably doesn't make sense to exhaustively verify this list but + # we can 
check a sample aggregate and a sample non-aggregate entry + assert has_function(supported_functions, + 'functions_arithmetic.yaml', 'add') + assert has_function(supported_functions, + 'functions_arithmetic.yaml', 'sum') + + +@pytest.mark.parametrize("use_threads", [True, False]) +def test_named_table(use_threads): + test_table_1 = pa.Table.from_pydict({"x": [1, 2, 3]}) + test_table_2 = pa.Table.from_pydict({"x": [4, 5, 6]}) + schema_1 = pa.schema([pa.field("x", pa.int64())]) + + def table_provider(names, schema): + if not names: + raise Exception("No names provided") + elif names[0] == "t1": + assert schema == schema_1 + return test_table_1 + elif names[1] == "t2": + return test_table_2 + else: + raise Exception("Unrecognized table name") + + substrait_query = """ + { + "version": { "major": 9999 }, + "relations": [ + {"rel": { + "read": { + "base_schema": { + "struct": { + "types": [ + {"i64": {}} + ] + }, + "names": [ + "x" + ] + }, + "namedTable": { + "names": ["t1"] + } + } + }} + ] + } + """ + + buf = pa._substrait._parse_json_plan(tobytes(substrait_query)) + reader = pa.substrait.run_query( + buf, table_provider=table_provider, use_threads=use_threads) + res_tb = reader.read_all() + assert res_tb == test_table_1 + + +def test_named_table_invalid_table_name(): + test_table_1 = pa.Table.from_pydict({"x": [1, 2, 3]}) + + def table_provider(names, _): + if not names: + raise Exception("No names provided") + elif names[0] == "t1": + return test_table_1 + else: + raise Exception("Unrecognized table name") + + substrait_query = """ + { + "version": { "major": 9999 }, + "relations": [ + {"rel": { + "read": { + "base_schema": { + "struct": { + "types": [ + {"i64": {}} + ] + }, + "names": [ + "x" + ] + }, + "namedTable": { + "names": ["t3"] + } + } + }} + ] + } + """ + + buf = pa._substrait._parse_json_plan(tobytes(substrait_query)) + exec_message = "Invalid NamedTable Source" + with pytest.raises(ArrowInvalid, match=exec_message): + substrait.run_query(buf, table_provider=table_provider) + + +def test_named_table_empty_names(): + test_table_1 = pa.Table.from_pydict({"x": [1, 2, 3]}) + + def table_provider(names, _): + if not names: + raise Exception("No names provided") + elif names[0] == "t1": + return test_table_1 + else: + raise Exception("Unrecognized table name") + + substrait_query = """ + { + "version": { "major": 9999 }, + "relations": [ + {"rel": { + "read": { + "base_schema": { + "struct": { + "types": [ + {"i64": {}} + ] + }, + "names": [ + "x" + ] + }, + "namedTable": { + "names": [] + } + } + }} + ] + } + """ + query = tobytes(substrait_query) + buf = pa._substrait._parse_json_plan(tobytes(query)) + exec_message = "names for NamedTable not provided" + with pytest.raises(ArrowInvalid, match=exec_message): + substrait.run_query(buf, table_provider=table_provider) + + +@pytest.mark.parametrize("use_threads", [True, False]) +def test_udf_via_substrait(unary_func_fixture, use_threads): + test_table = pa.Table.from_pydict({"x": [1, 2, 3]}) + + def table_provider(names, _): + if not names: + raise Exception("No names provided") + elif names[0] == "t1": + return test_table + else: + raise Exception("Unrecognized table name") + + substrait_query = b""" + { + "extensionUris": [ + { + "extensionUriAnchor": 1 + }, + { + "extensionUriAnchor": 2, + "uri": "urn:arrow:substrait_simple_extension_function" + } + ], + "extensions": [ + { + "extensionFunction": { + "extensionUriReference": 2, + "functionAnchor": 1, + "name": "y=x+1" + } + } + ], + "relations": [ + { + "root": { + "input": { + 
"project": { + "common": { + "emit": { + "outputMapping": [ + 1, + 2, + ] + } + }, + "input": { + "read": { + "baseSchema": { + "names": [ + "t", + ], + "struct": { + "types": [ + { + "i64": { + "nullability": "NULLABILITY_REQUIRED" + } + }, + ], + "nullability": "NULLABILITY_REQUIRED" + } + }, + "namedTable": { + "names": [ + "t1" + ] + } + } + }, + "expressions": [ + { + "selection": { + "directReference": { + "structField": {} + }, + "rootReference": {} + } + }, + { + "scalarFunction": { + "functionReference": 1, + "outputType": { + "i64": { + "nullability": "NULLABILITY_NULLABLE" + } + }, + "arguments": [ + { + "value": { + "selection": { + "directReference": { + "structField": {} + }, + "rootReference": {} + } + } + } + ] + } + } + ] + } + }, + "names": [ + "x", + "y", + ] + } + } + ] +} + """ + + buf = pa._substrait._parse_json_plan(substrait_query) + reader = pa.substrait.run_query( + buf, table_provider=table_provider, use_threads=use_threads) + res_tb = reader.read_all() + + function, name = unary_func_fixture + expected_tb = test_table.add_column(1, 'y', function( + mock_udf_context(10), test_table['x'])) + assert res_tb == expected_tb + + +def test_udf_via_substrait_wrong_udf_name(): + test_table = pa.Table.from_pydict({"x": [1, 2, 3]}) + + def table_provider(names, _): + if not names: + raise Exception("No names provided") + elif names[0] == "t1": + return test_table + else: + raise Exception("Unrecognized table name") + + substrait_query = b""" + { + "extensionUris": [ + { + "extensionUriAnchor": 1 + }, + { + "extensionUriAnchor": 2, + "uri": "urn:arrow:substrait_simple_extension_function" + } + ], + "extensions": [ + { + "extensionFunction": { + "extensionUriReference": 2, + "functionAnchor": 1, + "name": "wrong_udf_name" + } + } + ], + "relations": [ + { + "root": { + "input": { + "project": { + "common": { + "emit": { + "outputMapping": [ + 1, + 2, + ] + } + }, + "input": { + "read": { + "baseSchema": { + "names": [ + "t", + ], + "struct": { + "types": [ + { + "i64": { + "nullability": "NULLABILITY_REQUIRED" + } + }, + ], + "nullability": "NULLABILITY_REQUIRED" + } + }, + "namedTable": { + "names": [ + "t1" + ] + } + } + }, + "expressions": [ + { + "selection": { + "directReference": { + "structField": {} + }, + "rootReference": {} + } + }, + { + "scalarFunction": { + "functionReference": 1, + "outputType": { + "i64": { + "nullability": "NULLABILITY_NULLABLE" + } + }, + "arguments": [ + { + "value": { + "selection": { + "directReference": { + "structField": {} + }, + "rootReference": {} + } + } + } + ] + } + } + ] + } + }, + "names": [ + "x", + "y", + ] + } + } + ] +} + """ + + buf = pa._substrait._parse_json_plan(substrait_query) + with pytest.raises(pa.ArrowKeyError) as excinfo: + pa.substrait.run_query(buf, table_provider=table_provider) + assert "No function registered" in str(excinfo.value) + + +@pytest.mark.parametrize("use_threads", [True, False]) +def test_output_field_names(use_threads): + in_table = pa.Table.from_pydict({"x": [1, 2, 3]}) + + def table_provider(names, schema): + return in_table + + substrait_query = """ + { + "version": { "major": 9999 }, + "relations": [ + { + "root": { + "input": { + "read": { + "base_schema": { + "struct": { + "types": [{"i64": {}}] + }, + "names": ["x"] + }, + "namedTable": { + "names": ["t1"] + } + } + }, + "names": ["out"] + } + } + ] + } + """ + + buf = pa._substrait._parse_json_plan(tobytes(substrait_query)) + reader = pa.substrait.run_query( + buf, table_provider=table_provider, use_threads=use_threads) + res_tb = 
reader.read_all() + + expected = pa.Table.from_pydict({"out": [1, 2, 3]}) + + assert res_tb == expected + + +def test_scalar_aggregate_udf_basic(varargs_agg_func_fixture): + + test_table = pa.Table.from_pydict( + {"k": [1, 1, 2, 2], "v1": [1, 2, 3, 4], + "v2": [1.0, 1.0, 1.0, 1.0]} + ) + + def table_provider(names, _): + return test_table + + substrait_query = b""" +{ + "extensionUris": [ + { + "extensionUriAnchor": 1, + "uri": "urn:arrow:substrait_simple_extension_function" + }, + ], + "extensions": [ + { + "extensionFunction": { + "extensionUriReference": 1, + "functionAnchor": 1, + "name": "sum_mean" + } + } + ], + "relations": [ + { + "root": { + "input": { + "extensionSingle": { + "common": { + "emit": { + "outputMapping": [ + 0, + 1 + ] + } + }, + "input": { + "read": { + "baseSchema": { + "names": [ + "k", + "v1", + "v2", + ], + "struct": { + "types": [ + { + "i64": { + "nullability": "NULLABILITY_REQUIRED" + } + }, + { + "i64": { + "nullability": "NULLABILITY_NULLABLE" + } + }, + { + "fp64": { + "nullability": "NULLABILITY_NULLABLE" + } + } + ], + "nullability": "NULLABILITY_REQUIRED" + } + }, + "namedTable": { + "names": ["t1"] + } + } + }, + "detail": { + "@type": "/arrow.substrait_ext.SegmentedAggregateRel", + "segmentKeys": [ + { + "directReference": { + "structField": {} + }, + "rootReference": {} + } + ], + "measures": [ + { + "measure": { + "functionReference": 1, + "phase": "AGGREGATION_PHASE_INITIAL_TO_RESULT", + "outputType": { + "fp64": { + "nullability": "NULLABILITY_NULLABLE" + } + }, + "arguments": [ + { + "value": { + "selection": { + "directReference": { + "structField": { + "field": 1 + } + }, + "rootReference": {} + } + } + }, + { + "value": { + "selection": { + "directReference": { + "structField": { + "field": 2 + } + }, + "rootReference": {} + } + } + } + ] + } + } + ] + } + } + }, + "names": [ + "k", + "v_avg" + ] + } + } + ], +} +""" + buf = pa._substrait._parse_json_plan(substrait_query) + reader = pa.substrait.run_query( + buf, table_provider=table_provider, use_threads=False) + res_tb = reader.read_all() + + expected_tb = pa.Table.from_pydict({ + 'k': [1, 2], + 'v_avg': [2.5, 4.5] + }) + + assert res_tb == expected_tb + + +def test_hash_aggregate_udf_basic(varargs_agg_func_fixture): + + test_table = pa.Table.from_pydict( + {"t": [1, 1, 1, 1, 2, 2, 2, 2], + "k": [1, 0, 0, 1, 0, 1, 0, 1], + "v1": [1, 2, 3, 4, 5, 6, 7, 8], + "v2": [1.0, 1.0, 1.0, 1.0, 2.0, 3.0, 4.0, 5.0]} + ) + + def table_provider(names, _): + return test_table + + substrait_query = b""" +{ + "extensionUris": [ + { + "extensionUriAnchor": 1, + "uri": "urn:arrow:substrait_simple_extension_function" + }, + ], + "extensions": [ + { + "extensionFunction": { + "extensionUriReference": 1, + "functionAnchor": 1, + "name": "sum_mean" + } + } + ], + "relations": [ + { + "root": { + "input": { + "extensionSingle": { + "common": { + "emit": { + "outputMapping": [ + 0, + 1, + 2 + ] + } + }, + "input": { + "read": { + "baseSchema": { + "names": [ + "t", + "k", + "v1", + "v2", + ], + "struct": { + "types": [ + { + "i64": { + "nullability": "NULLABILITY_REQUIRED" + } + }, + { + "i64": { + "nullability": "NULLABILITY_REQUIRED" + } + }, + { + "i64": { + "nullability": "NULLABILITY_NULLABLE" + } + }, + { + "fp64": { + "nullability": "NULLABILITY_NULLABLE" + } + } + ], + "nullability": "NULLABILITY_REQUIRED" + } + }, + "namedTable": { + "names": ["t1"] + } + } + }, + "detail": { + "@type": "/arrow.substrait_ext.SegmentedAggregateRel", + "groupingKeys": [ + { + "directReference": { + "structField": { + "field": 
1 + } + }, + "rootReference": {} + } + ], + "segmentKeys": [ + { + "directReference": { + "structField": {} + }, + "rootReference": {} + } + ], + "measures": [ + { + "measure": { + "functionReference": 1, + "phase": "AGGREGATION_PHASE_INITIAL_TO_RESULT", + "outputType": { + "fp64": { + "nullability": "NULLABILITY_NULLABLE" + } + }, + "arguments": [ + { + "value": { + "selection": { + "directReference": { + "structField": { + "field": 2 + } + }, + "rootReference": {} + } + } + }, + { + "value": { + "selection": { + "directReference": { + "structField": { + "field": 3 + } + }, + "rootReference": {} + } + } + } + ] + } + } + ] + } + } + }, + "names": [ + "t", + "k", + "v_avg" + ] + } + } + ], +} +""" + buf = pa._substrait._parse_json_plan(substrait_query) + reader = pa.substrait.run_query( + buf, table_provider=table_provider, use_threads=False) + res_tb = reader.read_all() + + expected_tb = pa.Table.from_pydict({ + 't': [1, 1, 2, 2], + 'k': [1, 0, 0, 1], + 'v_avg': [3.5, 3.5, 9.0, 11.0] + }) + + # Ordering of k is deterministic because this is running with serial execution + assert res_tb == expected_tb + + +@pytest.mark.parametrize("expr", [ + pc.equal(pc.field("x"), 7), + pc.equal(pc.field("x"), pc.field("y")), + pc.field("x") > 50 +]) +def test_serializing_expressions(expr): + schema = pa.schema([ + pa.field("x", pa.int32()), + pa.field("y", pa.int32()) + ]) + + buf = pa.substrait.serialize_expressions([expr], ["test_expr"], schema) + returned = pa.substrait.deserialize_expressions(buf) + assert schema == returned.schema + assert len(returned.expressions) == 1 + assert "test_expr" in returned.expressions + + +def test_arrow_specific_types(): + fields = { + "time_seconds": (pa.time32("s"), 0), + "time_millis": (pa.time32("ms"), 0), + "time_nanos": (pa.time64("ns"), 0), + "date_millis": (pa.date64(), 0), + "large_string": (pa.large_string(), "test_string"), + "large_binary": (pa.large_binary(), b"test_string"), + } + schema = pa.schema([pa.field(name, typ) for name, (typ, _) in fields.items()]) + + def check_round_trip(expr): + buf = pa.substrait.serialize_expressions([expr], ["test_expr"], schema) + returned = pa.substrait.deserialize_expressions(buf) + assert schema == returned.schema + + for name, (typ, val) in fields.items(): + check_round_trip(pc.field(name) == pa.scalar(val, type=typ)) + + +def test_arrow_one_way_types(): + schema = pa.schema( + [ + pa.field("binary_view", pa.binary_view()), + pa.field("string_view", pa.string_view()), + pa.field("dictionary", pa.dictionary(pa.int32(), pa.string())), + pa.field("ree", pa.run_end_encoded(pa.int32(), pa.string())), + ] + ) + alt_schema = pa.schema( + [ + pa.field("binary_view", pa.binary()), + pa.field("string_view", pa.string()), + pa.field("dictionary", pa.string()), + pa.field("ree", pa.string()) + ] + ) + + def check_one_way(field): + expr = pc.is_null(pc.field(field.name)) + buf = pa.substrait.serialize_expressions([expr], ["test_expr"], schema) + returned = pa.substrait.deserialize_expressions(buf) + assert alt_schema == returned.schema + + for field in schema: + check_one_way(field) + + +def test_invalid_expression_ser_des(): + schema = pa.schema([ + pa.field("x", pa.int32()), + pa.field("y", pa.int32()) + ]) + expr = pc.equal(pc.field("x"), 7) + bad_expr = pc.equal(pc.field("z"), 7) + # Invalid number of names + with pytest.raises(ValueError) as excinfo: + pa.substrait.serialize_expressions([expr], [], schema) + assert 'need to have the same length' in str(excinfo.value) + with pytest.raises(ValueError) as excinfo: + 
pa.substrait.serialize_expressions([expr], ["foo", "bar"], schema) + assert 'need to have the same length' in str(excinfo.value) + # Expression doesn't match schema + with pytest.raises(ValueError) as excinfo: + pa.substrait.serialize_expressions([bad_expr], ["expr"], schema) + assert 'No match for FieldRef' in str(excinfo.value) + + +def test_serializing_multiple_expressions(): + schema = pa.schema([ + pa.field("x", pa.int32()), + pa.field("y", pa.int32()) + ]) + exprs = [pc.equal(pc.field("x"), 7), pc.equal(pc.field("x"), pc.field("y"))] + buf = pa.substrait.serialize_expressions(exprs, ["first", "second"], schema) + returned = pa.substrait.deserialize_expressions(buf) + assert schema == returned.schema + assert len(returned.expressions) == 2 + + norm_exprs = [pc.equal(pc.field(0), 7), pc.equal(pc.field(0), pc.field(1))] + assert str(returned.expressions["first"]) == str(norm_exprs[0]) + assert str(returned.expressions["second"]) == str(norm_exprs[1]) + + +def test_serializing_with_compute(): + schema = pa.schema([ + pa.field("x", pa.int32()), + pa.field("y", pa.int32()) + ]) + expr = pc.equal(pc.field("x"), 7) + expr_norm = pc.equal(pc.field(0), 7) + buf = expr.to_substrait(schema) + returned = pa.substrait.deserialize_expressions(buf) + + assert schema == returned.schema + assert len(returned.expressions) == 1 + + assert str(returned.expressions["expression"]) == str(expr_norm) + + # Compute can't deserialize messages with multiple expressions + buf = pa.substrait.serialize_expressions([expr, expr], ["first", "second"], schema) + with pytest.raises(ValueError) as excinfo: + pc.Expression.from_substrait(buf) + assert 'contained multiple expressions' in str(excinfo.value) + + # Deserialization should be possible regardless of the expression name + buf = pa.substrait.serialize_expressions([expr], ["weirdname"], schema) + expr2 = pc.Expression.from_substrait(buf) + assert str(expr2) == str(expr_norm) + + +def test_serializing_udfs(): + # Note, UDF in this context means a function that is not + # recognized by Substrait. It might still be a builtin pyarrow + # function. + schema = pa.schema([ + pa.field("x", pa.uint32()) + ]) + a = pc.scalar(10) + b = pc.scalar(4) + exprs = [pc.shift_left(a, b)] + + with pytest.raises(ArrowNotImplementedError): + pa.substrait.serialize_expressions(exprs, ["expr"], schema) + + buf = pa.substrait.serialize_expressions( + exprs, ["expr"], schema, allow_arrow_extensions=True) + returned = pa.substrait.deserialize_expressions(buf) + assert schema == returned.schema + assert len(returned.expressions) == 1 + assert str(returned.expressions["expr"]) == str(exprs[0]) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_table.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_table.py new file mode 100644 index 0000000000000000000000000000000000000000..e11efa64db4d47f4f117184e799dc2ae4c863d03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_table.py @@ -0,0 +1,3279 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from collections import OrderedDict +from collections.abc import Iterable +import sys +import weakref + +import numpy as np +import pytest +import pyarrow as pa +import pyarrow.compute as pc +from pyarrow.vendored.version import Version + + +def test_chunked_array_basics(): + data = pa.chunked_array([], type=pa.string()) + assert data.type == pa.string() + assert data.to_pylist() == [] + data.validate() + + data2 = pa.chunked_array([], type='binary') + assert data2.type == pa.binary() + + with pytest.raises(ValueError): + pa.chunked_array([]) + + data = pa.chunked_array([ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9] + ]) + assert isinstance(data.chunks, list) + assert all(isinstance(c, pa.lib.Int64Array) for c in data.chunks) + assert all(isinstance(c, pa.lib.Int64Array) for c in data.iterchunks()) + assert len(data.chunks) == 3 + assert data.get_total_buffer_size() == sum(c.get_total_buffer_size() + for c in data.iterchunks()) + assert sys.getsizeof(data) >= object.__sizeof__( + data) + data.get_total_buffer_size() + assert data.nbytes == 3 * 3 * 8 # 3 items per 3 lists with int64 size(8) + data.validate() + + wr = weakref.ref(data) + assert wr() is not None + del data + assert wr() is None + + +def test_chunked_array_construction(): + arr = pa.chunked_array([ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + ]) + assert arr.type == pa.int64() + assert len(arr) == 9 + assert len(arr.chunks) == 3 + + arr = pa.chunked_array([ + [1, 2, 3], + [4., 5., 6.], + [7, 8, 9], + ]) + assert arr.type == pa.int64() + assert len(arr) == 9 + assert len(arr.chunks) == 3 + + arr = pa.chunked_array([ + [1, 2, 3], + [4., 5., 6.], + [7, 8, 9], + ], type=pa.int8()) + assert arr.type == pa.int8() + assert len(arr) == 9 + assert len(arr.chunks) == 3 + + arr = pa.chunked_array([ + [1, 2, 3], + [] + ]) + assert arr.type == pa.int64() + assert len(arr) == 3 + assert len(arr.chunks) == 2 + + msg = "cannot construct ChunkedArray from empty vector and omitted type" + with pytest.raises(ValueError, match=msg): + assert pa.chunked_array([]) + + assert pa.chunked_array([], type=pa.string()).type == pa.string() + assert pa.chunked_array([[]]).type == pa.null() + assert pa.chunked_array([[]], type=pa.string()).type == pa.string() + + +def test_combine_chunks(): + # ARROW-77363 + arr = pa.array([1, 2]) + chunked_arr = pa.chunked_array([arr, arr]) + res = chunked_arr.combine_chunks() + expected = pa.array([1, 2, 1, 2]) + assert res.equals(expected) + + +def test_chunked_array_can_combine_chunks_with_no_chunks(): + # https://issues.apache.org/jira/browse/ARROW-17256 + assert pa.chunked_array([], type=pa.bool_()).combine_chunks() == pa.array( + [], type=pa.bool_() + ) + assert pa.chunked_array( + [pa.array([], type=pa.bool_())], type=pa.bool_() + ).combine_chunks() == pa.array([], type=pa.bool_()) + + +def test_chunked_array_to_numpy(): + data = pa.chunked_array([ + [1, 2, 3], + [4, 5, 6], + [] + ]) + arr1 = np.asarray(data) + arr2 = data.to_numpy() + + assert isinstance(arr2, np.ndarray) + assert arr2.shape == (6,) + assert np.array_equal(arr1, arr2) + + +def test_chunked_array_mismatch_types(): + msg = "chunks must all be same 
type" + with pytest.raises(TypeError, match=msg): + # Given array types are different + pa.chunked_array([ + pa.array([1, 2, 3]), + pa.array([1., 2., 3.]) + ]) + + with pytest.raises(TypeError, match=msg): + # Given array type is different from explicit type argument + pa.chunked_array([pa.array([1, 2, 3])], type=pa.float64()) + + +def test_chunked_array_str(): + data = [ + pa.array([1, 2, 3]), + pa.array([4, 5, 6]) + ] + data = pa.chunked_array(data) + assert str(data) == """[ + [ + 1, + 2, + 3 + ], + [ + 4, + 5, + 6 + ] +]""" + + +def test_chunked_array_getitem(): + data = [ + pa.array([1, 2, 3]), + pa.array([4, 5, 6]) + ] + data = pa.chunked_array(data) + assert data[1].as_py() == 2 + assert data[-1].as_py() == 6 + assert data[-6].as_py() == 1 + with pytest.raises(IndexError): + data[6] + with pytest.raises(IndexError): + data[-7] + # Ensure this works with numpy scalars + assert data[np.int32(1)].as_py() == 2 + + data_slice = data[2:4] + assert data_slice.to_pylist() == [3, 4] + + data_slice = data[4:-1] + assert data_slice.to_pylist() == [5] + + data_slice = data[99:99] + assert data_slice.type == data.type + assert data_slice.to_pylist() == [] + + +def test_chunked_array_slice(): + data = [ + pa.array([1, 2, 3]), + pa.array([4, 5, 6]) + ] + data = pa.chunked_array(data) + + data_slice = data.slice(len(data)) + assert data_slice.type == data.type + assert data_slice.to_pylist() == [] + + data_slice = data.slice(len(data) + 10) + assert data_slice.type == data.type + assert data_slice.to_pylist() == [] + + table = pa.Table.from_arrays([data], names=["a"]) + table_slice = table.slice(len(table)) + assert len(table_slice) == 0 + + table = pa.Table.from_arrays([data], names=["a"]) + table_slice = table.slice(len(table) + 10) + assert len(table_slice) == 0 + + +def test_chunked_array_iter(): + data = [ + pa.array([0]), + pa.array([1, 2, 3]), + pa.array([4, 5, 6]), + pa.array([7, 8, 9]) + ] + arr = pa.chunked_array(data) + + for i, j in zip(range(10), arr): + assert i == j.as_py() + + assert isinstance(arr, Iterable) + + +def test_chunked_array_equals(): + def eq(xarrs, yarrs): + if isinstance(xarrs, pa.ChunkedArray): + x = xarrs + else: + x = pa.chunked_array(xarrs) + if isinstance(yarrs, pa.ChunkedArray): + y = yarrs + else: + y = pa.chunked_array(yarrs) + assert x.equals(y) + assert y.equals(x) + assert x == y + assert x != str(y) + + def ne(xarrs, yarrs): + if isinstance(xarrs, pa.ChunkedArray): + x = xarrs + else: + x = pa.chunked_array(xarrs) + if isinstance(yarrs, pa.ChunkedArray): + y = yarrs + else: + y = pa.chunked_array(yarrs) + assert not x.equals(y) + assert not y.equals(x) + assert x != y + + eq(pa.chunked_array([], type=pa.int32()), + pa.chunked_array([], type=pa.int32())) + ne(pa.chunked_array([], type=pa.int32()), + pa.chunked_array([], type=pa.int64())) + + a = pa.array([0, 2], type=pa.int32()) + b = pa.array([0, 2], type=pa.int64()) + c = pa.array([0, 3], type=pa.int32()) + d = pa.array([0, 2, 0, 3], type=pa.int32()) + + eq([a], [a]) + ne([a], [b]) + eq([a, c], [a, c]) + eq([a, c], [d]) + ne([c, a], [a, c]) + + # ARROW-4822 + assert not pa.chunked_array([], type=pa.int32()).equals(None) + + +@pytest.mark.parametrize( + ('data', 'typ'), + [ + ([True, False, True, True], pa.bool_()), + ([1, 2, 4, 6], pa.int64()), + ([1.0, 2.5, None], pa.float64()), + (['a', None, 'b'], pa.string()), + ([], pa.list_(pa.uint8())), + ([[1, 2], [3]], pa.list_(pa.int64())), + ([['a'], None, ['b', 'c']], pa.list_(pa.string())), + ([(1, 'a'), (2, 'c'), None], + pa.struct([pa.field('a', 
pa.int64()), pa.field('b', pa.string())])) + ] +) +def test_chunked_array_pickle(data, typ, pickle_module): + arrays = [] + while data: + arrays.append(pa.array(data[:2], type=typ)) + data = data[2:] + array = pa.chunked_array(arrays, type=typ) + array.validate() + result = pickle_module.loads(pickle_module.dumps(array)) + result.validate() + assert result.equals(array) + + +@pytest.mark.pandas +def test_chunked_array_to_pandas(): + import pandas as pd + + data = [ + pa.array([-10, -5, 0, 5, 10]) + ] + table = pa.table(data, names=['a']) + col = table.column(0) + assert isinstance(col, pa.ChunkedArray) + series = col.to_pandas() + assert isinstance(series, pd.Series) + assert series.shape == (5,) + assert series[0] == -10 + assert series.name == 'a' + + +@pytest.mark.pandas +def test_chunked_array_to_pandas_preserve_name(): + # https://issues.apache.org/jira/browse/ARROW-7709 + import pandas as pd + import pandas.testing as tm + + for data in [ + pa.array([1, 2, 3]), + pa.array(pd.Categorical(["a", "b", "a"])), + pa.array(pd.date_range("2012", periods=3)), + pa.array(pd.date_range("2012", periods=3, tz="Europe/Brussels")), + pa.array([1, 2, 3], pa.timestamp("ms")), + pa.array([1, 2, 3], pa.timestamp("ms", "Europe/Brussels"))]: + table = pa.table({"name": data}) + result = table.column("name").to_pandas() + assert result.name == "name" + expected = pd.Series(data.to_pandas(), name="name") + tm.assert_series_equal(result, expected) + + +@pytest.mark.pandas +def test_table_roundtrip_to_pandas_empty_dataframe(): + # https://issues.apache.org/jira/browse/ARROW-10643 + # The conversion should not results in a table with 0 rows if the original + # DataFrame has a RangeIndex but is empty. + import pandas as pd + + data = pd.DataFrame(index=pd.RangeIndex(0, 10, 1)) + table = pa.table(data) + result = table.to_pandas() + + assert table.num_rows == 10 + assert data.shape == (10, 0) + assert result.shape == (10, 0) + assert result.index.equals(data.index) + + data = pd.DataFrame(index=pd.RangeIndex(0, 10, 3)) + table = pa.table(data) + result = table.to_pandas() + + assert table.num_rows == 4 + assert data.shape == (4, 0) + assert result.shape == (4, 0) + assert result.index.equals(data.index) + + +@pytest.mark.pandas +def test_recordbatch_roundtrip_to_pandas_empty_dataframe(): + # https://issues.apache.org/jira/browse/ARROW-10643 + # The conversion should not results in a RecordBatch with 0 rows if + # the original DataFrame has a RangeIndex but is empty. 
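+    # Note (annotation, not from the upstream test): the row count is expected
+    # to survive even with zero columns because a RecordBatch carries num_rows
+    # independently of its arrays, and a RangeIndex is round-tripped through
+    # the pandas metadata stored on the schema rather than materialized as a
+    # physical column.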
+ import pandas as pd + + data = pd.DataFrame(index=pd.RangeIndex(0, 10, 1)) + batch = pa.RecordBatch.from_pandas(data) + result = batch.to_pandas() + + assert batch.num_rows == 10 + assert data.shape == (10, 0) + assert result.shape == (10, 0) + assert result.index.equals(data.index) + + data = pd.DataFrame(index=pd.RangeIndex(0, 10, 3)) + batch = pa.RecordBatch.from_pandas(data) + result = batch.to_pandas() + + assert batch.num_rows == 4 + assert data.shape == (4, 0) + assert result.shape == (4, 0) + assert result.index.equals(data.index) + + +@pytest.mark.pandas +def test_to_pandas_empty_table(): + # https://issues.apache.org/jira/browse/ARROW-15370 + import pandas as pd + import pandas.testing as tm + + df = pd.DataFrame({'a': [1, 2], 'b': [0.1, 0.2]}) + table = pa.table(df) + result = table.schema.empty_table().to_pandas() + assert result.shape == (0, 2) + tm.assert_frame_equal(result, df.iloc[:0]) + + +@pytest.mark.pandas +@pytest.mark.nopandas +def test_chunked_array_asarray(): + # ensure this is tested both when pandas is present or not (ARROW-6564) + + data = [ + pa.array([0]), + pa.array([1, 2, 3]) + ] + chunked_arr = pa.chunked_array(data) + + np_arr = np.asarray(chunked_arr) + assert np_arr.tolist() == [0, 1, 2, 3] + assert np_arr.dtype == np.dtype('int64') + + # An optional type can be specified when calling np.asarray + np_arr = np.asarray(chunked_arr, dtype='str') + assert np_arr.tolist() == ['0', '1', '2', '3'] + + # Types are modified when there are nulls + data = [ + pa.array([1, None]), + pa.array([1, 2, 3]) + ] + chunked_arr = pa.chunked_array(data) + + np_arr = np.asarray(chunked_arr) + elements = np_arr.tolist() + assert elements[0] == 1. + assert np.isnan(elements[1]) + assert elements[2:] == [1., 2., 3.] + assert np_arr.dtype == np.dtype('float64') + + # DictionaryType data will be converted to dense numpy array + arr = pa.DictionaryArray.from_arrays( + pa.array([0, 1, 2, 0, 1]), pa.array(['a', 'b', 'c'])) + chunked_arr = pa.chunked_array([arr, arr]) + np_arr = np.asarray(chunked_arr) + assert np_arr.dtype == np.dtype('object') + assert np_arr.tolist() == ['a', 'b', 'c', 'a', 'b'] * 2 + + +def test_chunked_array_flatten(): + ty = pa.struct([pa.field('x', pa.int16()), + pa.field('y', pa.float32())]) + a = pa.array([(1, 2.5), (3, 4.5), (5, 6.5)], type=ty) + carr = pa.chunked_array(a) + x, y = carr.flatten() + assert x.equals(pa.chunked_array(pa.array([1, 3, 5], type=pa.int16()))) + assert y.equals(pa.chunked_array(pa.array([2.5, 4.5, 6.5], + type=pa.float32()))) + + # Empty column + a = pa.array([], type=ty) + carr = pa.chunked_array(a) + x, y = carr.flatten() + assert x.equals(pa.chunked_array(pa.array([], type=pa.int16()))) + assert y.equals(pa.chunked_array(pa.array([], type=pa.float32()))) + + +def test_chunked_array_unify_dictionaries(): + arr = pa.chunked_array([ + pa.array(["foo", "bar", None, "foo"]).dictionary_encode(), + pa.array(["quux", None, "foo"]).dictionary_encode(), + ]) + assert arr.chunk(0).dictionary.equals(pa.array(["foo", "bar"])) + assert arr.chunk(1).dictionary.equals(pa.array(["quux", "foo"])) + arr = arr.unify_dictionaries() + expected_dict = pa.array(["foo", "bar", "quux"]) + assert arr.chunk(0).dictionary.equals(expected_dict) + assert arr.chunk(1).dictionary.equals(expected_dict) + assert arr.to_pylist() == ["foo", "bar", None, "foo", "quux", None, "foo"] + + +def test_recordbatch_dunder_init(): + with pytest.raises(TypeError, match='RecordBatch'): + pa.RecordBatch() + + +def test_chunked_array_c_array_interface(): + class ArrayWrapper: + 
def __init__(self, array): + self.array = array + + def __arrow_c_array__(self, requested_schema=None): + return self.array.__arrow_c_array__(requested_schema) + + data = pa.array([1, 2, 3], pa.int64()) + chunked = pa.chunked_array([data]) + wrapper = ArrayWrapper(data) + + # Can roundtrip through the wrapper. + result = pa.chunked_array(wrapper) + assert result == chunked + + # Can also import with a type that implementer can cast to. + result = pa.chunked_array(wrapper, type=pa.int16()) + assert result == chunked.cast(pa.int16()) + + +def test_chunked_array_c_stream_interface(): + class ChunkedArrayWrapper: + def __init__(self, chunked): + self.chunked = chunked + + def __arrow_c_stream__(self, requested_schema=None): + return self.chunked.__arrow_c_stream__(requested_schema) + + data = pa.chunked_array([[1, 2, 3], [4, None, 6]]) + wrapper = ChunkedArrayWrapper(data) + + # Can roundtrip through the wrapper. + result = pa.chunked_array(wrapper) + assert result == data + + # Can also import with a type that implementer can cast to. + result = pa.chunked_array(wrapper, type=pa.int16()) + assert result == data.cast(pa.int16()) + + +def test_recordbatch_c_array_interface(): + class BatchWrapper: + def __init__(self, batch): + self.batch = batch + + def __arrow_c_array__(self, requested_schema=None): + return self.batch.__arrow_c_array__(requested_schema) + + data = pa.record_batch([ + pa.array([1, 2, 3], type=pa.int64()) + ], names=['a']) + wrapper = BatchWrapper(data) + + # Can roundtrip through the wrapper. + result = pa.record_batch(wrapper) + assert result == data + + # Can also import with a schema that implementer can cast to. + castable_schema = pa.schema([ + pa.field('a', pa.int32()) + ]) + result = pa.record_batch(wrapper, schema=castable_schema) + expected = pa.record_batch([ + pa.array([1, 2, 3], type=pa.int32()) + ], names=['a']) + assert result == expected + + +def test_table_c_array_interface(): + class BatchWrapper: + def __init__(self, batch): + self.batch = batch + + def __arrow_c_array__(self, requested_schema=None): + return self.batch.__arrow_c_array__(requested_schema) + + data = pa.record_batch([ + pa.array([1, 2, 3], type=pa.int64()) + ], names=['a']) + wrapper = BatchWrapper(data) + + # Can roundtrip through the wrapper. + result = pa.table(wrapper) + expected = pa.Table.from_batches([data]) + assert result == expected + + # Can also import with a schema that implementer can cast to. + castable_schema = pa.schema([ + pa.field('a', pa.int32()) + ]) + result = pa.table(wrapper, schema=castable_schema) + expected = pa.table({ + 'a': pa.array([1, 2, 3], type=pa.int32()) + }) + assert result == expected + + +def test_table_c_stream_interface(): + class StreamWrapper: + def __init__(self, batches): + self.batches = batches + + def __arrow_c_stream__(self, requested_schema=None): + reader = pa.RecordBatchReader.from_batches( + self.batches[0].schema, self.batches) + return reader.__arrow_c_stream__(requested_schema) + + data = [ + pa.record_batch([pa.array([1, 2, 3], type=pa.int64())], names=['a']), + pa.record_batch([pa.array([4, 5, 6], type=pa.int64())], names=['a']) + ] + wrapper = StreamWrapper(data) + + # Can roundtrip through the wrapper. 
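+    # pa.table() accepts any object exposing the Arrow PyCapsule stream
+    # protocol (__arrow_c_stream__), so the wrapper is consumed much like a
+    # RecordBatchReader here.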
+ result = pa.table(wrapper) + expected = pa.Table.from_batches(data) + assert result == expected + + # Passing schema works if already that schema + result = pa.table(wrapper, schema=data[0].schema) + assert result == expected + + # Passing a different schema will cast + good_schema = pa.schema([pa.field('a', pa.int32())]) + result = pa.table(wrapper, schema=good_schema) + assert result == expected.cast(good_schema) + + # If schema doesn't match, raises NotImplementedError + with pytest.raises( + pa.lib.ArrowTypeError, match="Field 0 cannot be cast" + ): + pa.table( + wrapper, schema=pa.schema([pa.field('a', pa.list_(pa.int32()))]) + ) + + +def test_recordbatch_itercolumns(): + data = [ + pa.array(range(5), type='int16'), + pa.array([-10, -5, 0, None, 10], type='int32') + ] + batch = pa.record_batch(data, ['c0', 'c1']) + + columns = [] + for col in batch.itercolumns(): + columns.append(col) + + assert batch.columns == columns + assert batch == pa.record_batch(columns, names=batch.column_names) + assert batch != pa.record_batch(columns[1:], names=batch.column_names[1:]) + assert batch != columns + + +def test_recordbatch_equals(): + data1 = [ + pa.array(range(5), type='int16'), + pa.array([-10, -5, 0, None, 10], type='int32') + ] + data2 = [ + pa.array(['a', 'b', 'c']), + pa.array([['d'], ['e'], ['f']]), + ] + column_names = ['c0', 'c1'] + + batch = pa.record_batch(data1, column_names) + assert batch == pa.record_batch(data1, column_names) + assert batch.equals(pa.record_batch(data1, column_names)) + + assert batch != pa.record_batch(data2, column_names) + assert not batch.equals(pa.record_batch(data2, column_names)) + + batch_meta = pa.record_batch(data1, names=column_names, + metadata={'key': 'value'}) + assert batch_meta.equals(batch) + assert not batch_meta.equals(batch, check_metadata=True) + + # ARROW-8889 + assert not batch.equals(None) + assert batch != "foo" + + +def test_recordbatch_take(): + batch = pa.record_batch( + [pa.array([1, 2, 3, None, 5]), + pa.array(['a', 'b', 'c', 'd', 'e'])], + ['f1', 'f2']) + assert batch.take(pa.array([2, 3])).equals(batch.slice(2, 2)) + assert batch.take(pa.array([2, None])).equals( + pa.record_batch([pa.array([3, None]), pa.array(['c', None])], + ['f1', 'f2'])) + + +def test_recordbatch_column_sets_private_name(): + # ARROW-6429 + rb = pa.record_batch([pa.array([1, 2, 3, 4])], names=['a0']) + assert rb[0]._name == 'a0' + + +def test_recordbatch_from_arrays_validate_schema(): + # ARROW-6263 + arr = pa.array([1, 2]) + schema = pa.schema([pa.field('f0', pa.list_(pa.utf8()))]) + with pytest.raises(NotImplementedError): + pa.record_batch([arr], schema=schema) + + +def test_recordbatch_from_arrays_validate_lengths(): + # ARROW-2820 + data = [pa.array([1]), pa.array(["tokyo", "like", "happy"]), + pa.array(["derek"])] + + with pytest.raises(ValueError): + pa.record_batch(data, ['id', 'tags', 'name']) + + +def test_recordbatch_no_fields(): + batch = pa.record_batch([], []) + + assert len(batch) == 0 + assert batch.num_rows == 0 + assert batch.num_columns == 0 + + +def test_recordbatch_from_arrays_invalid_names(): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]) + ] + with pytest.raises(ValueError): + pa.record_batch(data, names=['a', 'b', 'c']) + + with pytest.raises(ValueError): + pa.record_batch(data, names=['a']) + + +def test_recordbatch_empty_metadata(): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]) + ] + + batch = pa.record_batch(data, ['c0', 'c1']) + assert batch.schema.metadata is None + + +def 
test_recordbatch_pickle(pickle_module): + data = [ + pa.array(range(5), type='int8'), + pa.array([-10, -5, 0, 5, 10], type='float32') + ] + fields = [ + pa.field('ints', pa.int8()), + pa.field('floats', pa.float32()), + ] + schema = pa.schema(fields, metadata={b'foo': b'bar'}) + batch = pa.record_batch(data, schema=schema) + + result = pickle_module.loads(pickle_module.dumps(batch)) + assert result.equals(batch) + assert result.schema == schema + + +def test_recordbatch_get_field(): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array(range(5, 10)) + ] + batch = pa.RecordBatch.from_arrays(data, names=('a', 'b', 'c')) + + assert batch.field('a').equals(batch.schema.field('a')) + assert batch.field(0).equals(batch.schema.field('a')) + + with pytest.raises(KeyError): + batch.field('d') + + with pytest.raises(TypeError): + batch.field(None) + + with pytest.raises(IndexError): + batch.field(4) + + +def test_recordbatch_select_column(): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array(range(5, 10)) + ] + batch = pa.RecordBatch.from_arrays(data, names=('a', 'b', 'c')) + + assert batch.column('a').equals(batch.column(0)) + + with pytest.raises( + KeyError, match='Field "d" does not exist in schema'): + batch.column('d') + + with pytest.raises(TypeError): + batch.column(None) + + with pytest.raises(IndexError): + batch.column(4) + + +def test_recordbatch_select(): + a1 = pa.array([1, 2, 3, None, 5]) + a2 = pa.array(['a', 'b', 'c', 'd', 'e']) + a3 = pa.array([[1, 2], [3, 4], [5, 6], None, [9, 10]]) + batch = pa.record_batch([a1, a2, a3], ['f1', 'f2', 'f3']) + + # selecting with string names + result = batch.select(['f1']) + expected = pa.record_batch([a1], ['f1']) + assert result.equals(expected) + + result = batch.select(['f3', 'f2']) + expected = pa.record_batch([a3, a2], ['f3', 'f2']) + assert result.equals(expected) + + # selecting with integer indices + result = batch.select([0]) + expected = pa.record_batch([a1], ['f1']) + assert result.equals(expected) + + result = batch.select([2, 1]) + expected = pa.record_batch([a3, a2], ['f3', 'f2']) + assert result.equals(expected) + + # preserve metadata + batch2 = batch.replace_schema_metadata({"a": "test"}) + result = batch2.select(["f1", "f2"]) + assert b"a" in result.schema.metadata + + # selecting non-existing column raises + with pytest.raises(KeyError, match='Field "f5" does not exist'): + batch.select(['f5']) + + with pytest.raises(IndexError, match="index out of bounds"): + batch.select([5]) + + # duplicate selection gives duplicated names in resulting recordbatch + result = batch.select(['f2', 'f2']) + expected = pa.record_batch([a2, a2], ['f2', 'f2']) + assert result.equals(expected) + + # selection duplicated column raises + batch = pa.record_batch([a1, a2, a3], ['f1', 'f2', 'f1']) + with pytest.raises(KeyError, match='Field "f1" exists 2 times'): + batch.select(['f1']) + + result = batch.select(['f2']) + expected = pa.record_batch([a2], ['f2']) + assert result.equals(expected) + + +def test_recordbatch_from_struct_array_invalid(): + with pytest.raises(TypeError): + pa.RecordBatch.from_struct_array(pa.array(range(5))) + + +def test_recordbatch_from_struct_array(): + struct_array = pa.array( + [{"ints": 1}, {"floats": 1.0}], + type=pa.struct([("ints", pa.int32()), ("floats", pa.float32())]), + ) + result = pa.RecordBatch.from_struct_array(struct_array) + assert result.equals(pa.RecordBatch.from_arrays( + [ + pa.array([1, None], type=pa.int32()), + pa.array([None, 1.0], type=pa.float32()), + 
], ["ints", "floats"] + )) + + +def test_recordbatch_to_struct_array(): + batch = pa.RecordBatch.from_arrays( + [ + pa.array([1, None], type=pa.int32()), + pa.array([None, 1.0], type=pa.float32()), + ], ["ints", "floats"] + ) + result = batch.to_struct_array() + assert result.equals(pa.array( + [{"ints": 1}, {"floats": 1.0}], + type=pa.struct([("ints", pa.int32()), ("floats", pa.float32())]), + )) + + +def test_table_from_struct_array_invalid(): + with pytest.raises(TypeError, match="Argument 'struct_array' has incorrect type"): + pa.Table.from_struct_array(pa.array(range(5))) + + +def test_table_from_struct_array(): + struct_array = pa.array( + [{"ints": 1}, {"floats": 1.0}], + type=pa.struct([("ints", pa.int32()), ("floats", pa.float32())]), + ) + result = pa.Table.from_struct_array(struct_array) + assert result.equals(pa.Table.from_arrays( + [ + pa.array([1, None], type=pa.int32()), + pa.array([None, 1.0], type=pa.float32()), + ], ["ints", "floats"] + )) + + +def test_table_from_struct_array_chunked_array(): + chunked_struct_array = pa.chunked_array( + [[{"ints": 1}, {"floats": 1.0}]], + type=pa.struct([("ints", pa.int32()), ("floats", pa.float32())]), + ) + result = pa.Table.from_struct_array(chunked_struct_array) + assert result.equals(pa.Table.from_arrays( + [ + pa.array([1, None], type=pa.int32()), + pa.array([None, 1.0], type=pa.float32()), + ], ["ints", "floats"] + )) + + +def test_table_to_struct_array(): + table = pa.Table.from_arrays( + [ + pa.array([1, None], type=pa.int32()), + pa.array([None, 1.0], type=pa.float32()), + ], ["ints", "floats"] + ) + result = table.to_struct_array() + assert result.equals(pa.chunked_array( + [[{"ints": 1}, {"floats": 1.0}]], + type=pa.struct([("ints", pa.int32()), ("floats", pa.float32())]), + )) + + +def test_table_to_struct_array_with_max_chunksize(): + table = pa.Table.from_arrays( + [ + pa.array([1, None], type=pa.int32()), + pa.array([None, 1.0], type=pa.float32()), + ], ["ints", "floats"] + ) + result = table.to_struct_array(max_chunksize=1) + assert result.equals(pa.chunked_array( + [[{"ints": 1}], [{"floats": 1.0}]], + type=pa.struct([("ints", pa.int32()), ("floats", pa.float32())]), + )) + + +def check_tensors(tensor, expected_tensor, type, size): + assert tensor.equals(expected_tensor) + assert tensor.size == size + assert tensor.type == type + assert tensor.shape == expected_tensor.shape + assert tensor.strides == expected_tensor.strides + + +@pytest.mark.parametrize('typ', [ + np.uint8, np.uint16, np.uint32, np.uint64, + np.int8, np.int16, np.int32, np.int64, + np.float32, np.float64, +]) +def test_recordbatch_to_tensor_uniform_type(typ): + arr1 = [1, 2, 3, 4, 5, 6, 7, 8, 9] + arr2 = [10, 20, 30, 40, 50, 60, 70, 80, 90] + arr3 = [100, 100, 100, 100, 100, 100, 100, 100, 100] + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.from_numpy_dtype(typ)), + pa.array(arr2, type=pa.from_numpy_dtype(typ)), + pa.array(arr3, type=pa.from_numpy_dtype(typ)), + ], ["a", "b", "c"] + ) + + result = batch.to_tensor(row_major=False) + x = np.column_stack([arr1, arr2, arr3]).astype(typ, order="F") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.from_numpy_dtype(typ), 27) + + result = batch.to_tensor() + x = np.column_stack([arr1, arr2, arr3]).astype(typ, order="C") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.from_numpy_dtype(typ), 27) + + # Test offset + batch1 = batch.slice(1) + arr1 = [2, 3, 4, 5, 6, 7, 8, 9] + arr2 = [20, 30, 40, 50, 60, 70, 80, 90] + arr3 = [100, 100, 100, 100, 
100, 100, 100, 100] + + result = batch1.to_tensor(row_major=False) + x = np.column_stack([arr1, arr2, arr3]).astype(typ, order="F") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.from_numpy_dtype(typ), 24) + + result = batch1.to_tensor() + x = np.column_stack([arr1, arr2, arr3]).astype(typ, order="C") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.from_numpy_dtype(typ), 24) + + batch2 = batch.slice(1, 5) + arr1 = [2, 3, 4, 5, 6] + arr2 = [20, 30, 40, 50, 60] + arr3 = [100, 100, 100, 100, 100] + + result = batch2.to_tensor(row_major=False) + x = np.column_stack([arr1, arr2, arr3]).astype(typ, order="F") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.from_numpy_dtype(typ), 15) + + result = batch2.to_tensor() + x = np.column_stack([arr1, arr2, arr3]).astype(typ, order="C") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.from_numpy_dtype(typ), 15) + + +def test_recordbatch_to_tensor_uniform_float_16(): + arr1 = [1, 2, 3, 4, 5, 6, 7, 8, 9] + arr2 = [10, 20, 30, 40, 50, 60, 70, 80, 90] + arr3 = [100, 100, 100, 100, 100, 100, 100, 100, 100] + batch = pa.RecordBatch.from_arrays( + [ + pa.array(np.array(arr1, dtype=np.float16), type=pa.float16()), + pa.array(np.array(arr2, dtype=np.float16), type=pa.float16()), + pa.array(np.array(arr3, dtype=np.float16), type=pa.float16()), + ], ["a", "b", "c"] + ) + + result = batch.to_tensor(row_major=False) + x = np.column_stack([arr1, arr2, arr3]).astype(np.float16, order="F") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.float16(), 27) + + result = batch.to_tensor() + x = np.column_stack([arr1, arr2, arr3]).astype(np.float16, order="C") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.float16(), 27) + + +def test_recordbatch_to_tensor_mixed_type(): + # uint16 + int16 = int32 + arr1 = [1, 2, 3, 4, 5, 6, 7, 8, 9] + arr2 = [10, 20, 30, 40, 50, 60, 70, 80, 90] + arr3 = [100, 200, 300, np.nan, 500, 600, 700, 800, 900] + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.uint16()), + pa.array(arr2, type=pa.int16()), + ], ["a", "b"] + ) + + result = batch.to_tensor(row_major=False) + x = np.column_stack([arr1, arr2]).astype(np.int32, order="F") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.int32(), 18) + + result = batch.to_tensor() + x = np.column_stack([arr1, arr2]).astype(np.int32, order="C") + expected = pa.Tensor.from_numpy(x) + check_tensors(result, expected, pa.int32(), 18) + + # uint16 + int16 + float32 = float64 + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.uint16()), + pa.array(arr2, type=pa.int16()), + pa.array(arr3, type=pa.float32()), + ], ["a", "b", "c"] + ) + result = batch.to_tensor(row_major=False) + x = np.column_stack([arr1, arr2, arr3]).astype(np.float64, order="F") + expected = pa.Tensor.from_numpy(x) + + np.testing.assert_equal(result.to_numpy(), x) + assert result.size == 27 + assert result.type == pa.float64() + assert result.shape == expected.shape + assert result.strides == expected.strides + + result = batch.to_tensor() + x = np.column_stack([arr1, arr2, arr3]).astype(np.float64, order="C") + expected = pa.Tensor.from_numpy(x) + + np.testing.assert_equal(result.to_numpy(), x) + assert result.size == 27 + assert result.type == pa.float64() + assert result.shape == expected.shape + assert result.strides == expected.strides + + +def test_recordbatch_to_tensor_unsupported_mixed_type_with_float16(): + arr1 = [1, 2, 3, 4, 5, 
6, 7, 8, 9] + arr2 = [10, 20, 30, 40, 50, 60, 70, 80, 90] + arr3 = [100, 200, 300, 400, 500, 600, 700, 800, 900] + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.uint16()), + pa.array(np.array(arr2, dtype=np.float16), type=pa.float16()), + pa.array(arr3, type=pa.float32()), + ], ["a", "b", "c"] + ) + + with pytest.raises( + NotImplementedError, + match="Casting from or to halffloat is not supported." + ): + batch.to_tensor() + + +def test_recordbatch_to_tensor_nan(): + arr1 = [1, 2, 3, 4, np.nan, 6, 7, 8, 9] + arr2 = [10, 20, 30, 40, 50, 60, 70, np.nan, 90] + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.float32()), + pa.array(arr2, type=pa.float32()), + ], ["a", "b"] + ) + result = batch.to_tensor(row_major=False) + x = np.column_stack([arr1, arr2]).astype(np.float32, order="F") + expected = pa.Tensor.from_numpy(x) + + np.testing.assert_equal(result.to_numpy(), x) + assert result.size == 18 + assert result.type == pa.float32() + assert result.shape == expected.shape + assert result.strides == expected.strides + + +def test_recordbatch_to_tensor_null(): + arr1 = [1, 2, 3, 4, None, 6, 7, 8, 9] + arr2 = [10, 20, 30, 40, 50, 60, 70, None, 90] + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.int32()), + pa.array(arr2, type=pa.float32()), + ], ["a", "b"] + ) + with pytest.raises( + pa.ArrowTypeError, + match="Can only convert a RecordBatch with no nulls." + ): + batch.to_tensor() + + result = batch.to_tensor(null_to_nan=True, row_major=False) + x = np.column_stack([arr1, arr2]).astype(np.float64, order="F") + expected = pa.Tensor.from_numpy(x) + + np.testing.assert_equal(result.to_numpy(), x) + assert result.size == 18 + assert result.type == pa.float64() + assert result.shape == expected.shape + assert result.strides == expected.strides + + # int32 -> float64 + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.int32()), + pa.array(arr2, type=pa.int32()), + ], ["a", "b"] + ) + + result = batch.to_tensor(null_to_nan=True, row_major=False) + + np.testing.assert_equal(result.to_numpy(), x) + assert result.size == 18 + assert result.type == pa.float64() + assert result.shape == expected.shape + assert result.strides == expected.strides + + # int8 -> float32 + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.int8()), + pa.array(arr2, type=pa.int8()), + ], ["a", "b"] + ) + + result = batch.to_tensor(null_to_nan=True, row_major=False) + x = np.column_stack([arr1, arr2]).astype(np.float32, order="F") + expected = pa.Tensor.from_numpy(x) + + np.testing.assert_equal(result.to_numpy(), x) + assert result.size == 18 + assert result.type == pa.float32() + assert result.shape == expected.shape + assert result.strides == expected.strides + + +def test_recordbatch_to_tensor_empty(): + batch = pa.RecordBatch.from_arrays( + [ + pa.array([], type=pa.float32()), + pa.array([], type=pa.float32()), + ], ["a", "b"] + ) + result = batch.to_tensor() + + x = np.column_stack([[], []]).astype(np.float32, order="F") + expected = pa.Tensor.from_numpy(x) + + assert result.size == expected.size + assert result.type == pa.float32() + assert result.shape == expected.shape + assert result.strides == (4, 4) + + +def test_recordbatch_to_tensor_unsupported(): + arr1 = [1, 2, 3, 4, 5, 6, 7, 8, 9] + # Unsupported data type + arr2 = ["a", "b", "c", "a", "b", "c", "a", "b", "c"] + batch = pa.RecordBatch.from_arrays( + [ + pa.array(arr1, type=pa.int32()), + pa.array(arr2, type=pa.utf8()), + ], ["a", "b"] + ) + with pytest.raises( + pa.ArrowTypeError, + 
match="DataType is not supported" + ): + batch.to_tensor() + + +def _table_like_slice_tests(factory): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]) + ] + names = ['c0', 'c1'] + + obj = factory(data, names=names) + + sliced = obj.slice(2) + assert sliced.num_rows == 3 + + expected = factory([x.slice(2) for x in data], names=names) + assert sliced.equals(expected) + + sliced2 = obj.slice(2, 2) + expected2 = factory([x.slice(2, 2) for x in data], names=names) + assert sliced2.equals(expected2) + + # 0 offset + assert obj.slice(0).equals(obj) + + # Slice past end of array + assert len(obj.slice(len(obj))) == 0 + + with pytest.raises(IndexError): + obj.slice(-1) + + # Check __getitem__-based slicing + assert obj.slice(0, 0).equals(obj[:0]) + assert obj.slice(0, 2).equals(obj[:2]) + assert obj.slice(2, 2).equals(obj[2:4]) + assert obj.slice(2, len(obj) - 2).equals(obj[2:]) + assert obj.slice(len(obj) - 2, 2).equals(obj[-2:]) + assert obj.slice(len(obj) - 4, 2).equals(obj[-4:-2]) + + +def test_recordbatch_slice_getitem(): + return _table_like_slice_tests(pa.RecordBatch.from_arrays) + + +def test_table_slice_getitem(): + return _table_like_slice_tests(pa.table) + + +@pytest.mark.pandas +def test_slice_zero_length_table(): + # ARROW-7907: a segfault on this code was fixed after 0.16.0 + table = pa.table({'a': pa.array([], type=pa.timestamp('us'))}) + table_slice = table.slice(0, 0) + table_slice.to_pandas() + + table = pa.table({'a': pa.chunked_array([], type=pa.string())}) + table.to_pandas() + + +def test_recordbatchlist_schema_equals(): + a1 = np.array([1], dtype='uint32') + a2 = np.array([4.0, 5.0], dtype='float64') + batch1 = pa.record_batch([pa.array(a1)], ['c1']) + batch2 = pa.record_batch([pa.array(a2)], ['c1']) + + with pytest.raises(pa.ArrowInvalid): + pa.Table.from_batches([batch1, batch2]) + + +def test_table_column_sets_private_name(): + # ARROW-6429 + t = pa.table([pa.array([1, 2, 3, 4])], names=['a0']) + assert t[0]._name == 'a0' + + +def test_table_equals(): + table = pa.Table.from_arrays([], names=[]) + assert table.equals(table) + + # ARROW-4822 + assert not table.equals(None) + + other = pa.Table.from_arrays([], names=[], metadata={'key': 'value'}) + assert not table.equals(other, check_metadata=True) + assert table.equals(other) + + +def test_table_from_batches_and_schema(): + schema = pa.schema([ + pa.field('a', pa.int64()), + pa.field('b', pa.float64()), + ]) + batch = pa.record_batch([pa.array([1]), pa.array([3.14])], + names=['a', 'b']) + table = pa.Table.from_batches([batch], schema) + assert table.schema.equals(schema) + assert table.column(0) == pa.chunked_array([[1]]) + assert table.column(1) == pa.chunked_array([[3.14]]) + + incompatible_schema = pa.schema([pa.field('a', pa.int64())]) + with pytest.raises(pa.ArrowInvalid): + pa.Table.from_batches([batch], incompatible_schema) + + incompatible_batch = pa.record_batch([pa.array([1])], ['a']) + with pytest.raises(pa.ArrowInvalid): + pa.Table.from_batches([incompatible_batch], schema) + + +@pytest.mark.pandas +def test_table_to_batches(): + from pandas.testing import assert_frame_equal + import pandas as pd + + df1 = pd.DataFrame({'a': list(range(10))}) + df2 = pd.DataFrame({'a': list(range(10, 30))}) + + batch1 = pa.RecordBatch.from_pandas(df1, preserve_index=False) + batch2 = pa.RecordBatch.from_pandas(df2, preserve_index=False) + + table = pa.Table.from_batches([batch1, batch2, batch1]) + + expected_df = pd.concat([df1, df2, df1], ignore_index=True) + + batches = table.to_batches() + assert 
len(batches) == 3 + + assert_frame_equal(pa.Table.from_batches(batches).to_pandas(), + expected_df) + + batches = table.to_batches(max_chunksize=15) + assert list(map(len, batches)) == [10, 15, 5, 10] + + assert_frame_equal(table.to_pandas(), expected_df) + assert_frame_equal(pa.Table.from_batches(batches).to_pandas(), + expected_df) + + table_from_iter = pa.Table.from_batches(iter([batch1, batch2, batch1])) + assert table.equals(table_from_iter) + + with pytest.raises(ValueError): + table.to_batches(max_chunksize=0) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_basics(cls): + data = [ + pa.array(range(5), type='int16'), + pa.array([-10, -5, 0, None, 10], type='int32') + ] + table = cls.from_arrays(data, names=('a', 'b')) + table.validate() + + assert not table.schema.metadata + assert len(table) == 5 + assert table.num_rows == 5 + assert table.num_columns == len(data) + assert table.shape == (5, 2) + # (only the second array has a null bitmap) + assert table.get_total_buffer_size() == (5 * 2) + (5 * 4 + 1) + assert table.nbytes == (5 * 2) + (5 * 4 + 1) + assert sys.getsizeof(table) >= object.__sizeof__( + table) + table.get_total_buffer_size() + + pydict = table.to_pydict() + assert pydict == OrderedDict([ + ('a', [0, 1, 2, 3, 4]), + ('b', [-10, -5, 0, None, 10]) + ]) + assert isinstance(pydict, dict) + assert table == cls.from_pydict(pydict, schema=table.schema) + + with pytest.raises(IndexError): + # bounds checking + table[2] + + columns = [] + for col in table.itercolumns(): + + if cls is pa.Table: + assert type(col) is pa.ChunkedArray + + for chunk in col.iterchunks(): + assert chunk is not None + + with pytest.raises(IndexError): + col.chunk(-1) + + with pytest.raises(IndexError): + col.chunk(col.num_chunks) + + else: + assert issubclass(type(col), pa.Array) + + columns.append(col) + + assert table.columns == columns + assert table == cls.from_arrays(columns, names=table.column_names) + assert table != cls.from_arrays(columns[1:], names=table.column_names[1:]) + assert table != columns + + # Schema passed explicitly + schema = pa.schema([pa.field('c0', pa.int16(), + metadata={'key': 'value'}), + pa.field('c1', pa.int32())], + metadata={b'foo': b'bar'}) + table = cls.from_arrays(data, schema=schema) + assert table.schema == schema + + wr = weakref.ref(table) + assert wr() is not None + del table + assert wr() is None + + +def test_table_dunder_init(): + with pytest.raises(TypeError, match='Table'): + pa.Table() + + +def test_table_from_arrays_preserves_column_metadata(): + # Added to test https://issues.apache.org/jira/browse/ARROW-3866 + arr0 = pa.array([1, 2]) + arr1 = pa.array([3, 4]) + field0 = pa.field('field1', pa.int64(), metadata=dict(a="A", b="B")) + field1 = pa.field('field2', pa.int64(), nullable=False) + table = pa.Table.from_arrays([arr0, arr1], + schema=pa.schema([field0, field1])) + assert b"a" in table.field(0).metadata + assert table.field(1).nullable is False + + +def test_table_from_arrays_invalid_names(): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]) + ] + with pytest.raises(ValueError): + pa.Table.from_arrays(data, names=['a', 'b', 'c']) + + with pytest.raises(ValueError): + pa.Table.from_arrays(data, names=['a']) + + +def test_table_from_lists(): + data = [ + list(range(5)), + [-10, -5, 0, 5, 10] + ] + + result = pa.table(data, names=['a', 'b']) + expected = pa.Table.from_arrays(data, names=['a', 'b']) + assert result.equals(expected) + + schema = pa.schema([ + pa.field('a', pa.uint16()), 
+ pa.field('b', pa.int64()) + ]) + result = pa.table(data, schema=schema) + expected = pa.Table.from_arrays(data, schema=schema) + assert result.equals(expected) + + +def test_table_pickle(pickle_module): + data = [ + pa.chunked_array([[1, 2], [3, 4]], type=pa.uint32()), + pa.chunked_array([["some", "strings", None, ""]], type=pa.string()), + ] + schema = pa.schema([pa.field('ints', pa.uint32()), + pa.field('strs', pa.string())], + metadata={b'foo': b'bar'}) + table = pa.Table.from_arrays(data, schema=schema) + + result = pickle_module.loads(pickle_module.dumps(table)) + result.validate() + assert result.equals(table) + + +def test_table_get_field(): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array(range(5, 10)) + ] + table = pa.Table.from_arrays(data, names=('a', 'b', 'c')) + + assert table.field('a').equals(table.schema.field('a')) + assert table.field(0).equals(table.schema.field('a')) + + with pytest.raises(KeyError): + table.field('d') + + with pytest.raises(TypeError): + table.field(None) + + with pytest.raises(IndexError): + table.field(4) + + +def test_table_select_column(): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array(range(5, 10)) + ] + table = pa.Table.from_arrays(data, names=('a', 'b', 'c')) + + assert table.column('a').equals(table.column(0)) + + with pytest.raises(KeyError, + match='Field "d" does not exist in schema'): + table.column('d') + + with pytest.raises(TypeError): + table.column(None) + + with pytest.raises(IndexError): + table.column(4) + + +def test_table_column_with_duplicates(): + # ARROW-8209 + table = pa.table([pa.array([1, 2, 3]), + pa.array([4, 5, 6]), + pa.array([7, 8, 9])], names=['a', 'b', 'a']) + + with pytest.raises(KeyError, + match='Field "a" exists 2 times in schema'): + table.column('a') + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_add_column(cls): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array(range(5, 10)) + ] + table = cls.from_arrays(data, names=('a', 'b', 'c')) + + new_field = pa.field('d', data[1].type) + t2 = table.add_column(3, new_field, data[1]) + t3 = table.append_column(new_field, data[1]) + + expected = cls.from_arrays(data + [data[1]], + names=('a', 'b', 'c', 'd')) + assert t2.equals(expected) + assert t3.equals(expected) + + t4 = table.add_column(0, new_field, data[1]) + expected = cls.from_arrays([data[1]] + data, + names=('d', 'a', 'b', 'c')) + assert t4.equals(expected) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_set_column(cls): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array(range(5, 10)) + ] + table = cls.from_arrays(data, names=('a', 'b', 'c')) + + new_field = pa.field('d', data[1].type) + t2 = table.set_column(0, new_field, data[1]) + + expected_data = list(data) + expected_data[0] = data[1] + expected = cls.from_arrays(expected_data, + names=('d', 'b', 'c')) + assert t2.equals(expected) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_drop_columns(cls): + """ drop one or more columns given labels""" + a = pa.array(range(5)) + b = pa.array([-10, -5, 0, 5, 10]) + c = pa.array(range(5, 10)) + + table = cls.from_arrays([a, b, c], names=('a', 'b', 'c')) + t2 = table.drop_columns(['a', 'b']) + t3 = table.drop_columns('a') + + exp_t2 = cls.from_arrays([c], names=('c',)) + assert exp_t2.equals(t2) + exp_t3 = cls.from_arrays([b, c], names=('b', 'c',)) + 
assert exp_t3.equals(t3) + + # -- raise KeyError if column not in Table + with pytest.raises(KeyError, match="Column 'd' not found"): + table.drop_columns(['d']) + + +def test_table_drop(): + """ verify the alias of drop_columns is working""" + a = pa.array(range(5)) + b = pa.array([-10, -5, 0, 5, 10]) + c = pa.array(range(5, 10)) + + table = pa.Table.from_arrays([a, b, c], names=('a', 'b', 'c')) + t2 = table.drop(['a', 'b']) + t3 = table.drop('a') + + exp_t2 = pa.Table.from_arrays([c], names=('c',)) + assert exp_t2.equals(t2) + exp_t3 = pa.Table.from_arrays([b, c], names=('b', 'c',)) + assert exp_t3.equals(t3) + + # -- raise KeyError if column not in Table + with pytest.raises(KeyError, match="Column 'd' not found"): + table.drop(['d']) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_remove_column(cls): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array(range(5, 10)) + ] + table = cls.from_arrays(data, names=('a', 'b', 'c')) + + t2 = table.remove_column(0) + t2.validate() + expected = cls.from_arrays(data[1:], names=('b', 'c')) + assert t2.equals(expected) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_remove_column_empty(cls): + # ARROW-1865 + data = [ + pa.array(range(5)), + ] + table = cls.from_arrays(data, names=['a']) + + t2 = table.remove_column(0) + t2.validate() + assert len(t2) == len(table) + + t3 = t2.add_column(0, table.field(0), table[0]) + t3.validate() + assert t3.equals(table) + + +def test_empty_table_with_names(): + # ARROW-13784 + data = [] + names = ["a", "b"] + message = ( + 'Length of names [(]2[)] does not match length of arrays [(]0[)]') + with pytest.raises(ValueError, match=message): + pa.Table.from_arrays(data, names=names) + + +def test_empty_table(): + table = pa.table([]) + + assert table.column_names == [] + assert table.equals(pa.Table.from_arrays([], [])) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_rename_columns(cls): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array(range(5, 10)) + ] + table = cls.from_arrays(data, names=['a', 'b', 'c']) + assert table.column_names == ['a', 'b', 'c'] + + t2 = table.rename_columns(['eh', 'bee', 'sea']) + t2.validate() + assert t2.column_names == ['eh', 'bee', 'sea'] + + expected = cls.from_arrays(data, names=['eh', 'bee', 'sea']) + assert t2.equals(expected) + + +def test_table_flatten(): + ty1 = pa.struct([pa.field('x', pa.int16()), + pa.field('y', pa.float32())]) + ty2 = pa.struct([pa.field('nest', ty1)]) + a = pa.array([(1, 2.5), (3, 4.5)], type=ty1) + b = pa.array([((11, 12.5),), ((13, 14.5),)], type=ty2) + c = pa.array([False, True], type=pa.bool_()) + + table = pa.Table.from_arrays([a, b, c], names=['a', 'b', 'c']) + t2 = table.flatten() + t2.validate() + expected = pa.Table.from_arrays([ + pa.array([1, 3], type=pa.int16()), + pa.array([2.5, 4.5], type=pa.float32()), + pa.array([(11, 12.5), (13, 14.5)], type=ty1), + c], + names=['a.x', 'a.y', 'b.nest', 'c']) + assert t2.equals(expected) + + +def test_table_combine_chunks(): + batch1 = pa.record_batch([pa.array([1]), pa.array(["a"])], + names=['f1', 'f2']) + batch2 = pa.record_batch([pa.array([2]), pa.array(["b"])], + names=['f1', 'f2']) + table = pa.Table.from_batches([batch1, batch2]) + combined = table.combine_chunks() + combined.validate() + assert combined.equals(table) + for c in combined.columns: + assert c.num_chunks == 1 + + +def 
test_table_unify_dictionaries(): + batch1 = pa.record_batch([ + pa.array(["foo", "bar", None, "foo"]).dictionary_encode(), + pa.array([123, 456, 456, 789]).dictionary_encode(), + pa.array([True, False, None, None])], names=['a', 'b', 'c']) + batch2 = pa.record_batch([ + pa.array(["quux", "foo", None, "quux"]).dictionary_encode(), + pa.array([456, 789, 789, None]).dictionary_encode(), + pa.array([False, None, None, True])], names=['a', 'b', 'c']) + + table = pa.Table.from_batches([batch1, batch2]) + table = table.replace_schema_metadata({b"key1": b"value1"}) + assert table.column(0).chunk(0).dictionary.equals( + pa.array(["foo", "bar"])) + assert table.column(0).chunk(1).dictionary.equals( + pa.array(["quux", "foo"])) + assert table.column(1).chunk(0).dictionary.equals( + pa.array([123, 456, 789])) + assert table.column(1).chunk(1).dictionary.equals( + pa.array([456, 789])) + + table = table.unify_dictionaries(pa.default_memory_pool()) + expected_dict_0 = pa.array(["foo", "bar", "quux"]) + expected_dict_1 = pa.array([123, 456, 789]) + assert table.column(0).chunk(0).dictionary.equals(expected_dict_0) + assert table.column(0).chunk(1).dictionary.equals(expected_dict_0) + assert table.column(1).chunk(0).dictionary.equals(expected_dict_1) + assert table.column(1).chunk(1).dictionary.equals(expected_dict_1) + + assert table.to_pydict() == { + 'a': ["foo", "bar", None, "foo", "quux", "foo", None, "quux"], + 'b': [123, 456, 456, 789, 456, 789, 789, None], + 'c': [True, False, None, None, False, None, None, True], + } + assert table.schema.metadata == {b"key1": b"value1"} + + +def test_concat_tables(): + data = [ + list(range(5)), + [-10., -5., 0., 5., 10.] + ] + data2 = [ + list(range(5, 10)), + [1., 2., 3., 4., 5.] + ] + + t1 = pa.Table.from_arrays([pa.array(x) for x in data], + names=('a', 'b')) + t2 = pa.Table.from_arrays([pa.array(x) for x in data2], + names=('a', 'b')) + + result = pa.concat_tables([t1, t2]) + result.validate() + assert len(result) == 10 + + expected = pa.Table.from_arrays([pa.array(x + y) + for x, y in zip(data, data2)], + names=('a', 'b')) + + assert result.equals(expected) + + +def test_concat_tables_permissive(): + t1 = pa.Table.from_arrays([list(range(10))], names=('a',)) + t2 = pa.Table.from_arrays([list(('a', 'b', 'c'))], names=('a',)) + + with pytest.raises( + pa.ArrowTypeError, + match="Unable to merge: Field a has incompatible types: int64 vs string"): + _ = pa.concat_tables([t1, t2], promote_options="permissive") + + +def test_concat_tables_invalid_option(): + t = pa.Table.from_arrays([list(range(10))], names=('a',)) + + with pytest.raises(ValueError, match="Invalid promote options: invalid"): + pa.concat_tables([t, t], promote_options="invalid") + + +def test_concat_tables_none_table(): + # ARROW-11997 + with pytest.raises(AttributeError): + pa.concat_tables([None]) + + +@pytest.mark.pandas +def test_concat_tables_with_different_schema_metadata(): + import pandas as pd + + schema = pa.schema([ + pa.field('a', pa.string()), + pa.field('b', pa.string()), + ]) + + values = list('abcdefgh') + df1 = pd.DataFrame({'a': values, 'b': values}) + df2 = pd.DataFrame({'a': [np.nan] * 8, 'b': values}) + + table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False) + table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False) + assert table1.schema.equals(table2.schema) + assert not table1.schema.equals(table2.schema, check_metadata=True) + + table3 = pa.concat_tables([table1, table2]) + assert table1.schema.equals(table3.schema, check_metadata=True) + 
assert table2.schema.equals(table3.schema) + + +def test_concat_tables_with_promote_option(): + t1 = pa.Table.from_arrays( + [pa.array([1, 2], type=pa.int64())], ["int64_field"]) + t2 = pa.Table.from_arrays( + [pa.array([1.0, 2.0], type=pa.float32())], ["float_field"]) + + with pytest.warns(FutureWarning): + result = pa.concat_tables([t1, t2], promote=True) + + assert result.equals(pa.Table.from_arrays([ + pa.array([1, 2, None, None], type=pa.int64()), + pa.array([None, None, 1.0, 2.0], type=pa.float32()), + ], ["int64_field", "float_field"])) + + t1 = pa.Table.from_arrays( + [pa.array([1, 2], type=pa.int64())], ["f"]) + t2 = pa.Table.from_arrays( + [pa.array([1, 2], type=pa.float32())], ["f"]) + + with pytest.raises(pa.ArrowInvalid, match="Schema at index 1 was different:"): + with pytest.warns(FutureWarning): + pa.concat_tables([t1, t2], promote=False) + + +def test_concat_tables_with_promotion(): + t1 = pa.Table.from_arrays( + [pa.array([1, 2], type=pa.int64())], ["int64_field"]) + t2 = pa.Table.from_arrays( + [pa.array([1.0, 2.0], type=pa.float32())], ["float_field"]) + + result = pa.concat_tables([t1, t2], promote_options="default") + + assert result.equals(pa.Table.from_arrays([ + pa.array([1, 2, None, None], type=pa.int64()), + pa.array([None, None, 1.0, 2.0], type=pa.float32()), + ], ["int64_field", "float_field"])) + + t3 = pa.Table.from_arrays( + [pa.array([1, 2], type=pa.int32())], ["int64_field"]) + result = pa.concat_tables( + [t1, t3], promote_options="permissive") + assert result.equals(pa.Table.from_arrays([ + pa.array([1, 2, 1, 2], type=pa.int64()), + ], ["int64_field"])) + + +def test_concat_tables_with_promotion_error(): + t1 = pa.Table.from_arrays( + [pa.array([1, 2], type=pa.int64())], ["f"]) + t2 = pa.Table.from_arrays( + [pa.array([1, 2], type=pa.float32())], ["f"]) + + with pytest.raises(pa.ArrowTypeError, match="Unable to merge:"): + pa.concat_tables([t1, t2], promote_options="default") + + +def test_table_negative_indexing(): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + pa.array([1.0, 2.0, 3.0, 4.0, 5.0]), + pa.array(['ab', 'bc', 'cd', 'de', 'ef']), + ] + table = pa.Table.from_arrays(data, names=tuple('abcd')) + + assert table[-1].equals(table[3]) + assert table[-2].equals(table[2]) + assert table[-3].equals(table[1]) + assert table[-4].equals(table[0]) + + with pytest.raises(IndexError): + table[-5] + + with pytest.raises(IndexError): + table[4] + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_cast_to_incompatible_schema(cls): + data = [ + pa.array(range(5)), + pa.array([-10, -5, 0, 5, 10]), + ] + table = cls.from_arrays(data, names=tuple('ab')) + + target_schema1 = pa.schema([ + pa.field('A', pa.int32()), + pa.field('b', pa.int16()), + ]) + target_schema2 = pa.schema([ + pa.field('a', pa.int32()), + ]) + + if cls is pa.Table: + cls_name = 'table' + else: + cls_name = 'record batch' + message = ("Target schema's field names are not matching the " + f"{cls_name}'s field names:.*") + + with pytest.raises(ValueError, match=message): + table.cast(target_schema1) + with pytest.raises(ValueError, match=message): + table.cast(target_schema2) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_safe_casting(cls): + data = [ + pa.array(range(5), type=pa.int64()), + pa.array([-10, -5, 0, 5, 10], type=pa.int32()), + pa.array([1.0, 2.0, 3.0, 4.0, 5.0], type=pa.float64()), + pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string()) + ] + table = 
cls.from_arrays(data, names=tuple('abcd')) + + expected_data = [ + pa.array(range(5), type=pa.int32()), + pa.array([-10, -5, 0, 5, 10], type=pa.int16()), + pa.array([1, 2, 3, 4, 5], type=pa.int64()), + pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string()) + ] + expected_table = cls.from_arrays(expected_data, names=tuple('abcd')) + + target_schema = pa.schema([ + pa.field('a', pa.int32()), + pa.field('b', pa.int16()), + pa.field('c', pa.int64()), + pa.field('d', pa.string()) + ]) + casted_table = table.cast(target_schema) + + assert casted_table.equals(expected_table) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_unsafe_casting(cls): + data = [ + pa.array(range(5), type=pa.int64()), + pa.array([-10, -5, 0, 5, 10], type=pa.int32()), + pa.array([1.1, 2.2, 3.3, 4.4, 5.5], type=pa.float64()), + pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string()) + ] + table = cls.from_arrays(data, names=tuple('abcd')) + + expected_data = [ + pa.array(range(5), type=pa.int32()), + pa.array([-10, -5, 0, 5, 10], type=pa.int16()), + pa.array([1, 2, 3, 4, 5], type=pa.int64()), + pa.array(['ab', 'bc', 'cd', 'de', 'ef'], type=pa.string()) + ] + expected_table = cls.from_arrays(expected_data, names=tuple('abcd')) + + target_schema = pa.schema([ + pa.field('a', pa.int32()), + pa.field('b', pa.int16()), + pa.field('c', pa.int64()), + pa.field('d', pa.string()) + ]) + + with pytest.raises(pa.ArrowInvalid, match='truncated'): + table.cast(target_schema) + + casted_table = table.cast(target_schema, safe=False) + assert casted_table.equals(expected_table) + + +def test_invalid_table_construct(): + array = np.array([0, 1], dtype=np.uint8) + u8 = pa.uint8() + arrays = [pa.array(array, type=u8), pa.array(array[1:], type=u8)] + + with pytest.raises(pa.lib.ArrowInvalid): + pa.Table.from_arrays(arrays, names=["a1", "a2"]) + + +@pytest.mark.parametrize('data, klass', [ + ((['', 'foo', 'bar'], [4.5, 5, None]), list), + ((['', 'foo', 'bar'], [4.5, 5, None]), pa.array), + (([[''], ['foo', 'bar']], [[4.5], [5., None]]), pa.chunked_array), +]) +def test_from_arrays_schema(data, klass): + data = [klass(data[0]), klass(data[1])] + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float32())]) + + table = pa.Table.from_arrays(data, schema=schema) + assert table.num_columns == 2 + assert table.num_rows == 3 + assert table.schema == schema + + # length of data and schema not matching + schema = pa.schema([('strs', pa.utf8())]) + with pytest.raises(ValueError): + pa.Table.from_arrays(data, schema=schema) + + # with different but compatible schema + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float32())]) + table = pa.Table.from_arrays(data, schema=schema) + assert pa.types.is_float32(table.column('floats').type) + assert table.num_columns == 2 + assert table.num_rows == 3 + assert table.schema == schema + + # with different and incompatible schema + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.timestamp('s'))]) + with pytest.raises((NotImplementedError, TypeError)): + pa.Table.from_pydict(data, schema=schema) + + # Cannot pass both schema and metadata / names + with pytest.raises(ValueError): + pa.Table.from_arrays(data, schema=schema, names=['strs', 'floats']) + + with pytest.raises(ValueError): + pa.Table.from_arrays(data, schema=schema, metadata={b'foo': b'bar'}) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_from_pydict(cls): + table = cls.from_pydict({}) + assert table.num_columns == 0 + 
assert table.num_rows == 0 + assert table.schema == pa.schema([]) + assert table.to_pydict() == {} + + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64())]) + + # With lists as values + data = OrderedDict([('strs', ['', 'foo', 'bar']), + ('floats', [4.5, 5, None])]) + table = cls.from_pydict(data) + assert table.num_columns == 2 + assert table.num_rows == 3 + assert table.schema == schema + assert table.to_pydict() == data + + # With metadata and inferred schema + metadata = {b'foo': b'bar'} + schema = schema.with_metadata(metadata) + table = cls.from_pydict(data, metadata=metadata) + assert table.schema == schema + assert table.schema.metadata == metadata + assert table.to_pydict() == data + + # With explicit schema + table = cls.from_pydict(data, schema=schema) + assert table.schema == schema + assert table.schema.metadata == metadata + assert table.to_pydict() == data + + # Cannot pass both schema and metadata + with pytest.raises(ValueError): + cls.from_pydict(data, schema=schema, metadata=metadata) + + # Non-convertible values given schema + with pytest.raises(TypeError): + cls.from_pydict({'c0': [0, 1, 2]}, + schema=pa.schema([("c0", pa.string())])) + + # Missing schema fields from the passed mapping + with pytest.raises(KeyError, match="doesn\'t contain.* c, d"): + cls.from_pydict( + {'a': [1, 2, 3], 'b': [3, 4, 5]}, + schema=pa.schema([ + ('a', pa.int64()), + ('c', pa.int32()), + ('d', pa.int16()) + ]) + ) + + # Passed wrong schema type + with pytest.raises(TypeError): + cls.from_pydict({'a': [1, 2, 3]}, schema={}) + + +@pytest.mark.parametrize('data, klass', [ + ((['', 'foo', 'bar'], [4.5, 5, None]), pa.array), + (([[''], ['foo', 'bar']], [[4.5], [5., None]]), pa.chunked_array), +]) +def test_table_from_pydict_arrow_arrays(data, klass): + data = OrderedDict([('strs', klass(data[0])), ('floats', klass(data[1]))]) + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64())]) + + # With arrays as values + table = pa.Table.from_pydict(data) + assert table.num_columns == 2 + assert table.num_rows == 3 + assert table.schema == schema + + # With explicit (matching) schema + table = pa.Table.from_pydict(data, schema=schema) + assert table.num_columns == 2 + assert table.num_rows == 3 + assert table.schema == schema + + # with different but compatible schema + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float32())]) + table = pa.Table.from_pydict(data, schema=schema) + assert pa.types.is_float32(table.column('floats').type) + assert table.num_columns == 2 + assert table.num_rows == 3 + assert table.schema == schema + + # with different and incompatible schema + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.timestamp('s'))]) + with pytest.raises((NotImplementedError, TypeError)): + pa.Table.from_pydict(data, schema=schema) + + +@pytest.mark.parametrize('data, klass', [ + ((['', 'foo', 'bar'], [4.5, 5, None]), list), + ((['', 'foo', 'bar'], [4.5, 5, None]), pa.array), + (([[''], ['foo', 'bar']], [[4.5], [5., None]]), pa.chunked_array), +]) +def test_table_from_pydict_schema(data, klass): + # passed schema is source of truth for the columns + + data = OrderedDict([('strs', klass(data[0])), ('floats', klass(data[1]))]) + + # schema has columns not present in data -> error + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64()), + ('ints', pa.int64())]) + with pytest.raises(KeyError, match='ints'): + pa.Table.from_pydict(data, schema=schema) + + # data has columns not present in schema -> ignored + schema = pa.schema([('strs', pa.utf8())]) + 
table = pa.Table.from_pydict(data, schema=schema) + assert table.num_columns == 1 + assert table.schema == schema + assert table.column_names == ['strs'] + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_from_pylist(cls): + table = cls.from_pylist([]) + assert table.num_columns == 0 + assert table.num_rows == 0 + assert table.schema == pa.schema([]) + assert table.to_pylist() == [] + + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float64())]) + + # With lists as values + data = [{'strs': '', 'floats': 4.5}, + {'strs': 'foo', 'floats': 5}, + {'strs': 'bar', 'floats': None}] + table = cls.from_pylist(data) + assert table.num_columns == 2 + assert table.num_rows == 3 + assert table.schema == schema + assert table.to_pylist() == data + + # With metadata and inferred schema + metadata = {b'foo': b'bar'} + schema = schema.with_metadata(metadata) + table = cls.from_pylist(data, metadata=metadata) + assert table.schema == schema + assert table.schema.metadata == metadata + assert table.to_pylist() == data + + # With explicit schema + table = cls.from_pylist(data, schema=schema) + assert table.schema == schema + assert table.schema.metadata == metadata + assert table.to_pylist() == data + + # Cannot pass both schema and metadata + with pytest.raises(ValueError): + cls.from_pylist(data, schema=schema, metadata=metadata) + + # Non-convertible values given schema + with pytest.raises(TypeError): + cls.from_pylist([{'c0': 0}, {'c0': 1}, {'c0': 2}], + schema=pa.schema([("c0", pa.string())])) + + # Missing schema fields in the passed mapping translate to None + schema = pa.schema([('a', pa.int64()), + ('c', pa.int32()), + ('d', pa.int16()) + ]) + table = cls.from_pylist( + [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}, {'a': 3, 'b': 5}], + schema=schema + ) + data = [{'a': 1, 'c': None, 'd': None}, + {'a': 2, 'c': None, 'd': None}, + {'a': 3, 'c': None, 'd': None}] + assert table.schema == schema + assert table.to_pylist() == data + + # Passed wrong schema type + with pytest.raises(TypeError): + cls.from_pylist([{'a': 1}, {'a': 2}, {'a': 3}], schema={}) + + # If the dictionaries of rows are not same length + data = [{'strs': '', 'floats': 4.5}, + {'floats': 5}, + {'strs': 'bar'}] + data2 = [{'strs': '', 'floats': 4.5}, + {'strs': None, 'floats': 5}, + {'strs': 'bar', 'floats': None}] + table = cls.from_pylist(data) + assert table.num_columns == 2 + assert table.num_rows == 3 + assert table.to_pylist() == data2 + + data = [{'strs': ''}, + {'strs': 'foo', 'floats': 5}, + {'floats': None}] + data2 = [{'strs': ''}, + {'strs': 'foo'}, + {'strs': None}] + table = cls.from_pylist(data) + assert table.num_columns == 1 + assert table.num_rows == 3 + assert table.to_pylist() == data2 + + +@pytest.mark.pandas +def test_table_from_pandas_schema(): + # passed schema is source of truth for the columns + import pandas as pd + + df = pd.DataFrame(OrderedDict([('strs', ['', 'foo', 'bar']), + ('floats', [4.5, 5, None])])) + + # with different but compatible schema + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.float32())]) + table = pa.Table.from_pandas(df, schema=schema) + assert pa.types.is_float32(table.column('floats').type) + assert table.schema.remove_metadata() == schema + + # with different and incompatible schema + schema = pa.schema([('strs', pa.utf8()), ('floats', pa.timestamp('s'))]) + with pytest.raises((NotImplementedError, TypeError)): + pa.Table.from_pandas(df, schema=schema) + + # schema has columns not present in data -> error + schema = 
pa.schema([('strs', pa.utf8()), ('floats', pa.float64()), + ('ints', pa.int64())]) + with pytest.raises(KeyError, match='ints'): + pa.Table.from_pandas(df, schema=schema) + + # data has columns not present in schema -> ignored + schema = pa.schema([('strs', pa.utf8())]) + table = pa.Table.from_pandas(df, schema=schema) + assert table.num_columns == 1 + assert table.schema.remove_metadata() == schema + assert table.column_names == ['strs'] + + +@pytest.mark.pandas +def test_table_factory_function(): + import pandas as pd + + # Put in wrong order to make sure that lines up with schema + d = OrderedDict([('b', ['a', 'b', 'c']), ('a', [1, 2, 3])]) + + d_explicit = {'b': pa.array(['a', 'b', 'c'], type='string'), + 'a': pa.array([1, 2, 3], type='int32')} + + schema = pa.schema([('a', pa.int32()), ('b', pa.string())]) + + df = pd.DataFrame(d) + table1 = pa.table(df) + table2 = pa.Table.from_pandas(df) + assert table1.equals(table2) + table1 = pa.table(df, schema=schema) + table2 = pa.Table.from_pandas(df, schema=schema) + assert table1.equals(table2) + + table1 = pa.table(d_explicit) + table2 = pa.Table.from_pydict(d_explicit) + assert table1.equals(table2) + + # schema coerces type + table1 = pa.table(d, schema=schema) + table2 = pa.Table.from_pydict(d, schema=schema) + assert table1.equals(table2) + + +def test_table_factory_function_args(): + # from_pydict not accepting names: + with pytest.raises(ValueError): + pa.table({'a': [1, 2, 3]}, names=['a']) + + # backwards compatibility for schema as first positional argument + schema = pa.schema([('a', pa.int32())]) + table = pa.table({'a': pa.array([1, 2, 3], type=pa.int64())}, schema) + assert table.column('a').type == pa.int32() + + # from_arrays: accept both names and schema as positional first argument + data = [pa.array([1, 2, 3], type='int64')] + names = ['a'] + table = pa.table(data, names) + assert table.column_names == names + schema = pa.schema([('a', pa.int64())]) + table = pa.table(data, schema) + assert table.column_names == names + + +@pytest.mark.pandas +def test_table_factory_function_args_pandas(): + import pandas as pd + + # from_pandas not accepting names or metadata: + with pytest.raises(ValueError): + pa.table(pd.DataFrame({'a': [1, 2, 3]}), names=['a']) + + with pytest.raises(ValueError): + pa.table(pd.DataFrame({'a': [1, 2, 3]}), metadata={b'foo': b'bar'}) + + # backwards compatibility for schema as first positional argument + schema = pa.schema([('a', pa.int32())]) + table = pa.table(pd.DataFrame({'a': [1, 2, 3]}), schema) + assert table.column('a').type == pa.int32() + + +def test_factory_functions_invalid_input(): + with pytest.raises(TypeError, match="Expected pandas DataFrame, python"): + pa.table("invalid input") + + with pytest.raises(TypeError, match="Expected pandas DataFrame"): + pa.record_batch("invalid input") + + +def test_table_repr_to_string(): + # Schema passed explicitly + schema = pa.schema([pa.field('c0', pa.int16(), + metadata={'key': 'value'}), + pa.field('c1', pa.int32())], + metadata={b'foo': b'bar'}) + + tab = pa.table([pa.array([1, 2, 3, 4], type='int16'), + pa.array([10, 20, 30, 40], type='int32')], schema=schema) + assert str(tab) == """pyarrow.Table +c0: int16 +c1: int32 +---- +c0: [[1,2,3,4]] +c1: [[10,20,30,40]]""" + + assert tab.to_string(show_metadata=True) == """\ +pyarrow.Table +c0: int16 + -- field metadata -- + key: 'value' +c1: int32 +-- schema metadata -- +foo: 'bar'""" + + assert tab.to_string(preview_cols=5) == """\ +pyarrow.Table +c0: int16 +c1: int32 +---- +c0: [[1,2,3,4]] +c1: 
[[10,20,30,40]]""" + + assert tab.to_string(preview_cols=1) == """\ +pyarrow.Table +c0: int16 +c1: int32 +---- +c0: [[1,2,3,4]] +...""" + + +def test_table_repr_to_string_ellipsis(): + # Schema passed explicitly + schema = pa.schema([pa.field('c0', pa.int16(), + metadata={'key': 'value'}), + pa.field('c1', pa.int32())], + metadata={b'foo': b'bar'}) + + tab = pa.table([pa.array([1, 2, 3, 4]*10, type='int16'), + pa.array([10, 20, 30, 40]*10, type='int32')], + schema=schema) + assert str(tab) == """pyarrow.Table +c0: int16 +c1: int32 +---- +c0: [[1,2,3,4,1,...,4,1,2,3,4]] +c1: [[10,20,30,40,10,...,40,10,20,30,40]]""" + + +def test_record_batch_repr_to_string(): + # Schema passed explicitly + schema = pa.schema([pa.field('c0', pa.int16(), + metadata={'key': 'value'}), + pa.field('c1', pa.int32())], + metadata={b'foo': b'bar'}) + + batch = pa.record_batch([pa.array([1, 2, 3, 4], type='int16'), + pa.array([10, 20, 30, 40], type='int32')], + schema=schema) + assert str(batch) == """pyarrow.RecordBatch +c0: int16 +c1: int32 +---- +c0: [1,2,3,4] +c1: [10,20,30,40]""" + + assert batch.to_string(show_metadata=True) == """\ +pyarrow.RecordBatch +c0: int16 + -- field metadata -- + key: 'value' +c1: int32 +-- schema metadata -- +foo: 'bar'""" + + assert batch.to_string(preview_cols=5) == """\ +pyarrow.RecordBatch +c0: int16 +c1: int32 +---- +c0: [1,2,3,4] +c1: [10,20,30,40]""" + + assert batch.to_string(preview_cols=1) == """\ +pyarrow.RecordBatch +c0: int16 +c1: int32 +---- +c0: [1,2,3,4] +...""" + + +def test_record_batch_repr_to_string_ellipsis(): + # Schema passed explicitly + schema = pa.schema([pa.field('c0', pa.int16(), + metadata={'key': 'value'}), + pa.field('c1', pa.int32())], + metadata={b'foo': b'bar'}) + + batch = pa.record_batch([pa.array([1, 2, 3, 4]*10, type='int16'), + pa.array([10, 20, 30, 40]*10, type='int32')], + schema=schema) + assert str(batch) == """pyarrow.RecordBatch +c0: int16 +c1: int32 +---- +c0: [1,2,3,4,1,2,3,4,1,2,...,3,4,1,2,3,4,1,2,3,4] +c1: [10,20,30,40,10,20,30,40,10,20,...,30,40,10,20,30,40,10,20,30,40]""" + + +def test_table_function_unicode_schema(): + col_a = "äääh" + col_b = "öööf" + + # Put in wrong order to make sure that lines up with schema + d = OrderedDict([(col_b, ['a', 'b', 'c']), (col_a, [1, 2, 3])]) + + schema = pa.schema([(col_a, pa.int32()), (col_b, pa.string())]) + + result = pa.table(d, schema=schema) + assert result[0].chunk(0).equals(pa.array([1, 2, 3], type='int32')) + assert result[1].chunk(0).equals(pa.array(['a', 'b', 'c'], type='string')) + + +def test_table_take_vanilla_functionality(): + table = pa.table( + [pa.array([1, 2, 3, None, 5]), + pa.array(['a', 'b', 'c', 'd', 'e'])], + ['f1', 'f2']) + + assert table.take(pa.array([2, 3])).equals(table.slice(2, 2)) + + +def test_table_take_null_index(): + table = pa.table( + [pa.array([1, 2, 3, None, 5]), + pa.array(['a', 'b', 'c', 'd', 'e'])], + ['f1', 'f2']) + + result_with_null_index = pa.table( + [pa.array([1, None]), + pa.array(['a', None])], + ['f1', 'f2']) + + assert table.take(pa.array([0, None])).equals(result_with_null_index) + + +def test_table_take_non_consecutive(): + table = pa.table( + [pa.array([1, 2, 3, None, 5]), + pa.array(['a', 'b', 'c', 'd', 'e'])], + ['f1', 'f2']) + + result_non_consecutive = pa.table( + [pa.array([2, None]), + pa.array(['b', 'd'])], + ['f1', 'f2']) + + assert table.take(pa.array([1, 3])).equals(result_non_consecutive) + + +def test_table_select(): + a1 = pa.array([1, 2, 3, None, 5]) + a2 = pa.array(['a', 'b', 'c', 'd', 'e']) + a3 = pa.array([[1, 2], [3, 4], 
[5, 6], None, [9, 10]]) + table = pa.table([a1, a2, a3], ['f1', 'f2', 'f3']) + + # selecting with string names + result = table.select(['f1']) + expected = pa.table([a1], ['f1']) + assert result.equals(expected) + + result = table.select(['f3', 'f2']) + expected = pa.table([a3, a2], ['f3', 'f2']) + assert result.equals(expected) + + # selecting with integer indices + result = table.select([0]) + expected = pa.table([a1], ['f1']) + assert result.equals(expected) + + result = table.select([2, 1]) + expected = pa.table([a3, a2], ['f3', 'f2']) + assert result.equals(expected) + + # preserve metadata + table2 = table.replace_schema_metadata({"a": "test"}) + result = table2.select(["f1", "f2"]) + assert b"a" in result.schema.metadata + + # selecting non-existing column raises + with pytest.raises(KeyError, match='Field "f5" does not exist'): + table.select(['f5']) + + with pytest.raises(IndexError, match="index out of bounds"): + table.select([5]) + + # duplicate selection gives duplicated names in resulting table + result = table.select(['f2', 'f2']) + expected = pa.table([a2, a2], ['f2', 'f2']) + assert result.equals(expected) + + # selection duplicated column raises + table = pa.table([a1, a2, a3], ['f1', 'f2', 'f1']) + with pytest.raises(KeyError, match='Field "f1" exists 2 times'): + table.select(['f1']) + + result = table.select(['f2']) + expected = pa.table([a2], ['f2']) + assert result.equals(expected) + + +@pytest.mark.acero +def test_table_group_by(): + def sorted_by_keys(d): + # Ensure a guaranteed order of keys for aggregation results. + if "keys2" in d: + keys = tuple(zip(d["keys"], d["keys2"])) + else: + keys = d["keys"] + sorted_keys = sorted(keys) + sorted_d = {"keys": sorted(d["keys"])} + for entry in d: + if entry == "keys": + continue + values = dict(zip(keys, d[entry])) + for k in sorted_keys: + sorted_d.setdefault(entry, []).append(values[k]) + return sorted_d + + table = pa.table([ + pa.array(["a", "a", "b", "b", "c"]), + pa.array(["X", "X", "Y", "Z", "Z"]), + pa.array([1, 2, 3, 4, 5]), + pa.array([10, 20, 30, 40, 50]) + ], names=["keys", "keys2", "values", "bigvalues"]) + + r = table.group_by("keys").aggregate([ + ("values", "hash_sum") + ]) + assert sorted_by_keys(r.to_pydict()) == { + "keys": ["a", "b", "c"], + "values_sum": [3, 7, 5] + } + + r = table.group_by("keys").aggregate([ + ("values", "hash_sum"), + ("values", "hash_count") + ]) + assert sorted_by_keys(r.to_pydict()) == { + "keys": ["a", "b", "c"], + "values_sum": [3, 7, 5], + "values_count": [2, 2, 1] + } + + # Test without hash_ prefix + r = table.group_by("keys").aggregate([ + ("values", "sum") + ]) + assert sorted_by_keys(r.to_pydict()) == { + "keys": ["a", "b", "c"], + "values_sum": [3, 7, 5] + } + + r = table.group_by("keys").aggregate([ + ("values", "max"), + ("bigvalues", "sum") + ]) + assert sorted_by_keys(r.to_pydict()) == { + "keys": ["a", "b", "c"], + "values_max": [2, 4, 5], + "bigvalues_sum": [30, 70, 50] + } + + r = table.group_by("keys").aggregate([ + ("bigvalues", "max"), + ("values", "sum") + ]) + assert sorted_by_keys(r.to_pydict()) == { + "keys": ["a", "b", "c"], + "values_sum": [3, 7, 5], + "bigvalues_max": [20, 40, 50] + } + + r = table.group_by(["keys", "keys2"]).aggregate([ + ("values", "sum") + ]) + assert sorted_by_keys(r.to_pydict()) == { + "keys": ["a", "b", "b", "c"], + "keys2": ["X", "Y", "Z", "Z"], + "values_sum": [3, 3, 4, 5] + } + + # Test many arguments + r = table.group_by("keys").aggregate([ + ("values", "max"), + ("bigvalues", "sum"), + ("bigvalues", "max"), + ([], 
"count_all"), + ("values", "sum") + ]) + assert sorted_by_keys(r.to_pydict()) == { + "keys": ["a", "b", "c"], + "values_max": [2, 4, 5], + "bigvalues_sum": [30, 70, 50], + "bigvalues_max": [20, 40, 50], + "count_all": [2, 2, 1], + "values_sum": [3, 7, 5] + } + + table_with_nulls = pa.table([ + pa.array(["a", "a", "a"]), + pa.array([1, None, None]) + ], names=["keys", "values"]) + + r = table_with_nulls.group_by(["keys"]).aggregate([ + ("values", "count", pc.CountOptions(mode="all")) + ]) + assert r.to_pydict() == { + "keys": ["a"], + "values_count": [3] + } + + r = table_with_nulls.group_by(["keys"]).aggregate([ + ("values", "count", pc.CountOptions(mode="only_null")) + ]) + assert r.to_pydict() == { + "keys": ["a"], + "values_count": [2] + } + + r = table_with_nulls.group_by(["keys"]).aggregate([ + ("values", "count", pc.CountOptions(mode="only_valid")) + ]) + assert r.to_pydict() == { + "keys": ["a"], + "values_count": [1] + } + + r = table_with_nulls.group_by(["keys"]).aggregate([ + ([], "count_all"), # nullary count that takes no parameters + ("values", "count", pc.CountOptions(mode="only_valid")) + ]) + assert r.to_pydict() == { + "keys": ["a"], + "count_all": [3], + "values_count": [1] + } + + r = table_with_nulls.group_by(["keys"]).aggregate([ + ([], "count_all") + ]) + assert r.to_pydict() == { + "keys": ["a"], + "count_all": [3] + } + + table = pa.table({ + 'keys': ['a', 'b', 'a', 'b', 'a', 'b'], + 'values': range(6)}) + table_with_chunks = pa.Table.from_batches( + table.to_batches(max_chunksize=3)) + r = table_with_chunks.group_by('keys').aggregate([('values', 'sum')]) + assert sorted_by_keys(r.to_pydict()) == { + "keys": ["a", "b"], + "values_sum": [6, 9] + } + + +@pytest.mark.acero +def test_table_group_by_first(): + # "first" is an ordered aggregation -> requires to specify use_threads=False + table1 = pa.table({'a': [1, 2, 3, 4], 'b': ['a', 'b'] * 2}) + table2 = pa.table({'a': [1, 2, 3, 4], 'b': ['b', 'a'] * 2}) + table = pa.concat_tables([table1, table2]) + + with pytest.raises(NotImplementedError): + table.group_by("b").aggregate([("a", "first")]) + + result = table.group_by("b", use_threads=False).aggregate([("a", "first")]) + expected = pa.table({"b": ["a", "b"], "a_first": [1, 2]}) + assert result.equals(expected) + + +def test_table_to_recordbatchreader(): + table = pa.Table.from_pydict({'x': [1, 2, 3]}) + reader = table.to_reader() + assert table.schema == reader.schema + assert table == reader.read_all() + + reader = table.to_reader(max_chunksize=2) + assert reader.read_next_batch().num_rows == 2 + assert reader.read_next_batch().num_rows == 1 + + +@pytest.mark.acero +def test_table_join(): + t1 = pa.table({ + "colA": [1, 2, 6], + "col2": ["a", "b", "f"] + }) + + t2 = pa.table({ + "colB": [99, 2, 1], + "col3": ["Z", "B", "A"] + }) + + result = t1.join(t2, "colA", "colB") + assert result.combine_chunks() == pa.table({ + "colA": [1, 2, 6], + "col2": ["a", "b", "f"], + "col3": ["A", "B", None] + }) + + result = t1.join(t2, "colA", "colB", join_type="full outer") + assert result.combine_chunks().sort_by("colA") == pa.table({ + "colA": [1, 2, 6, 99], + "col2": ["a", "b", "f", None], + "col3": ["A", "B", None, "Z"] + }) + + +@pytest.mark.acero +def test_table_join_unique_key(): + t1 = pa.table({ + "colA": [1, 2, 6], + "col2": ["a", "b", "f"] + }) + + t2 = pa.table({ + "colA": [99, 2, 1], + "col3": ["Z", "B", "A"] + }) + + result = t1.join(t2, "colA") + assert result.combine_chunks() == pa.table({ + "colA": [1, 2, 6], + "col2": ["a", "b", "f"], + "col3": ["A", "B", None] + 
}) + + result = t1.join(t2, "colA", join_type="full outer", right_suffix="_r") + assert result.combine_chunks().sort_by("colA") == pa.table({ + "colA": [1, 2, 6, 99], + "col2": ["a", "b", "f", None], + "col3": ["A", "B", None, "Z"] + }) + + +@pytest.mark.acero +def test_table_join_collisions(): + t1 = pa.table({ + "colA": [1, 2, 6], + "colB": [10, 20, 60], + "colVals": ["a", "b", "f"] + }) + + t2 = pa.table({ + "colA": [99, 2, 1], + "colB": [99, 20, 10], + "colVals": ["Z", "B", "A"] + }) + + result = t1.join(t2, "colA", join_type="full outer") + assert result.combine_chunks().sort_by("colA") == pa.table([ + [1, 2, 6, 99], + [10, 20, 60, None], + ["a", "b", "f", None], + [10, 20, None, 99], + ["A", "B", None, "Z"], + ], names=["colA", "colB", "colVals", "colB", "colVals"]) + + +@pytest.mark.acero +def test_table_filter_expression(): + t1 = pa.table({ + "colA": [1, 2, 6], + "colB": [10, 20, 60], + "colVals": ["a", "b", "f"] + }) + + t2 = pa.table({ + "colA": [99, 2, 1], + "colB": [99, 20, 10], + "colVals": ["Z", "B", "A"] + }) + + t3 = pa.concat_tables([t1, t2]) + + result = t3.filter(pc.field("colA") < 10) + assert result.combine_chunks() == pa.table({ + "colA": [1, 2, 6, 2, 1], + "colB": [10, 20, 60, 20, 10], + "colVals": ["a", "b", "f", "B", "A"] + }) + + +@pytest.mark.acero +def test_table_join_many_columns(): + t1 = pa.table({ + "colA": [1, 2, 6], + "col2": ["a", "b", "f"] + }) + + t2 = pa.table({ + "colB": [99, 2, 1], + "col3": ["Z", "B", "A"], + "col4": ["Z", "B", "A"], + "col5": ["Z", "B", "A"], + "col6": ["Z", "B", "A"], + "col7": ["Z", "B", "A"] + }) + + result = t1.join(t2, "colA", "colB") + assert result.combine_chunks() == pa.table({ + "colA": [1, 2, 6], + "col2": ["a", "b", "f"], + "col3": ["A", "B", None], + "col4": ["A", "B", None], + "col5": ["A", "B", None], + "col6": ["A", "B", None], + "col7": ["A", "B", None] + }) + + result = t1.join(t2, "colA", "colB", join_type="full outer") + assert result.combine_chunks().sort_by("colA") == pa.table({ + "colA": [1, 2, 6, 99], + "col2": ["a", "b", "f", None], + "col3": ["A", "B", None, "Z"], + "col4": ["A", "B", None, "Z"], + "col5": ["A", "B", None, "Z"], + "col6": ["A", "B", None, "Z"], + "col7": ["A", "B", None, "Z"], + }) + + +@pytest.mark.dataset +def test_table_join_asof(): + t1 = pa.Table.from_pydict({ + "colA": [1, 1, 5, 6, 7], + "col2": ["a", "b", "a", "b", "f"] + }) + + t2 = pa.Table.from_pydict({ + "colB": [2, 9, 15], + "col3": ["a", "b", "g"], + "colC": [1., 3., 5.] 
+ }) + + r = t1.join_asof( + t2, on="colA", by="col2", tolerance=1, + right_on="colB", right_by="col3", + ) + assert r.combine_chunks() == pa.table({ + "colA": [1, 1, 5, 6, 7], + "col2": ["a", "b", "a", "b", "f"], + "colC": [1., None, None, None, None], + }) + + +@pytest.mark.dataset +def test_table_join_asof_multiple_by(): + t1 = pa.table({ + "colA": [1, 2, 6], + "colB": [10, 20, 60], + "on": [1, 2, 3], + }) + + t2 = pa.table({ + "colB": [99, 20, 10], + "colVals": ["Z", "B", "A"], + "colA": [99, 2, 1], + "on": [2, 3, 4], + }) + + result = t1.join_asof( + t2, on="on", by=["colA", "colB"], tolerance=1 + ) + assert result.sort_by("colA") == pa.table({ + "colA": [1, 2, 6], + "colB": [10, 20, 60], + "on": [1, 2, 3], + "colVals": [None, "B", None], + }) + + +@pytest.mark.dataset +def test_table_join_asof_empty_by(): + t1 = pa.table({ + "on": [1, 2, 3], + }) + + t2 = pa.table({ + "colVals": ["Z", "B", "A"], + "on": [2, 3, 4], + }) + + result = t1.join_asof( + t2, on="on", by=[], tolerance=1 + ) + assert result == pa.table({ + "on": [1, 2, 3], + "colVals": ["Z", "Z", "B"], + }) + + +@pytest.mark.dataset +def test_table_join_asof_collisions(): + t1 = pa.table({ + "colA": [1, 2, 6], + "colB": [10, 20, 60], + "on": [1, 2, 3], + "colVals": ["a", "b", "f"] + }) + + t2 = pa.table({ + "colB": [99, 20, 10], + "colVals": ["Z", "B", "A"], + "colUniq": [100, 200, 300], + "colA": [99, 2, 1], + "on": [2, 3, 4], + }) + + msg = ( + "Columns {'colVals'} present in both tables. " + "AsofJoin does not support column collisions." + ) + with pytest.raises(ValueError, match=msg): + t1.join_asof( + t2, on="on", by=["colA", "colB"], tolerance=1, + right_on="on", right_by=["colA", "colB"], + ) + + +@pytest.mark.dataset +def test_table_join_asof_by_length_mismatch(): + t1 = pa.table({ + "colA": [1, 2, 6], + "colB": [10, 20, 60], + "on": [1, 2, 3], + }) + + t2 = pa.table({ + "colVals": ["Z", "B", "A"], + "colUniq": [100, 200, 300], + "colA": [99, 2, 1], + "on": [2, 3, 4], + }) + + msg = "inconsistent size of by-key across inputs" + with pytest.raises(pa.lib.ArrowInvalid, match=msg): + t1.join_asof( + t2, on="on", by=["colA", "colB"], tolerance=1, + right_on="on", right_by=["colA"], + ) + + +@pytest.mark.dataset +def test_table_join_asof_by_type_mismatch(): + t1 = pa.table({ + "colA": [1, 2, 6], + "on": [1, 2, 3], + }) + + t2 = pa.table({ + "colVals": ["Z", "B", "A"], + "colUniq": [100, 200, 300], + "colA": [99., 2., 1.], + "on": [2, 3, 4], + }) + + msg = "Expected by-key type int64 but got double for field colA in input 1" + with pytest.raises(pa.lib.ArrowInvalid, match=msg): + t1.join_asof( + t2, on="on", by=["colA"], tolerance=1, + right_on="on", right_by=["colA"], + ) + + +@pytest.mark.dataset +def test_table_join_asof_on_type_mismatch(): + t1 = pa.table({ + "colA": [1, 2, 6], + "on": [1, 2, 3], + }) + + t2 = pa.table({ + "colVals": ["Z", "B", "A"], + "colUniq": [100, 200, 300], + "colA": [99, 2, 1], + "on": [2., 3., 4.], + }) + + msg = "Expected on-key type int64 but got double for field on in input 1" + with pytest.raises(pa.lib.ArrowInvalid, match=msg): + t1.join_asof( + t2, on="on", by=["colA"], tolerance=1, + right_on="on", right_by=["colA"], + ) + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_cast_invalid(cls): + # Casting a nullable field to non-nullable should be invalid! 
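+ # Note (added comment, not in upstream): column 'b' in the first table below
+ # contains a null, so casting it to the non-nullable field in new_schema is
+ # expected to raise; the second table, whose 'b' has no nulls, casts cleanly.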
+ table = cls.from_pydict({'a': [None, 1], 'b': [None, True]}) + new_schema = pa.schema([pa.field("a", "int64", nullable=True), + pa.field("b", "bool", nullable=False)]) + with pytest.raises(ValueError): + table.cast(new_schema) + + table = cls.from_pydict({'a': [None, 1], 'b': [False, True]}) + assert table.cast(new_schema).schema == new_schema + + +@pytest.mark.parametrize( + ('cls'), + [ + (pa.Table), + (pa.RecordBatch) + ] +) +def test_table_sort_by(cls): + table = cls.from_arrays([ + pa.array([3, 1, 4, 2, 5]), + pa.array(["b", "a", "b", "a", "c"]), + ], names=["values", "keys"]) + + assert table.sort_by("values").to_pydict() == { + "keys": ["a", "a", "b", "b", "c"], + "values": [1, 2, 3, 4, 5] + } + + assert table.sort_by([("values", "descending")]).to_pydict() == { + "keys": ["c", "b", "b", "a", "a"], + "values": [5, 4, 3, 2, 1] + } + + tab = cls.from_arrays([ + pa.array([5, 7, 7, 35], type=pa.int64()), + pa.array(["foo", "car", "bar", "foobar"]) + ], names=["a", "b"]) + + sorted_tab = tab.sort_by([("a", "descending")]) + sorted_tab_dict = sorted_tab.to_pydict() + assert sorted_tab_dict["a"] == [35, 7, 7, 5] + assert sorted_tab_dict["b"] == ["foobar", "car", "bar", "foo"] + + sorted_tab = tab.sort_by([("a", "ascending")]) + sorted_tab_dict = sorted_tab.to_pydict() + assert sorted_tab_dict["a"] == [5, 7, 7, 35] + assert sorted_tab_dict["b"] == ["foo", "car", "bar", "foobar"] + + +@pytest.mark.parametrize("constructor", [pa.table, pa.record_batch]) +def test_numpy_asarray(constructor): + table = constructor([[1, 2, 3], [4.0, 5.0, 6.0]], names=["a", "b"]) + result = np.asarray(table) + expected = np.array([[1, 4], [2, 5], [3, 6]], dtype="float64") + np.testing.assert_allclose(result, expected) + + result = np.asarray(table, dtype="int32") + np.testing.assert_allclose(result, expected) + assert result.dtype == "int32" + + # no columns + table2 = table.select([]) + result = np.asarray(table2) + expected = np.empty((3, 0)) + np.testing.assert_allclose(result, expected) + assert result.dtype == "float64" + result = np.asarray(table2, dtype="int32") + np.testing.assert_allclose(result, expected) + assert result.dtype == "int32" + + # no rows + table3 = table.slice(0, 0) + result = np.asarray(table3) + expected = np.empty((0, 2)) + np.testing.assert_allclose(result, expected) + assert result.dtype == "float64" + result = np.asarray(table3, dtype="int32") + np.testing.assert_allclose(result, expected) + assert result.dtype == "int32" + + +@pytest.mark.parametrize("constructor", [pa.table, pa.record_batch]) +def test_numpy_array_protocol(constructor): + table = constructor([[1, 2, 3], [4.0, 5.0, 6.0]], names=["a", "b"]) + expected = np.array([[1, 4], [2, 5], [3, 6]], dtype="float64") + + if Version(np.__version__) < Version("2.0"): + # copy keyword is not strict and not passed down to __array__ + result = np.array(table, copy=False) + np.testing.assert_array_equal(result, expected) + else: + # starting with numpy 2.0, the copy=False keyword is assumed to be strict + with pytest.raises(ValueError, match="Unable to avoid a copy"): + np.array(table, copy=False) + + +@pytest.mark.acero +def test_invalid_non_join_column(): + NUM_ITEMS = 30 + t1 = pa.Table.from_pydict({ + 'id': range(NUM_ITEMS), + 'array_column': [[z for z in range(3)] for x in range(NUM_ITEMS)], + }) + t2 = pa.Table.from_pydict({ + 'id': range(NUM_ITEMS), + 'value': [x for x in range(NUM_ITEMS)] + }) + + # check as left table + with pytest.raises(pa.lib.ArrowInvalid) as excinfo: + t1.join(t2, 'id', join_type='inner') + exp_error_msg 
= "Data type list is not supported " \ + + "in join non-key field array_column" + assert exp_error_msg in str(excinfo.value) + + # check as right table + with pytest.raises(pa.lib.ArrowInvalid) as excinfo: + t2.join(t1, 'id', join_type='inner') + assert exp_error_msg in str(excinfo.value) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_tensor.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..3e6a4ca8ed222cb2f792dc5b5e9c864f5224f17b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_tensor.py @@ -0,0 +1,219 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import sys +import pytest +import warnings +import weakref + +import numpy as np +import pyarrow as pa + + +tensor_type_pairs = [ + ('i1', pa.int8()), + ('i2', pa.int16()), + ('i4', pa.int32()), + ('i8', pa.int64()), + ('u1', pa.uint8()), + ('u2', pa.uint16()), + ('u4', pa.uint32()), + ('u8', pa.uint64()), + ('f2', pa.float16()), + ('f4', pa.float32()), + ('f8', pa.float64()) +] + + +def test_tensor_attrs(): + data = np.random.randn(10, 4) + + tensor = pa.Tensor.from_numpy(data) + + assert tensor.ndim == 2 + assert tensor.dim_names == [] + assert tensor.size == 40 + assert tensor.shape == data.shape + assert tensor.strides == data.strides + + assert tensor.is_contiguous + assert tensor.is_mutable + + # not writeable + data2 = data.copy() + data2.flags.writeable = False + tensor = pa.Tensor.from_numpy(data2) + assert not tensor.is_mutable + + # With dim_names + tensor = pa.Tensor.from_numpy(data, dim_names=('x', 'y')) + assert tensor.ndim == 2 + assert tensor.dim_names == ['x', 'y'] + assert tensor.dim_name(0) == 'x' + assert tensor.dim_name(1) == 'y' + + wr = weakref.ref(tensor) + assert wr() is not None + del tensor + assert wr() is None + + +def test_tensor_base_object(): + tensor = pa.Tensor.from_numpy(np.random.randn(10, 4)) + n = sys.getrefcount(tensor) + array = tensor.to_numpy() # noqa + assert sys.getrefcount(tensor) == n + 1 + + +@pytest.mark.parametrize('dtype_str,arrow_type', tensor_type_pairs) +def test_tensor_numpy_roundtrip(dtype_str, arrow_type): + dtype = np.dtype(dtype_str) + # Casting np.float64 -> uint32 or uint64 throws a RuntimeWarning + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + data = (100 * np.random.randn(10, 4)).astype(dtype) + tensor = pa.Tensor.from_numpy(data) + assert tensor.type == arrow_type + + repr(tensor) + + result = tensor.to_numpy() + assert (data == result).all() + + +def test_tensor_ipc_roundtrip(tmpdir): + data = np.random.randn(10, 4) + tensor = pa.Tensor.from_numpy(data) + + path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-roundtrip') + mmap = pa.create_memory_map(path, 1024) + + 
pa.ipc.write_tensor(tensor, mmap) + + mmap.seek(0) + result = pa.ipc.read_tensor(mmap) + + assert result.equals(tensor) + + +@pytest.mark.gzip +def test_tensor_ipc_read_from_compressed(tempdir): + # ARROW-5910 + data = np.random.randn(10, 4) + tensor = pa.Tensor.from_numpy(data) + + path = tempdir / 'tensor-compressed-file' + + out_stream = pa.output_stream(path, compression='gzip') + pa.ipc.write_tensor(tensor, out_stream) + out_stream.close() + + result = pa.ipc.read_tensor(pa.input_stream(path, compression='gzip')) + assert result.equals(tensor) + + +def test_tensor_ipc_strided(tmpdir): + data1 = np.random.randn(10, 4) + tensor1 = pa.Tensor.from_numpy(data1[::2]) + + data2 = np.random.randn(10, 6, 4) + tensor2 = pa.Tensor.from_numpy(data2[::, ::2, ::]) + + path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-strided') + mmap = pa.create_memory_map(path, 2048) + + for tensor in [tensor1, tensor2]: + mmap.seek(0) + pa.ipc.write_tensor(tensor, mmap) + + mmap.seek(0) + result = pa.ipc.read_tensor(mmap) + + assert result.equals(tensor) + + +def test_tensor_equals(): + def eq(a, b): + assert a.equals(b) + assert a == b + assert not (a != b) + + def ne(a, b): + assert not a.equals(b) + assert not (a == b) + assert a != b + + data = np.random.randn(10, 6, 4)[::, ::2, ::] + tensor1 = pa.Tensor.from_numpy(data) + tensor2 = pa.Tensor.from_numpy(np.ascontiguousarray(data)) + eq(tensor1, tensor2) + data = data.copy() + data[9, 0, 0] = 1.0 + tensor2 = pa.Tensor.from_numpy(np.ascontiguousarray(data)) + ne(tensor1, tensor2) + + +def test_tensor_hashing(): + # Tensors are unhashable + with pytest.raises(TypeError, match="unhashable"): + hash(pa.Tensor.from_numpy(np.arange(10))) + + +def test_tensor_size(): + data = np.random.randn(10, 4) + tensor = pa.Tensor.from_numpy(data) + assert pa.ipc.get_tensor_size(tensor) > (data.size * 8) + + +def test_read_tensor(tmpdir): + # Create and write tensor tensor + data = np.random.randn(10, 4) + tensor = pa.Tensor.from_numpy(data) + data_size = pa.ipc.get_tensor_size(tensor) + path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-read-tensor') + write_mmap = pa.create_memory_map(path, data_size) + pa.ipc.write_tensor(tensor, write_mmap) + # Try to read tensor + read_mmap = pa.memory_map(path, mode='r') + array = pa.ipc.read_tensor(read_mmap).to_numpy() + np.testing.assert_equal(data, array) + + +def test_tensor_memoryview(): + # Tensors support the PEP 3118 buffer protocol + for dtype, expected_format in [(np.int8, '=b'), + (np.int64, '=q'), + (np.uint64, '=Q'), + (np.float16, 'e'), + (np.float64, 'd'), + ]: + data = np.arange(10, dtype=dtype) + dtype = data.dtype + lst = data.tolist() + tensor = pa.Tensor.from_numpy(data) + m = memoryview(tensor) + assert m.format == expected_format + assert m.shape == data.shape + assert m.strides == data.strides + assert m.ndim == 1 + assert m.nbytes == data.nbytes + assert m.itemsize == data.itemsize + assert m.itemsize * 8 == tensor.type.bit_width + assert np.frombuffer(m, dtype).tolist() == lst + del tensor, data + assert np.frombuffer(m, dtype).tolist() == lst diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_types.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_types.py new file mode 100644 index 0000000000000000000000000000000000000000..4f66a6f41672dc33003917a24802839a8ea6ed82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_types.py @@ -0,0 +1,1359 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from collections import OrderedDict +from collections.abc import Iterator +from functools import partial +import datetime +import sys + +import pytest +import hypothesis as h +import hypothesis.strategies as st +try: + import hypothesis.extra.pytz as tzst +except ImportError: + tzst = None +import weakref + +import numpy as np +import pyarrow as pa +import pyarrow.types as types +import pyarrow.tests.strategies as past + + +def get_many_types(): + # returning them from a function is required because of pa.dictionary + # type holds a pyarrow array and test_array.py::test_toal_bytes_allocated + # checks that the default memory pool has zero allocated bytes + return ( + pa.null(), + pa.bool_(), + pa.int32(), + pa.time32('s'), + pa.time64('us'), + pa.date32(), + pa.timestamp('us'), + pa.timestamp('us', tz='UTC'), + pa.timestamp('us', tz='Europe/Paris'), + pa.duration('s'), + pa.float16(), + pa.float32(), + pa.float64(), + pa.decimal128(19, 4), + pa.decimal256(76, 38), + pa.string(), + pa.binary(), + pa.binary(10), + pa.large_string(), + pa.large_binary(), + pa.string_view(), + pa.binary_view(), + pa.list_(pa.int32()), + pa.list_(pa.int32(), 2), + pa.large_list(pa.uint16()), + pa.list_view(pa.int32()), + pa.large_list_view(pa.uint16()), + pa.map_(pa.string(), pa.int32()), + pa.map_(pa.field('key', pa.int32(), nullable=False), + pa.field('value', pa.int32())), + pa.struct([pa.field('a', pa.int32()), + pa.field('b', pa.int8()), + pa.field('c', pa.string())]), + pa.struct([pa.field('a', pa.int32(), nullable=False), + pa.field('b', pa.int8(), nullable=False), + pa.field('c', pa.string())]), + pa.union([pa.field('a', pa.binary(10)), + pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE), + pa.union([pa.field('a', pa.binary(10)), + pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE, + type_codes=[4, 8]), + pa.union([pa.field('a', pa.binary(10)), + pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE), + pa.union([pa.field('a', pa.binary(10), nullable=False), + pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE), + pa.dictionary(pa.int32(), pa.string()), + pa.run_end_encoded(pa.int16(), pa.int32()), + pa.run_end_encoded(pa.int32(), pa.string()), + pa.run_end_encoded(pa.int64(), pa.uint8()) + ) + + +def test_is_boolean(): + assert types.is_boolean(pa.bool_()) + assert not types.is_boolean(pa.int8()) + + +def test_is_integer(): + signed_ints = [pa.int8(), pa.int16(), pa.int32(), pa.int64()] + unsigned_ints = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()] + + for t in signed_ints + unsigned_ints: + assert types.is_integer(t) + + for t in signed_ints: + assert types.is_signed_integer(t) + assert not types.is_unsigned_integer(t) + + for t in unsigned_ints: + assert types.is_unsigned_integer(t) + assert not types.is_signed_integer(t) + + assert not types.is_integer(pa.float32()) + assert not 
types.is_signed_integer(pa.float32()) + + +def test_is_floating(): + for t in [pa.float16(), pa.float32(), pa.float64()]: + assert types.is_floating(t) + + assert not types.is_floating(pa.int32()) + + +def test_is_null(): + assert types.is_null(pa.null()) + assert not types.is_null(pa.list_(pa.int32())) + + +def test_null_field_may_not_be_non_nullable(): + # ARROW-7273 + with pytest.raises(ValueError): + pa.field('f0', pa.null(), nullable=False) + + +def test_is_decimal(): + decimal128 = pa.decimal128(19, 4) + decimal256 = pa.decimal256(76, 38) + int32 = pa.int32() + + assert types.is_decimal(decimal128) + assert types.is_decimal(decimal256) + assert not types.is_decimal(int32) + + assert types.is_decimal128(decimal128) + assert not types.is_decimal128(decimal256) + assert not types.is_decimal128(int32) + + assert not types.is_decimal256(decimal128) + assert types.is_decimal256(decimal256) + assert not types.is_decimal256(int32) + + +def test_is_list(): + a = pa.list_(pa.int32()) + b = pa.large_list(pa.int32()) + c = pa.list_(pa.int32(), 3) + + assert types.is_list(a) + assert not types.is_large_list(a) + assert not types.is_fixed_size_list(a) + assert types.is_large_list(b) + assert not types.is_list(b) + assert not types.is_fixed_size_list(b) + assert types.is_fixed_size_list(c) + assert not types.is_list(c) + assert not types.is_large_list(c) + + assert not types.is_list(pa.int32()) + + +def test_is_list_view(): + a = pa.list_view(pa.int32()) + b = pa.large_list_view(pa.int32()) + + assert types.is_list_view(a) + assert not types.is_large_list_view(a) + assert not types.is_list(a) + assert types.is_large_list_view(b) + assert not types.is_list_view(b) + assert not types.is_large_list(b) + + +def test_is_map(): + m = pa.map_(pa.utf8(), pa.int32()) + + assert types.is_map(m) + assert not types.is_map(pa.int32()) + + fields = pa.map_(pa.field('key_name', pa.utf8(), nullable=False), + pa.field('value_name', pa.int32())) + assert types.is_map(fields) + + entries_type = pa.struct([pa.field('key', pa.int8()), + pa.field('value', pa.int8())]) + list_type = pa.list_(entries_type) + assert not types.is_map(list_type) + + +def test_is_dictionary(): + assert types.is_dictionary(pa.dictionary(pa.int32(), pa.string())) + assert not types.is_dictionary(pa.int32()) + + +def test_is_nested_or_struct(): + struct_ex = pa.struct([pa.field('a', pa.int32()), + pa.field('b', pa.int8()), + pa.field('c', pa.string())]) + + assert types.is_struct(struct_ex) + assert not types.is_struct(pa.list_(pa.int32())) + + assert types.is_nested(struct_ex) + assert types.is_nested(pa.list_(pa.int32())) + assert types.is_nested(pa.list_(pa.int32(), 3)) + assert types.is_nested(pa.large_list(pa.int32())) + assert types.is_nested(pa.list_view(pa.int32())) + assert types.is_nested(pa.large_list_view(pa.int32())) + assert not types.is_nested(pa.int32()) + + +def test_is_union(): + for mode in [pa.lib.UnionMode_SPARSE, pa.lib.UnionMode_DENSE]: + assert types.is_union(pa.union([pa.field('a', pa.int32()), + pa.field('b', pa.int8()), + pa.field('c', pa.string())], + mode=mode)) + assert not types.is_union(pa.list_(pa.int32())) + + +def test_is_run_end_encoded(): + assert types.is_run_end_encoded(pa.run_end_encoded(pa.int32(), pa.int64())) + assert not types.is_run_end_encoded(pa.utf8()) + + +# TODO(wesm): is_map, once implemented + + +def test_is_binary_string(): + assert types.is_binary(pa.binary()) + assert not types.is_binary(pa.string()) + assert not types.is_binary(pa.large_binary()) + assert not 
types.is_binary(pa.large_string()) + + assert types.is_string(pa.string()) + assert types.is_unicode(pa.string()) + assert not types.is_string(pa.binary()) + assert not types.is_string(pa.large_string()) + assert not types.is_string(pa.large_binary()) + + assert types.is_large_binary(pa.large_binary()) + assert not types.is_large_binary(pa.large_string()) + assert not types.is_large_binary(pa.binary()) + assert not types.is_large_binary(pa.string()) + + assert types.is_large_string(pa.large_string()) + assert not types.is_large_string(pa.large_binary()) + assert not types.is_large_string(pa.string()) + assert not types.is_large_string(pa.binary()) + + assert types.is_fixed_size_binary(pa.binary(5)) + assert not types.is_fixed_size_binary(pa.binary()) + + assert types.is_string_view(pa.string_view()) + assert not types.is_string_view(pa.string()) + assert types.is_binary_view(pa.binary_view()) + assert not types.is_binary_view(pa.binary()) + assert not types.is_binary_view(pa.string_view()) + + +def test_is_temporal_date_time_timestamp(): + date_types = [pa.date32(), pa.date64()] + time_types = [pa.time32('s'), pa.time64('ns')] + timestamp_types = [pa.timestamp('ms')] + duration_types = [pa.duration('ms')] + interval_types = [pa.month_day_nano_interval()] + + for case in (date_types + time_types + timestamp_types + duration_types + + interval_types): + assert types.is_temporal(case) + + for case in date_types: + assert types.is_date(case) + assert not types.is_time(case) + assert not types.is_timestamp(case) + assert not types.is_duration(case) + assert not types.is_interval(case) + + for case in time_types: + assert types.is_time(case) + assert not types.is_date(case) + assert not types.is_timestamp(case) + assert not types.is_duration(case) + assert not types.is_interval(case) + + for case in timestamp_types: + assert types.is_timestamp(case) + assert not types.is_date(case) + assert not types.is_time(case) + assert not types.is_duration(case) + assert not types.is_interval(case) + + for case in duration_types: + assert types.is_duration(case) + assert not types.is_date(case) + assert not types.is_time(case) + assert not types.is_timestamp(case) + assert not types.is_interval(case) + + for case in interval_types: + assert types.is_interval(case) + assert not types.is_date(case) + assert not types.is_time(case) + assert not types.is_timestamp(case) + + assert not types.is_temporal(pa.int32()) + + +def test_is_primitive(): + assert types.is_primitive(pa.int32()) + assert not types.is_primitive(pa.list_(pa.int32())) + + +@pytest.mark.parametrize(('tz', 'expected'), [ + (datetime.timezone.utc, 'UTC'), + (datetime.timezone(datetime.timedelta(hours=1, minutes=30)), '+01:30') +]) +def test_tzinfo_to_string(tz, expected): + assert pa.lib.tzinfo_to_string(tz) == expected + + +def test_pytz_tzinfo_to_string(): + pytz = pytest.importorskip("pytz") + + tz = [pytz.utc, pytz.timezone('Europe/Paris')] + expected = ['UTC', 'Europe/Paris'] + assert [pa.lib.tzinfo_to_string(i) for i in tz] == expected + + # StaticTzInfo.tzname returns with '-09' so we need to infer the timezone's + # name from the tzinfo.zone attribute + tz = [pytz.timezone('Etc/GMT-9'), pytz.FixedOffset(180)] + expected = ['Etc/GMT-9', '+03:00'] + assert [pa.lib.tzinfo_to_string(i) for i in tz] == expected + + +def test_dateutil_tzinfo_to_string(): + if sys.platform == 'win32': + # Skip due to new release of python-dateutil + # https://github.com/apache/arrow/issues/40485 + pytest.skip('Skip on Win due to new release of python-dateutil') 
+ + pytest.importorskip("dateutil") + import dateutil.tz + + tz = dateutil.tz.UTC + assert pa.lib.tzinfo_to_string(tz) == 'UTC' + tz = dateutil.tz.gettz('Europe/Paris') + assert pa.lib.tzinfo_to_string(tz) == 'Europe/Paris' + + +def test_zoneinfo_tzinfo_to_string(): + zoneinfo = pytest.importorskip('zoneinfo') + if sys.platform == 'win32': + # zoneinfo requires an additional dependency On Windows + # tzdata provides IANA time zone data + pytest.importorskip('tzdata') + + tz = zoneinfo.ZoneInfo('UTC') + assert pa.lib.tzinfo_to_string(tz) == 'UTC' + tz = zoneinfo.ZoneInfo('Europe/Paris') + assert pa.lib.tzinfo_to_string(tz) == 'Europe/Paris' + + +def test_tzinfo_to_string_errors(): + msg = "Not an instance of datetime.tzinfo" + with pytest.raises(TypeError): + pa.lib.tzinfo_to_string("Europe/Budapest") + + if sys.version_info >= (3, 8): + # before 3.8 it was only possible to create timezone objects with whole + # number of minutes + tz = datetime.timezone(datetime.timedelta(hours=1, seconds=30)) + msg = "Offset must represent whole number of minutes" + with pytest.raises(ValueError, match=msg): + pa.lib.tzinfo_to_string(tz) + + +if tzst: + timezones = tzst.timezones() +else: + timezones = st.none() + + +@h.given(timezones) +def test_pytz_timezone_roundtrip(tz): + if tz is None: + pytest.skip('requires timezone not None') + timezone_string = pa.lib.tzinfo_to_string(tz) + timezone_tzinfo = pa.lib.string_to_tzinfo(timezone_string) + assert timezone_tzinfo == tz + + +def test_convert_custom_tzinfo_objects_to_string(): + class CorrectTimezone1(datetime.tzinfo): + """ + Conversion is using utcoffset() + """ + + def tzname(self, dt): + return None + + def utcoffset(self, dt): + return datetime.timedelta(hours=-3, minutes=30) + + class CorrectTimezone2(datetime.tzinfo): + """ + Conversion is using tzname() + """ + + def tzname(self, dt): + return "+03:00" + + def utcoffset(self, dt): + return datetime.timedelta(hours=3) + + class BuggyTimezone1(datetime.tzinfo): + """ + Unable to infer name or offset + """ + + def tzname(self, dt): + return None + + def utcoffset(self, dt): + return None + + class BuggyTimezone2(datetime.tzinfo): + """ + Wrong offset type + """ + + def tzname(self, dt): + return None + + def utcoffset(self, dt): + return "one hour" + + class BuggyTimezone3(datetime.tzinfo): + """ + Wrong timezone name type + """ + + def tzname(self, dt): + return 240 + + def utcoffset(self, dt): + return None + + assert pa.lib.tzinfo_to_string(CorrectTimezone1()) == "-02:30" + assert pa.lib.tzinfo_to_string(CorrectTimezone2()) == "+03:00" + + msg = (r"Object returned by tzinfo.utcoffset\(None\) is not an instance " + r"of datetime.timedelta") + for wrong in [BuggyTimezone1(), BuggyTimezone2(), BuggyTimezone3()]: + with pytest.raises(ValueError, match=msg): + pa.lib.tzinfo_to_string(wrong) + + +def test_string_to_tzinfo(): + string = ['UTC', 'Europe/Paris', '+03:00', '+01:30', '-02:00'] + try: + import pytz + expected = [pytz.utc, pytz.timezone('Europe/Paris'), + pytz.FixedOffset(180), pytz.FixedOffset(90), + pytz.FixedOffset(-120)] + result = [pa.lib.string_to_tzinfo(i) for i in string] + assert result == expected + + except ImportError: + try: + import zoneinfo + expected = [zoneinfo.ZoneInfo(key='UTC'), + zoneinfo.ZoneInfo(key='Europe/Paris'), + datetime.timezone(datetime.timedelta(hours=3)), + datetime.timezone( + datetime.timedelta(hours=1, minutes=30)), + datetime.timezone(-datetime.timedelta(hours=2))] + result = [pa.lib.string_to_tzinfo(i) for i in string] + assert result == expected + + 
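+        # Neither pytz nor the stdlib zoneinfo module is available: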
except ImportError: + pytest.skip('requires pytz or zoneinfo to be installed') + + +def test_timezone_string_roundtrip_pytz(): + pytz = pytest.importorskip("pytz") + + tz = [pytz.FixedOffset(90), pytz.FixedOffset(-90), + pytz.utc, pytz.timezone('America/New_York')] + name = ['+01:30', '-01:30', 'UTC', 'America/New_York'] + + assert [pa.lib.tzinfo_to_string(i) for i in tz] == name + assert [pa.lib.string_to_tzinfo(i)for i in name] == tz + + +def test_timestamp(): + for unit in ('s', 'ms', 'us', 'ns'): + for tz in (None, 'UTC', 'Europe/Paris'): + ty = pa.timestamp(unit, tz=tz) + assert ty.unit == unit + assert ty.tz == tz + + for invalid_unit in ('m', 'arbit', 'rary'): + with pytest.raises(ValueError, match='Invalid time unit'): + pa.timestamp(invalid_unit) + + +def test_timestamp_print(): + for unit in ('s', 'ms', 'us', 'ns'): + for tz in ('UTC', 'Europe/Paris', 'Pacific/Marquesas', + 'Mars/Mariner_Valley', '-00:42', '+42:00'): + ty = pa.timestamp(unit, tz=tz) + arr = pa.array([0], ty) + assert "Z" in str(arr) + arr = pa.array([0], pa.timestamp(unit)) + assert "Z" not in str(arr) + + +def test_time32_units(): + for valid_unit in ('s', 'ms'): + ty = pa.time32(valid_unit) + assert ty.unit == valid_unit + + for invalid_unit in ('m', 'us', 'ns'): + error_msg = 'Invalid time unit for time32: {!r}'.format(invalid_unit) + with pytest.raises(ValueError, match=error_msg): + pa.time32(invalid_unit) + + +def test_time64_units(): + for valid_unit in ('us', 'ns'): + ty = pa.time64(valid_unit) + assert ty.unit == valid_unit + + for invalid_unit in ('m', 's', 'ms'): + error_msg = 'Invalid time unit for time64: {!r}'.format(invalid_unit) + with pytest.raises(ValueError, match=error_msg): + pa.time64(invalid_unit) + + +def test_duration(): + for unit in ('s', 'ms', 'us', 'ns'): + ty = pa.duration(unit) + assert ty.unit == unit + + for invalid_unit in ('m', 'arbit', 'rary'): + with pytest.raises(ValueError, match='Invalid time unit'): + pa.duration(invalid_unit) + + +def test_list_type(): + ty = pa.list_(pa.int64()) + assert isinstance(ty, pa.ListType) + assert ty.value_type == pa.int64() + assert ty.value_field == pa.field("item", pa.int64(), nullable=True) + + # nullability matters in comparison + ty_non_nullable = pa.list_(pa.field("item", pa.int64(), nullable=False)) + assert ty != ty_non_nullable + + # field names don't matter by default + ty_named = pa.list_(pa.field("element", pa.int64())) + assert ty == ty_named + assert not ty.equals(ty_named, check_metadata=True) + + # metadata doesn't matter by default + ty_metadata = pa.list_( + pa.field("item", pa.int64(), metadata={"hello": "world"})) + assert ty == ty_metadata + assert not ty.equals(ty_metadata, check_metadata=True) + + with pytest.raises(TypeError): + pa.list_(None) + + +def test_large_list_type(): + ty = pa.large_list(pa.utf8()) + assert isinstance(ty, pa.LargeListType) + assert ty.value_type == pa.utf8() + assert ty.value_field == pa.field("item", pa.utf8(), nullable=True) + + with pytest.raises(TypeError): + pa.large_list(None) + + +def test_list_view_type(): + ty = pa.list_view(pa.int64()) + assert isinstance(ty, pa.ListViewType) + assert ty.value_type == pa.int64() + assert ty.value_field == pa.field("item", pa.int64(), nullable=True) + + # nullability matters in comparison + ty_non_nullable = pa.list_view(pa.field("item", pa.int64(), nullable=False)) + assert ty != ty_non_nullable + + # field names don't matter by default + ty_named = pa.list_view(pa.field("element", pa.int64())) + assert ty == ty_named + assert not ty.equals(ty_named, 
check_metadata=True) + + # metadata doesn't matter by default + ty_metadata = pa.list_view( + pa.field("item", pa.int64(), metadata={"hello": "world"})) + assert ty == ty_metadata + assert not ty.equals(ty_metadata, check_metadata=True) + + with pytest.raises(TypeError): + pa.list_view(None) + + +def test_large_list_view_type(): + ty = pa.large_list_view(pa.utf8()) + assert isinstance(ty, pa.LargeListViewType) + assert ty.value_type == pa.utf8() + assert ty.value_field == pa.field("item", pa.utf8(), nullable=True) + + with pytest.raises(TypeError): + pa.large_list_view(None) + + +def test_map_type(): + ty = pa.map_(pa.utf8(), pa.int32()) + assert isinstance(ty, pa.MapType) + assert ty.key_type == pa.utf8() + assert ty.key_field == pa.field("key", pa.utf8(), nullable=False) + assert ty.item_type == pa.int32() + assert ty.item_field == pa.field("value", pa.int32(), nullable=True) + + # nullability matters in comparison + ty_non_nullable = pa.map_(pa.utf8(), pa.field( + "value", pa.int32(), nullable=False)) + assert ty != ty_non_nullable + + # field names don't matter by default + ty_named = pa.map_(pa.field("x", pa.utf8(), nullable=False), + pa.field("y", pa.int32())) + assert ty == ty_named + assert not ty.equals(ty_named, check_metadata=True) + + # metadata doesn't matter by default + ty_metadata = pa.map_(pa.utf8(), pa.field( + "value", pa.int32(), metadata={"hello": "world"})) + assert ty == ty_metadata + assert not ty.equals(ty_metadata, check_metadata=True) + + for keys_sorted in [True, False]: + assert pa.map_(pa.utf8(), pa.int32(), + keys_sorted=keys_sorted).keys_sorted == keys_sorted + + with pytest.raises(TypeError): + pa.map_(None) + with pytest.raises(TypeError): + pa.map_(pa.int32(), None) + with pytest.raises(TypeError): + pa.map_(pa.field("name", pa.string(), nullable=True), pa.int64()) + + +def test_fixed_size_list_type(): + ty = pa.list_(pa.float64(), 2) + assert isinstance(ty, pa.FixedSizeListType) + assert ty.value_type == pa.float64() + assert ty.value_field == pa.field("item", pa.float64(), nullable=True) + assert ty.list_size == 2 + + with pytest.raises(ValueError): + pa.list_(pa.float64(), -2) + + +def test_struct_type(): + fields = [ + # Duplicate field name on purpose + pa.field('a', pa.int64()), + pa.field('a', pa.int32()), + pa.field('b', pa.int32()) + ] + ty = pa.struct(fields) + + assert len(ty) == ty.num_fields == 3 + assert list(ty) == fields + assert ty[0].name == 'a' + assert ty[2].type == pa.int32() + with pytest.raises(IndexError): + assert ty[3] + + assert ty['b'] == ty[2] + + assert ty['b'] == ty.field('b') + + assert ty[2] == ty.field(2) + + # Not found + with pytest.raises(KeyError): + ty['c'] + + with pytest.raises(KeyError): + ty.field('c') + + # Neither integer nor string + with pytest.raises(TypeError): + ty[None] + + with pytest.raises(TypeError): + ty.field(None) + + for a, b in zip(ty, fields): + a == b + + # Construct from list of tuples + ty = pa.struct([('a', pa.int64()), + ('a', pa.int32()), + ('b', pa.int32())]) + assert list(ty) == fields + for a, b in zip(ty, fields): + a == b + + # Construct from mapping + fields = [pa.field('a', pa.int64()), + pa.field('b', pa.int32())] + ty = pa.struct(OrderedDict([('a', pa.int64()), + ('b', pa.int32())])) + assert list(ty) == fields + for a, b in zip(ty, fields): + a == b + + # Invalid args + with pytest.raises(TypeError): + pa.struct([('a', None)]) + + +def test_struct_duplicate_field_names(): + fields = [ + pa.field('a', pa.int64()), + pa.field('b', pa.int32()), + pa.field('a', pa.int32()) + ] + ty = 
pa.struct(fields) + + # Duplicate + with pytest.warns(UserWarning): + with pytest.raises(KeyError): + ty['a'] + + # StructType::GetFieldIndex + assert ty.get_field_index('a') == -1 + + # StructType::GetAllFieldIndices + assert ty.get_all_field_indices('a') == [0, 2] + + +def test_union_type(): + def check_fields(ty, fields): + assert ty.num_fields == len(fields) + assert [ty[i] for i in range(ty.num_fields)] == fields + assert [ty.field(i) for i in range(ty.num_fields)] == fields + + fields = [pa.field('x', pa.list_(pa.int32())), + pa.field('y', pa.binary())] + type_codes = [5, 9] + + sparse_factories = [ + partial(pa.union, mode='sparse'), + partial(pa.union, mode=pa.lib.UnionMode_SPARSE), + pa.sparse_union, + ] + + dense_factories = [ + partial(pa.union, mode='dense'), + partial(pa.union, mode=pa.lib.UnionMode_DENSE), + pa.dense_union, + ] + + for factory in sparse_factories: + ty = factory(fields) + assert isinstance(ty, pa.SparseUnionType) + assert ty.mode == 'sparse' + check_fields(ty, fields) + assert ty.type_codes == [0, 1] + ty = factory(fields, type_codes=type_codes) + assert ty.mode == 'sparse' + check_fields(ty, fields) + assert ty.type_codes == type_codes + # Invalid number of type codes + with pytest.raises(ValueError): + factory(fields, type_codes=type_codes[1:]) + + for factory in dense_factories: + ty = factory(fields) + assert isinstance(ty, pa.DenseUnionType) + assert ty.mode == 'dense' + check_fields(ty, fields) + assert ty.type_codes == [0, 1] + ty = factory(fields, type_codes=type_codes) + assert ty.mode == 'dense' + check_fields(ty, fields) + assert ty.type_codes == type_codes + # Invalid number of type codes + with pytest.raises(ValueError): + factory(fields, type_codes=type_codes[1:]) + + for mode in ('unknown', 2): + with pytest.raises(ValueError, match='Invalid union mode'): + pa.union(fields, mode=mode) + + +def test_dictionary_type(): + ty0 = pa.dictionary(pa.int32(), pa.string()) + assert ty0.index_type == pa.int32() + assert ty0.value_type == pa.string() + assert ty0.ordered is False + + ty1 = pa.dictionary(pa.int8(), pa.float64(), ordered=True) + assert ty1.index_type == pa.int8() + assert ty1.value_type == pa.float64() + assert ty1.ordered is True + + # construct from non-arrow objects + ty2 = pa.dictionary('int8', 'string') + assert ty2.index_type == pa.int8() + assert ty2.value_type == pa.string() + assert ty2.ordered is False + + # allow unsigned integers for index type + ty3 = pa.dictionary(pa.uint32(), pa.string()) + assert ty3.index_type == pa.uint32() + assert ty3.value_type == pa.string() + assert ty3.ordered is False + + # invalid index type raises + with pytest.raises(TypeError): + pa.dictionary(pa.string(), pa.int64()) + + +def test_dictionary_ordered_equals(): + # Python side checking of ARROW-6345 + d1 = pa.dictionary('int32', 'binary', ordered=True) + d2 = pa.dictionary('int32', 'binary', ordered=False) + d3 = pa.dictionary('int8', 'binary', ordered=True) + d4 = pa.dictionary('int32', 'binary', ordered=True) + + assert not d1.equals(d2) + assert not d1.equals(d3) + assert d1.equals(d4) + + +def test_types_hashable(): + many_types = get_many_types() + in_dict = {} + for i, type_ in enumerate(many_types): + assert hash(type_) == hash(type_) + in_dict[type_] = i + assert len(in_dict) == len(many_types) + for i, type_ in enumerate(many_types): + assert in_dict[type_] == i + + +def test_types_picklable(pickle_module): + for ty in get_many_types(): + data = pickle_module.dumps(ty) + assert pickle_module.loads(data) == ty + + +def 
test_types_weakref(): + for ty in get_many_types(): + wr = weakref.ref(ty) + assert wr() is not None + # Note that ty may be a singleton and therefore outlive this loop + + wr = weakref.ref(pa.int32()) + assert wr() is not None # singleton + wr = weakref.ref(pa.list_(pa.int32())) + assert wr() is None # not a singleton + + +def test_fields_hashable(): + in_dict = {} + fields = [pa.field('a', pa.int32()), + pa.field('a', pa.int64()), + pa.field('a', pa.int64(), nullable=False), + pa.field('b', pa.int32()), + pa.field('b', pa.int32(), nullable=False)] + for i, field in enumerate(fields): + in_dict[field] = i + assert len(in_dict) == len(fields) + for i, field in enumerate(fields): + assert in_dict[field] == i + + +def test_fields_weakrefable(): + field = pa.field('a', pa.int32()) + wr = weakref.ref(field) + assert wr() is not None + del field + assert wr() is None + + +def test_run_end_encoded_type(): + ty = pa.run_end_encoded(pa.int64(), pa.utf8()) + assert isinstance(ty, pa.RunEndEncodedType) + assert ty.run_end_type == pa.int64() + assert ty.value_type == pa.utf8() + assert ty.num_buffers == 1 # buffers expected to be {NULLPTR} + assert ty.num_fields == 2 + + with pytest.raises(TypeError): + pa.run_end_encoded(pa.int64(), None) + + with pytest.raises(TypeError): + pa.run_end_encoded(None, pa.utf8()) + + with pytest.raises(ValueError): + pa.run_end_encoded(pa.int8(), pa.utf8()) + + +@pytest.mark.parametrize('t,check_func', [ + (pa.date32(), types.is_date32), + (pa.date64(), types.is_date64), + (pa.time32('s'), types.is_time32), + (pa.time64('ns'), types.is_time64), + (pa.int8(), types.is_int8), + (pa.int16(), types.is_int16), + (pa.int32(), types.is_int32), + (pa.int64(), types.is_int64), + (pa.uint8(), types.is_uint8), + (pa.uint16(), types.is_uint16), + (pa.uint32(), types.is_uint32), + (pa.uint64(), types.is_uint64), + (pa.float16(), types.is_float16), + (pa.float32(), types.is_float32), + (pa.float64(), types.is_float64) +]) +def test_exact_primitive_types(t, check_func): + assert check_func(t) + + +def test_type_id(): + # enum values are not exposed publicly + for ty in get_many_types(): + assert isinstance(ty.id, int) + + +def test_bit_and_byte_width(): + for ty, expected_bit_width, expected_byte_width in [ + (pa.bool_(), 1, 0), + (pa.int8(), 8, 1), + (pa.uint32(), 32, 4), + (pa.float16(), 16, 2), + (pa.timestamp('s'), 64, 8), + (pa.date32(), 32, 4), + (pa.decimal128(19, 4), 128, 16), + (pa.decimal256(76, 38), 256, 32), + (pa.binary(42), 42 * 8, 42), + (pa.binary(0), 0, 0), + ]: + assert ty.bit_width == expected_bit_width + + if 0 < expected_bit_width < 8: + with pytest.raises(ValueError, match="Less than one byte"): + ty.byte_width + else: + assert ty.byte_width == expected_byte_width + + for ty in [ + pa.binary(), + pa.string(), + pa.list_(pa.int16()), + pa.map_(pa.string(), pa.int32()), + pa.struct([('f1', pa.int32())]) + ]: + with pytest.raises(ValueError, match="fixed width"): + ty.bit_width + with pytest.raises(ValueError, match="fixed width"): + ty.byte_width + + +def test_fixed_size_binary_byte_width(): + ty = pa.binary(5) + assert ty.byte_width == 5 + + +def test_decimal_properties(): + ty = pa.decimal128(19, 4) + assert ty.byte_width == 16 + assert ty.precision == 19 + assert ty.scale == 4 + ty = pa.decimal256(76, 38) + assert ty.byte_width == 32 + assert ty.precision == 76 + assert ty.scale == 38 + + +def test_decimal_overflow(): + pa.decimal128(1, 0) + pa.decimal128(38, 0) + for i in (0, -1, 39): + with pytest.raises(ValueError): + pa.decimal128(i, 0) + + pa.decimal256(1, 
0) + pa.decimal256(76, 0) + for i in (0, -1, 77): + with pytest.raises(ValueError): + pa.decimal256(i, 0) + + +def test_timedelta_overflow(): + # microsecond resolution, overflow + d = datetime.timedelta(days=-106751992, seconds=71945, microseconds=224192) + with pytest.raises(pa.ArrowInvalid): + pa.scalar(d) + + # microsecond resolution, overflow + d = datetime.timedelta(days=106751991, seconds=14454, microseconds=775808) + with pytest.raises(pa.ArrowInvalid): + pa.scalar(d) + + # nanosecond resolution, overflow + d = datetime.timedelta(days=-106752, seconds=763, microseconds=145224) + with pytest.raises(pa.ArrowInvalid): + pa.scalar(d, type=pa.duration('ns')) + + # microsecond resolution, not overflow + pa.scalar(d, type=pa.duration('us')).as_py() == d + + # second/millisecond resolution, not overflow + for d in [datetime.timedelta.min, datetime.timedelta.max]: + pa.scalar(d, type=pa.duration('ms')).as_py() == d + pa.scalar(d, type=pa.duration('s')).as_py() == d + + +def test_type_equality_operators(): + many_types = get_many_types() + non_pyarrow = ('foo', 16, {'s', 'e', 't'}) + + for index, ty in enumerate(many_types): + # could use two parametrization levels, + # but that'd bloat pytest's output + for i, other in enumerate(many_types + non_pyarrow): + if i == index: + assert ty == other + else: + assert ty != other + + +def test_key_value_metadata(): + m = pa.KeyValueMetadata({'a': 'A', 'b': 'B'}) + assert len(m) == 2 + assert m['a'] == b'A' + assert m[b'a'] == b'A' + assert m['b'] == b'B' + assert 'a' in m + assert b'a' in m + assert 'c' not in m + + m1 = pa.KeyValueMetadata({'a': 'A', 'b': 'B'}) + m2 = pa.KeyValueMetadata(a='A', b='B') + m3 = pa.KeyValueMetadata([('a', 'A'), ('b', 'B')]) + + assert m1 != 2 + assert m1 == m2 + assert m2 == m3 + assert m1 == {'a': 'A', 'b': 'B'} + assert m1 != {'a': 'A', 'b': 'C'} + + with pytest.raises(TypeError): + pa.KeyValueMetadata({'a': 1}) + with pytest.raises(TypeError): + pa.KeyValueMetadata({1: 'a'}) + with pytest.raises(TypeError): + pa.KeyValueMetadata(a=1) + + expected = [(b'a', b'A'), (b'b', b'B')] + result = [(k, v) for k, v in m3.items()] + assert result == expected + assert list(m3.items()) == expected + assert list(m3.keys()) == [b'a', b'b'] + assert list(m3.values()) == [b'A', b'B'] + assert len(m3) == 2 + + # test duplicate key support + md = pa.KeyValueMetadata([ + ('a', 'alpha'), + ('b', 'beta'), + ('a', 'Alpha'), + ('a', 'ALPHA'), + ]) + + expected = [ + (b'a', b'alpha'), + (b'b', b'beta'), + (b'a', b'Alpha'), + (b'a', b'ALPHA') + ] + assert len(md) == 4 + assert isinstance(md.keys(), Iterator) + assert isinstance(md.values(), Iterator) + assert isinstance(md.items(), Iterator) + assert list(md.items()) == expected + assert list(md.keys()) == [k for k, _ in expected] + assert list(md.values()) == [v for _, v in expected] + + # first occurrence + assert md['a'] == b'alpha' + assert md['b'] == b'beta' + assert md.get_all('a') == [b'alpha', b'Alpha', b'ALPHA'] + assert md.get_all('b') == [b'beta'] + assert md.get_all('unknown') == [] + + with pytest.raises(KeyError): + md = pa.KeyValueMetadata([ + ('a', 'alpha'), + ('b', 'beta'), + ('a', 'Alpha'), + ('a', 'ALPHA'), + ], b='BETA') + + +def test_key_value_metadata_duplicates(): + meta = pa.KeyValueMetadata({'a': '1', 'b': '2'}) + + with pytest.raises(KeyError): + pa.KeyValueMetadata(meta, a='3') + + +def test_field_basic(): + t = pa.string() + f = pa.field('foo', t) + + assert f.name == 'foo' + assert f.nullable + assert f.type is t + assert repr(f) == "pyarrow.Field" + + f = 
pa.field('foo', t, False) + assert not f.nullable + + with pytest.raises(TypeError): + pa.field('foo', None) + + +def test_field_equals(): + meta1 = {b'foo': b'bar'} + meta2 = {b'bizz': b'bazz'} + + f1 = pa.field('a', pa.int8(), nullable=True) + f2 = pa.field('a', pa.int8(), nullable=True) + f3 = pa.field('a', pa.int8(), nullable=False) + f4 = pa.field('a', pa.int16(), nullable=False) + f5 = pa.field('b', pa.int16(), nullable=False) + f6 = pa.field('a', pa.int8(), nullable=True, metadata=meta1) + f7 = pa.field('a', pa.int8(), nullable=True, metadata=meta1) + f8 = pa.field('a', pa.int8(), nullable=True, metadata=meta2) + + assert f1.equals(f2) + assert f6.equals(f7) + assert not f1.equals(f3) + assert not f1.equals(f4) + assert not f3.equals(f4) + assert not f4.equals(f5) + + # No metadata in f1, but metadata in f6 + assert f1.equals(f6) + assert not f1.equals(f6, check_metadata=True) + + # Different metadata + assert f6.equals(f7) + assert f7.equals(f8) + assert not f7.equals(f8, check_metadata=True) + + +def test_field_equality_operators(): + f1 = pa.field('a', pa.int8(), nullable=True) + f2 = pa.field('a', pa.int8(), nullable=True) + f3 = pa.field('b', pa.int8(), nullable=True) + f4 = pa.field('b', pa.int8(), nullable=False) + + assert f1 == f2 + assert f1 != f3 + assert f3 != f4 + assert f1 != 'foo' + + +def test_field_metadata(): + f1 = pa.field('a', pa.int8()) + f2 = pa.field('a', pa.int8(), metadata={}) + f3 = pa.field('a', pa.int8(), metadata={b'bizz': b'bazz'}) + + assert f1.metadata is None + assert f2.metadata == {} + assert f3.metadata[b'bizz'] == b'bazz' + + +def test_field_add_remove_metadata(): + import collections + + f0 = pa.field('foo', pa.int32()) + + assert f0.metadata is None + + metadata = {b'foo': b'bar', b'pandas': b'badger'} + metadata2 = collections.OrderedDict([ + (b'a', b'alpha'), + (b'b', b'beta') + ]) + + f1 = f0.with_metadata(metadata) + assert f1.metadata == metadata + + f2 = f0.with_metadata(metadata2) + assert f2.metadata == metadata2 + + with pytest.raises(TypeError): + f0.with_metadata([1, 2, 3]) + + f3 = f1.remove_metadata() + assert f3.metadata is None + + # idempotent + f4 = f3.remove_metadata() + assert f4.metadata is None + + f5 = pa.field('foo', pa.int32(), True, metadata) + f6 = f0.with_metadata(metadata) + assert f5.equals(f6) + + +def test_field_modified_copies(): + f0 = pa.field('foo', pa.int32(), True) + f0_ = pa.field('foo', pa.int32(), True) + assert f0.equals(f0_) + + f1 = pa.field('foo', pa.int64(), True) + f1_ = f0.with_type(pa.int64()) + assert f1.equals(f1_) + # Original instance is unmodified + assert f0.equals(f0_) + + f2 = pa.field('foo', pa.int32(), False) + f2_ = f0.with_nullable(False) + assert f2.equals(f2_) + # Original instance is unmodified + assert f0.equals(f0_) + + f3 = pa.field('bar', pa.int32(), True) + f3_ = f0.with_name('bar') + assert f3.equals(f3_) + # Original instance is unmodified + assert f0.equals(f0_) + + +def test_is_integer_value(): + assert pa.types.is_integer_value(1) + assert pa.types.is_integer_value(np.int64(1)) + assert not pa.types.is_integer_value('1') + + +def test_is_float_value(): + assert not pa.types.is_float_value(1) + assert pa.types.is_float_value(1.) 
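+    # NumPy floating-point scalars are accepted as float values, while numeric
+    # strings are not, as the remaining assertions check.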
+ assert pa.types.is_float_value(np.float64(1)) + assert not pa.types.is_float_value('1.0') + + +def test_is_boolean_value(): + assert not pa.types.is_boolean_value(1) + assert pa.types.is_boolean_value(True) + assert pa.types.is_boolean_value(False) + assert pa.types.is_boolean_value(np.bool_(True)) + assert pa.types.is_boolean_value(np.bool_(False)) + + +@h.settings(suppress_health_check=(h.HealthCheck.too_slow,)) +@h.given( + past.all_types | + past.all_fields | + past.all_schemas +) +@h.example( + pa.field(name='', type=pa.null(), metadata={'0': '', '': ''}) +) +def test_pickling(pickle_module, field): + data = pickle_module.dumps(field) + assert pickle_module.loads(data) == field + + +@h.given( + st.lists(past.all_types) | + st.lists(past.all_fields) | + st.lists(past.all_schemas) +) +def test_hashing(items): + h.assume( + # well, this is still O(n^2), but makes the input unique + all(not a.equals(b) for i, a in enumerate(items) for b in items[:i]) + ) + + container = {} + for i, item in enumerate(items): + assert hash(item) == hash(item) + container[item] = i + + assert len(container) == len(items) + + for i, item in enumerate(items): + assert container[item] == i + + +def test_types_come_back_with_specific_type(): + for arrow_type in get_many_types(): + schema = pa.schema([pa.field("field_name", arrow_type)]) + type_back = schema.field("field_name").type + assert type(type_back) is type(arrow_type) + + +def test_schema_import_c_schema_interface(): + class Wrapper: + def __init__(self, schema): + self.schema = schema + + def __arrow_c_schema__(self): + return self.schema.__arrow_c_schema__() + + schema = pa.schema([pa.field("field_name", pa.int32())]) + wrapped_schema = Wrapper(schema) + + assert pa.schema(wrapped_schema) == schema + + +def test_field_import_c_schema_interface(): + class Wrapper: + def __init__(self, field): + self.field = field + + def __arrow_c_schema__(self): + return self.field.__arrow_c_schema__() + + field = pa.field("field_name", pa.int32(), metadata={"key": "value"}) + wrapped_field = Wrapper(field) + + assert pa.field(wrapped_field) == field + + with pytest.raises(ValueError, match="cannot specify 'type'"): + pa.field(wrapped_field, type=pa.int64()) + + # override nullable or metadata + assert pa.field(wrapped_field, nullable=False).nullable is False + result = pa.field(wrapped_field, metadata={"other": "meta"}) + assert result.metadata == {b"other": b"meta"} diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_udf.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_udf.py new file mode 100644 index 0000000000000000000000000000000000000000..c8e376fefb3b8a52f102002a89bc79f114c6bc10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_udf.py @@ -0,0 +1,869 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + + +import pytest + +import numpy as np + +import pyarrow as pa +from pyarrow import compute as pc + +# UDFs are all tested with a dataset scan +pytestmark = pytest.mark.dataset + +# For convenience, most of the test here doesn't care about udf func docs +empty_udf_doc = {"summary": "", "description": ""} + +try: + import pyarrow.dataset as ds +except ImportError: + ds = None + + +def mock_udf_context(batch_length=10): + from pyarrow._compute import _get_udf_context + return _get_udf_context(pa.default_memory_pool(), batch_length) + + +class MyError(RuntimeError): + pass + + +@pytest.fixture(scope="session") +def sum_agg_func_fixture(): + """ + Register a unary aggregate function (mean) + """ + def func(ctx, x, *args): + return pa.scalar(np.nansum(x)) + + func_name = "sum_udf" + func_doc = empty_udf_doc + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.float64(), + }, + pa.float64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def exception_agg_func_fixture(): + def func(ctx, x): + raise RuntimeError("Oops") + return pa.scalar(len(x)) + + func_name = "y=exception_len(x)" + func_doc = empty_udf_doc + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.int64(), + }, + pa.int64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def wrong_output_dtype_agg_func_fixture(scope="session"): + def func(ctx, x): + return pa.scalar(len(x), pa.int32()) + + func_name = "y=wrong_output_dtype(x)" + func_doc = empty_udf_doc + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.int64(), + }, + pa.int64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def wrong_output_type_agg_func_fixture(scope="session"): + def func(ctx, x): + return len(x) + + func_name = "y=wrong_output_type(x)" + func_doc = empty_udf_doc + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.int64(), + }, + pa.int64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def binary_func_fixture(): + """ + Register a binary scalar function. + """ + def binary_function(ctx, m, x): + return pc.call_function("multiply", [m, x], + memory_pool=ctx.memory_pool) + func_name = "y=mx" + binary_doc = {"summary": "y=mx", + "description": "find y from y = mx"} + pc.register_scalar_function(binary_function, + func_name, + binary_doc, + {"m": pa.int64(), + "x": pa.int64(), + }, + pa.int64()) + return binary_function, func_name + + +@pytest.fixture(scope="session") +def ternary_func_fixture(): + """ + Register a ternary scalar function. + """ + def ternary_function(ctx, m, x, c): + mx = pc.call_function("multiply", [m, x], + memory_pool=ctx.memory_pool) + return pc.call_function("add", [mx, c], + memory_pool=ctx.memory_pool) + ternary_doc = {"summary": "y=mx+c", + "description": "find y from y = mx + c"} + func_name = "y=mx+c" + pc.register_scalar_function(ternary_function, + func_name, + ternary_doc, + { + "array1": pa.int64(), + "array2": pa.int64(), + "array3": pa.int64(), + }, + pa.int64()) + return ternary_function, func_name + + +@pytest.fixture(scope="session") +def varargs_func_fixture(): + """ + Register a varargs scalar function with at least two arguments. 
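+    The function folds every argument into the first one using the "add"
+    compute kernel, so the result equals the element-wise sum of all inputs.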
+ """ + def varargs_function(ctx, first, *values): + acc = first + for val in values: + acc = pc.call_function("add", [acc, val], + memory_pool=ctx.memory_pool) + return acc + func_name = "z=ax+by+c" + varargs_doc = {"summary": "z=ax+by+c", + "description": "find z from z = ax + by + c" + } + pc.register_scalar_function(varargs_function, + func_name, + varargs_doc, + { + "array1": pa.int64(), + "array2": pa.int64(), + }, + pa.int64()) + return varargs_function, func_name + + +@pytest.fixture(scope="session") +def nullary_func_fixture(): + """ + Register a nullary scalar function. + """ + def nullary_func(context): + return pa.array([42] * context.batch_length, type=pa.int64(), + memory_pool=context.memory_pool) + + func_doc = { + "summary": "random function", + "description": "generates a random value" + } + func_name = "test_nullary_func" + pc.register_scalar_function(nullary_func, + func_name, + func_doc, + {}, + pa.int64()) + + return nullary_func, func_name + + +@pytest.fixture(scope="session") +def wrong_output_type_func_fixture(): + """ + Register a scalar function which returns something that is neither + a Arrow scalar or array. + """ + def wrong_output_type(ctx): + return 42 + + func_name = "test_wrong_output_type" + in_types = {} + out_type = pa.int64() + doc = { + "summary": "return wrong output type", + "description": "" + } + pc.register_scalar_function(wrong_output_type, func_name, doc, + in_types, out_type) + return wrong_output_type, func_name + + +@pytest.fixture(scope="session") +def wrong_output_datatype_func_fixture(): + """ + Register a scalar function whose actual output DataType doesn't + match the declared output DataType. + """ + def wrong_output_datatype(ctx, array): + return pc.call_function("add", [array, 1]) + func_name = "test_wrong_output_datatype" + in_types = {"array": pa.int64()} + # The actual output DataType will be int64. + out_type = pa.int16() + doc = { + "summary": "return wrong output datatype", + "description": "" + } + pc.register_scalar_function(wrong_output_datatype, func_name, doc, + in_types, out_type) + return wrong_output_datatype, func_name + + +@pytest.fixture(scope="session") +def wrong_signature_func_fixture(): + """ + Register a scalar function with the wrong signature. + """ + # Missing the context argument + def wrong_signature(): + return pa.scalar(1, type=pa.int64()) + + func_name = "test_wrong_signature" + in_types = {} + out_type = pa.int64() + doc = { + "summary": "UDF with wrong signature", + "description": "" + } + pc.register_scalar_function(wrong_signature, func_name, doc, + in_types, out_type) + return wrong_signature, func_name + + +@pytest.fixture(scope="session") +def raising_func_fixture(): + """ + Register a scalar function which raises a custom exception. 
+ """ + def raising_func(ctx): + raise MyError("error raised by scalar UDF") + func_name = "test_raise" + doc = { + "summary": "raising function", + "description": "" + } + pc.register_scalar_function(raising_func, func_name, doc, + {}, pa.int64()) + return raising_func, func_name + + +@pytest.fixture(scope="session") +def unary_vector_func_fixture(): + """ + Register a vector function + """ + def pct_rank(ctx, x): + # copy here to get around pandas 1.0 issue + return pa.array(x.to_pandas().copy().rank(pct=True)) + + func_name = "y=pct_rank(x)" + doc = empty_udf_doc + pc.register_vector_function(pct_rank, func_name, doc, { + 'x': pa.float64()}, pa.float64()) + + return pct_rank, func_name + + +@pytest.fixture(scope="session") +def struct_vector_func_fixture(): + """ + Register a vector function that returns a struct array + """ + def pivot(ctx, k, v, c): + df = pa.RecordBatch.from_arrays([k, v, c], names=['k', 'v', 'c']).to_pandas() + df_pivot = df.pivot(columns='c', values='v', index='k').reset_index() + return pa.RecordBatch.from_pandas(df_pivot).to_struct_array() + + func_name = "y=pivot(x)" + doc = empty_udf_doc + pc.register_vector_function( + pivot, func_name, doc, + {'k': pa.int64(), 'v': pa.float64(), 'c': pa.utf8()}, + pa.struct([('k', pa.int64()), ('v1', pa.float64()), ('v2', pa.float64())]) + ) + + return pivot, func_name + + +def check_scalar_function(func_fixture, + inputs, *, + run_in_dataset=True, + batch_length=None): + function, name = func_fixture + if batch_length is None: + all_scalar = True + for arg in inputs: + if isinstance(arg, pa.Array): + all_scalar = False + batch_length = len(arg) + if all_scalar: + batch_length = 1 + + func = pc.get_function(name) + assert func.name == name + + result = pc.call_function(name, inputs, length=batch_length) + expected_output = function(mock_udf_context(batch_length), *inputs) + assert result == expected_output + # At the moment there is an issue when handling nullary functions. + # See: ARROW-15286 and ARROW-16290. 
+ if run_in_dataset: + field_names = [f'field{index}' for index, in_arr in inputs] + table = pa.Table.from_arrays(inputs, field_names) + dataset = ds.dataset(table) + func_args = [ds.field(field_name) for field_name in field_names] + result_table = dataset.to_table( + columns={'result': ds.field('')._call(name, func_args)}) + assert result_table.column(0).chunks[0] == expected_output + + +def test_udf_array_unary(unary_func_fixture): + check_scalar_function(unary_func_fixture, + [ + pa.array([10, 20], pa.int64()) + ] + ) + + +def test_udf_array_binary(binary_func_fixture): + check_scalar_function(binary_func_fixture, + [ + pa.array([10, 20], pa.int64()), + pa.array([2, 4], pa.int64()) + ] + ) + + +def test_udf_array_ternary(ternary_func_fixture): + check_scalar_function(ternary_func_fixture, + [ + pa.array([10, 20], pa.int64()), + pa.array([2, 4], pa.int64()), + pa.array([5, 10], pa.int64()) + ] + ) + + +def test_udf_array_varargs(varargs_func_fixture): + check_scalar_function(varargs_func_fixture, + [ + pa.array([2, 3], pa.int64()), + pa.array([10, 20], pa.int64()), + pa.array([3, 7], pa.int64()), + pa.array([20, 30], pa.int64()), + pa.array([5, 10], pa.int64()) + ] + ) + + +def test_registration_errors(): + # validate function name + doc = { + "summary": "test udf input", + "description": "parameters are validated" + } + in_types = {"scalar": pa.int64()} + out_type = pa.int64() + + def test_reg_function(context): + return pa.array([10]) + + with pytest.raises(TypeError): + pc.register_scalar_function(test_reg_function, + None, doc, in_types, + out_type) + + # validate function + with pytest.raises(TypeError, match="func must be a callable"): + pc.register_scalar_function(None, "test_none_function", doc, in_types, + out_type) + + # validate output type + expected_expr = "DataType expected, got " + with pytest.raises(TypeError, match=expected_expr): + pc.register_scalar_function(test_reg_function, + "test_output_function", doc, in_types, + None) + + # validate input type + expected_expr = "in_types must be a dictionary of DataType" + with pytest.raises(TypeError, match=expected_expr): + pc.register_scalar_function(test_reg_function, + "test_input_function", doc, None, + out_type) + + # register an already registered function + # first registration + pc.register_scalar_function(test_reg_function, + "test_reg_function", doc, {}, + out_type) + # second registration + expected_expr = "Already have a function registered with name:" \ + + " test_reg_function" + with pytest.raises(KeyError, match=expected_expr): + pc.register_scalar_function(test_reg_function, + "test_reg_function", doc, {}, + out_type) + + +def test_varargs_function_validation(varargs_func_fixture): + _, func_name = varargs_func_fixture + + error_msg = r"VarArgs function 'z=ax\+by\+c' needs at least 2 arguments" + + with pytest.raises(ValueError, match=error_msg): + pc.call_function(func_name, [42]) + + +def test_function_doc_validation(): + # validate arity + in_types = {"scalar": pa.int64()} + out_type = pa.int64() + + # doc with no summary + func_doc = { + "description": "desc" + } + + def add_const(ctx, scalar): + return pc.call_function("add", [scalar, 1]) + + with pytest.raises(ValueError, + match="Function doc must contain a summary"): + pc.register_scalar_function(add_const, "test_no_summary", + func_doc, in_types, + out_type) + + # doc with no description + func_doc = { + "summary": "test summary" + } + + with pytest.raises(ValueError, + match="Function doc must contain a description"): + 
pc.register_scalar_function(add_const, "test_no_desc", + func_doc, in_types, + out_type) + + +def test_nullary_function(nullary_func_fixture): + # XXX the Python compute layer API doesn't let us override batch_length, + # so only test with the default value of 1. + check_scalar_function(nullary_func_fixture, [], run_in_dataset=False, + batch_length=1) + + +def test_wrong_output_type(wrong_output_type_func_fixture): + _, func_name = wrong_output_type_func_fixture + + with pytest.raises(TypeError, + match="Unexpected output type: int"): + pc.call_function(func_name, [], length=1) + + +def test_wrong_output_datatype(wrong_output_datatype_func_fixture): + _, func_name = wrong_output_datatype_func_fixture + + expected_expr = ("Expected output datatype int16, " + "but function returned datatype int64") + + with pytest.raises(TypeError, match=expected_expr): + pc.call_function(func_name, [pa.array([20, 30])]) + + +def test_wrong_signature(wrong_signature_func_fixture): + _, func_name = wrong_signature_func_fixture + + expected_expr = (r"wrong_signature\(\) takes 0 positional arguments " + "but 1 was given") + + with pytest.raises(TypeError, match=expected_expr): + pc.call_function(func_name, [], length=1) + + +def test_wrong_datatype_declaration(): + def identity(ctx, val): + return val + + func_name = "test_wrong_datatype_declaration" + in_types = {"array": pa.int64()} + out_type = {} + doc = { + "summary": "test output value", + "description": "test output" + } + with pytest.raises(TypeError, + match="DataType expected, got "): + pc.register_scalar_function(identity, func_name, + doc, in_types, out_type) + + +def test_wrong_input_type_declaration(): + def identity(ctx, val): + return val + + func_name = "test_wrong_input_type_declaration" + in_types = {"array": None} + out_type = pa.int64() + doc = { + "summary": "test invalid input type", + "description": "invalid input function" + } + with pytest.raises(TypeError, + match="DataType expected, got "): + pc.register_scalar_function(identity, func_name, doc, + in_types, out_type) + + +def test_scalar_udf_context(unary_func_fixture): + # Check the memory_pool argument is properly propagated + proxy_pool = pa.proxy_memory_pool(pa.default_memory_pool()) + _, func_name = unary_func_fixture + + res = pc.call_function(func_name, + [pa.array([1] * 1000, type=pa.int64())], + memory_pool=proxy_pool) + assert res == pa.array([2] * 1000, type=pa.int64()) + assert proxy_pool.bytes_allocated() == 1000 * 8 + # Destroying Python array should destroy underlying C++ memory + res = None + assert proxy_pool.bytes_allocated() == 0 + + +def test_raising_func(raising_func_fixture): + _, func_name = raising_func_fixture + with pytest.raises(MyError, match="error raised by scalar UDF"): + pc.call_function(func_name, [], length=1) + + +def test_scalar_input(unary_func_fixture): + function, func_name = unary_func_fixture + res = pc.call_function(func_name, [pa.scalar(10)]) + assert res == pa.scalar(11) + + +def test_input_lifetime(unary_func_fixture): + function, func_name = unary_func_fixture + + proxy_pool = pa.proxy_memory_pool(pa.default_memory_pool()) + assert proxy_pool.bytes_allocated() == 0 + + v = pa.array([1] * 1000, type=pa.int64(), memory_pool=proxy_pool) + assert proxy_pool.bytes_allocated() == 1000 * 8 + pc.call_function(func_name, [v]) + assert proxy_pool.bytes_allocated() == 1000 * 8 + # Calling a UDF should not have kept `v` alive longer than required + v = None + assert proxy_pool.bytes_allocated() == 0 + + +def _record_batch_from_iters(schema, *iters): 
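+    # Build one array per input iterable, cast to the matching schema field's
+    # type, and assemble them into a RecordBatch with that schema; e.g. with
+    # the two-field datasource1 schema, _record_batch_from_iters(schema,
+    # range(3), range(3)) yields a two-column batch (sketch).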
+ arrays = [pa.array(list(v), type=schema[i].type) + for i, v in enumerate(iters)] + return pa.RecordBatch.from_arrays(arrays=arrays, schema=schema) + + +def _record_batch_for_range(schema, n): + return _record_batch_from_iters(schema, + range(n, n + 10), + range(n + 1, n + 11)) + + +def make_udt_func(schema, batch_gen): + def udf_func(ctx): + class UDT: + def __init__(self): + self.caller = None + + def __call__(self, ctx): + try: + if self.caller is None: + self.caller, ctx = batch_gen(ctx).send, None + batch = self.caller(ctx) + except StopIteration: + arrays = [pa.array([], type=field.type) + for field in schema] + batch = pa.RecordBatch.from_arrays( + arrays=arrays, schema=schema) + return batch.to_struct_array() + return UDT() + return udf_func + + +def datasource1_direct(): + """A short dataset""" + schema = datasource1_schema() + + class Generator: + def __init__(self): + self.n = 3 + + def __call__(self, ctx): + if self.n == 0: + batch = _record_batch_from_iters(schema, [], []) + else: + self.n -= 1 + batch = _record_batch_for_range(schema, self.n) + return batch.to_struct_array() + return lambda ctx: Generator() + + +def datasource1_generator(): + schema = datasource1_schema() + + def batch_gen(ctx): + for n in range(3, 0, -1): + # ctx = + yield _record_batch_for_range(schema, n - 1) + return make_udt_func(schema, batch_gen) + + +def datasource1_exception(): + schema = datasource1_schema() + + def batch_gen(ctx): + for n in range(3, 0, -1): + # ctx = + yield _record_batch_for_range(schema, n - 1) + raise RuntimeError("datasource1_exception") + return make_udt_func(schema, batch_gen) + + +def datasource1_schema(): + return pa.schema([('', pa.int32()), ('', pa.int32())]) + + +def datasource1_args(func, func_name): + func_doc = {"summary": f"{func_name} UDT", + "description": "test {func_name} UDT"} + in_types = {} + out_type = pa.struct([("", pa.int32()), ("", pa.int32())]) + return func, func_name, func_doc, in_types, out_type + + +def _test_datasource1_udt(func_maker): + schema = datasource1_schema() + func = func_maker() + func_name = func_maker.__name__ + func_args = datasource1_args(func, func_name) + pc.register_tabular_function(*func_args) + n = 3 + for item in pc.call_tabular_function(func_name): + n -= 1 + assert item == _record_batch_for_range(schema, n) + + +def test_udt_datasource1_direct(): + _test_datasource1_udt(datasource1_direct) + + +def test_udt_datasource1_generator(): + _test_datasource1_udt(datasource1_generator) + + +def test_udt_datasource1_exception(): + with pytest.raises(RuntimeError, match='datasource1_exception'): + _test_datasource1_udt(datasource1_exception) + + +def test_scalar_agg_basic(unary_agg_func_fixture): + arr = pa.array([10.0, 20.0, 30.0, 40.0, 50.0], pa.float64()) + result = pc.call_function("mean_udf", [arr]) + expected = pa.scalar(30.0) + assert result == expected + + +def test_scalar_agg_empty(unary_agg_func_fixture): + empty = pa.array([], pa.float64()) + + with pytest.raises(pa.ArrowInvalid, match='empty inputs'): + pc.call_function("mean_udf", [empty]) + + +def test_scalar_agg_wrong_output_dtype(wrong_output_dtype_agg_func_fixture): + arr = pa.array([10, 20, 30, 40, 50], pa.int64()) + with pytest.raises(pa.ArrowTypeError, match="output datatype"): + pc.call_function("y=wrong_output_dtype(x)", [arr]) + + +def test_scalar_agg_wrong_output_type(wrong_output_type_agg_func_fixture): + arr = pa.array([10, 20, 30, 40, 50], pa.int64()) + with pytest.raises(pa.ArrowTypeError, match="output type"): + pc.call_function("y=wrong_output_type(x)", 
[arr]) + + +def test_scalar_agg_varargs(varargs_agg_func_fixture): + arr1 = pa.array([10, 20, 30, 40, 50], pa.int64()) + arr2 = pa.array([1.0, 2.0, 3.0, 4.0, 5.0], pa.float64()) + + result = pc.call_function( + "sum_mean", [arr1, arr2] + ) + expected = pa.scalar(33.0) + assert result == expected + + +def test_scalar_agg_exception(exception_agg_func_fixture): + arr = pa.array([10, 20, 30, 40, 50, 60], pa.int64()) + + with pytest.raises(RuntimeError, match='Oops'): + pc.call_function("y=exception_len(x)", [arr]) + + +def test_hash_agg_basic(unary_agg_func_fixture): + arr1 = pa.array([10.0, 20.0, 30.0, 40.0, 50.0], pa.float64()) + arr2 = pa.array([4, 2, 1, 2, 1], pa.int32()) + + arr3 = pa.array([60.0, 70.0, 80.0, 90.0, 100.0], pa.float64()) + arr4 = pa.array([5, 1, 1, 4, 1], pa.int32()) + + table1 = pa.table([arr2, arr1], names=["id", "value"]) + table2 = pa.table([arr4, arr3], names=["id", "value"]) + table = pa.concat_tables([table1, table2]) + + result = table.group_by("id").aggregate([("value", "mean_udf")]) + expected = table.group_by("id").aggregate( + [("value", "mean")]).rename_columns(['id', 'value_mean_udf']) + + assert result.sort_by('id') == expected.sort_by('id') + + +def test_hash_agg_empty(unary_agg_func_fixture): + arr1 = pa.array([], pa.float64()) + arr2 = pa.array([], pa.int32()) + table = pa.table([arr2, arr1], names=["id", "value"]) + + result = table.group_by("id").aggregate([("value", "mean_udf")]) + expected = pa.table([pa.array([], pa.int32()), pa.array( + [], pa.float64())], names=['id', 'value_mean_udf']) + + assert result == expected + + +def test_hash_agg_wrong_output_dtype(wrong_output_dtype_agg_func_fixture): + arr1 = pa.array([10, 20, 30, 40, 50], pa.int64()) + arr2 = pa.array([4, 2, 1, 2, 1], pa.int32()) + + table = pa.table([arr2, arr1], names=["id", "value"]) + with pytest.raises(pa.ArrowTypeError, match="output datatype"): + table.group_by("id").aggregate([("value", "y=wrong_output_dtype(x)")]) + + +def test_hash_agg_wrong_output_type(wrong_output_type_agg_func_fixture): + arr1 = pa.array([10, 20, 30, 40, 50], pa.int64()) + arr2 = pa.array([4, 2, 1, 2, 1], pa.int32()) + table = pa.table([arr2, arr1], names=["id", "value"]) + + with pytest.raises(pa.ArrowTypeError, match="output type"): + table.group_by("id").aggregate([("value", "y=wrong_output_type(x)")]) + + +def test_hash_agg_exception(exception_agg_func_fixture): + arr1 = pa.array([10, 20, 30, 40, 50], pa.int64()) + arr2 = pa.array([4, 2, 1, 2, 1], pa.int32()) + table = pa.table([arr2, arr1], names=["id", "value"]) + + with pytest.raises(RuntimeError, match='Oops'): + table.group_by("id").aggregate([("value", "y=exception_len(x)")]) + + +def test_hash_agg_random(sum_agg_func_fixture): + """Test hash aggregate udf with randomly sampled data""" + + value_num = 1000000 + group_num = 1000 + + arr1 = pa.array(np.repeat(1, value_num), pa.float64()) + arr2 = pa.array(np.random.choice(group_num, value_num), pa.int32()) + + table = pa.table([arr2, arr1], names=['id', 'value']) + + result = table.group_by("id").aggregate([("value", "sum_udf")]) + expected = table.group_by("id").aggregate( + [("value", "sum")]).rename_columns(['id', 'value_sum_udf']) + + assert result.sort_by('id') == expected.sort_by('id') + + +@pytest.mark.pandas +def test_vector_basic(unary_vector_func_fixture): + arr = pa.array([10.0, 20.0, 30.0, 40.0, 50.0], pa.float64()) + result = pc.call_function("y=pct_rank(x)", [arr]) + expected = unary_vector_func_fixture[0](None, arr) + assert result == expected + + +@pytest.mark.pandas +def 
test_vector_empty(unary_vector_func_fixture): + arr = pa.array([1], pa.float64()) + result = pc.call_function("y=pct_rank(x)", [arr]) + expected = unary_vector_func_fixture[0](None, arr) + assert result == expected + + +@pytest.mark.pandas +def test_vector_struct(struct_vector_func_fixture): + k = pa.array( + [1, 1, 2, 2], pa.int64() + ) + v = pa.array( + [1.0, 2.0, 3.0, 4.0], pa.float64() + ) + c = pa.array( + ['v1', 'v2', 'v1', 'v2'] + ) + result = pc.call_function("y=pivot(x)", [k, v, c]) + expected = struct_vector_func_fixture[0](None, k, v, c) + assert result == expected diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/test_util.py b/venv/lib/python3.10/site-packages/pyarrow/tests/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e584b041114a82d1e6b3ddeea027a10d500180fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/test_util.py @@ -0,0 +1,229 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import gc +import os +import signal +import shutil +import sys +import textwrap +import weakref + +import pytest + +from pyarrow.util import (doc, _break_traceback_cycle_from_frame, + download_tzdata_on_windows) +from pyarrow.tests.util import disabled_gc + + +@doc(method="func_a", operation="A") +def func_a(whatever): + """ + This is the {method} method. + + It computes {operation}. + """ + pass + + +@doc( + func_a, + textwrap.dedent( + """ + Examples + -------- + + >>> func_b() + B + """ + ), + method="func_b", + operation="B", +) +def func_b(whatever): + pass + + +@doc( + func_a, + method="func_c", + operation="C", +) +def func_c(whatever): + """ + Examples + -------- + + >>> func_c() + C + """ + pass + + +@doc(func_a, method="func_d", operation="D") +def func_d(whatever): + pass + + +@doc(func_d, method="func_e", operation="E") +def func_e(whatever): + pass + + +@doc(method="func_f") +def func_f(whatever): + """ + This is the {method} method. + + {{ We can escape curly braces like this. }} + + Examples + -------- + We should replace curly brace usage in doctests. + + >>> dict(x = "x", y = "y") + >>> set((1, 2, 3)) + """ + pass + + +def test_docstring_formatting(): + docstr = textwrap.dedent( + """ + This is the func_a method. + + It computes A. + """ + ) + assert func_a.__doc__ == docstr + + +def test_docstring_concatenation(): + docstr = textwrap.dedent( + """ + This is the func_b method. + + It computes B. + + Examples + -------- + + >>> func_b() + B + """ + ) + assert func_b.__doc__ == docstr + + +def test_docstring_append(): + docstr = textwrap.dedent( + """ + This is the func_c method. + + It computes C. 
+ + Examples + -------- + + >>> func_c() + C + """ + ) + assert func_c.__doc__ == docstr + + +def test_docstring_template_from_callable(): + docstr = textwrap.dedent( + """ + This is the func_d method. + + It computes D. + """ + ) + assert func_d.__doc__ == docstr + + +def test_inherit_docstring_template_from_callable(): + docstr = textwrap.dedent( + """ + This is the func_e method. + + It computes E. + """ + ) + assert func_e.__doc__ == docstr + + +def test_escaping_in_docstring(): + docstr = textwrap.dedent( + """ + This is the func_f method. + + { We can escape curly braces like this. } + + Examples + -------- + We should replace curly brace usage in doctests. + + >>> dict(x = "x", y = "y") + >>> set((1, 2, 3)) + """ + ) + assert func_f.__doc__ == docstr + + +def exhibit_signal_refcycle(): + # Put an object in the frame locals and return a weakref to it. + # If `signal.getsignal` has a bug where it creates a reference cycle + # keeping alive the current execution frames, `obj` will not be + # destroyed immediately when this function returns. + obj = set() + signal.getsignal(signal.SIGINT) + return weakref.ref(obj) + + +def test_signal_refcycle(): + # Test possible workaround for https://bugs.python.org/issue42248 + with disabled_gc(): + wr = exhibit_signal_refcycle() + if wr() is None: + pytest.skip( + "Python version does not have the bug we're testing for") + + gc.collect() + with disabled_gc(): + wr = exhibit_signal_refcycle() + assert wr() is not None + _break_traceback_cycle_from_frame(sys._getframe(0)) + assert wr() is None + + +@pytest.mark.skipif(sys.platform != "win32", + reason="Timezone database is already provided.") +def test_download_tzdata_on_windows(): + tzdata_path = os.path.expandvars(r"%USERPROFILE%\Downloads\tzdata") + + # Download timezone database and remove data in case it already exists + if (os.path.exists(tzdata_path)): + shutil.rmtree(tzdata_path) + download_tzdata_on_windows() + + # Inspect the folder + assert os.path.exists(tzdata_path) + assert os.path.exists(os.path.join(tzdata_path, "windowsZones.xml")) + assert os.path.exists(os.path.join(tzdata_path, "europe")) + assert 'version' in os.listdir(tzdata_path) diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/util.py b/venv/lib/python3.10/site-packages/pyarrow/tests/util.py new file mode 100644 index 0000000000000000000000000000000000000000..638eee9807335e3af9da83a73002ebcb6665ddec --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/util.py @@ -0,0 +1,465 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +""" +Utility functions for testing +""" + +import contextlib +import decimal +import gc +import numpy as np +import os +import random +import re +import shutil +import signal +import socket +import string +import subprocess +import sys +import time + +import pytest + +import pyarrow as pa +import pyarrow.fs + + +def randsign(): + """Randomly choose either 1 or -1. + + Returns + ------- + sign : int + """ + return random.choice((-1, 1)) + + +@contextlib.contextmanager +def random_seed(seed): + """Set the random seed inside of a context manager. + + Parameters + ---------- + seed : int + The seed to set + + Notes + ----- + This function is useful when you want to set a random seed but not affect + the random state of other functions using the random module. + """ + original_state = random.getstate() + random.seed(seed) + try: + yield + finally: + random.setstate(original_state) + + +def randdecimal(precision, scale): + """Generate a random decimal value with specified precision and scale. + + Parameters + ---------- + precision : int + The maximum number of digits to generate. Must be an integer between 1 + and 38 inclusive. + scale : int + The maximum number of digits following the decimal point. Must be an + integer greater than or equal to 0. + + Returns + ------- + decimal_value : decimal.Decimal + A random decimal.Decimal object with the specified precision and scale. + """ + assert 1 <= precision <= 38, 'precision must be between 1 and 38 inclusive' + if scale < 0: + raise ValueError( + 'randdecimal does not yet support generating decimals with ' + 'negative scale' + ) + max_whole_value = 10 ** (precision - scale) - 1 + whole = random.randint(-max_whole_value, max_whole_value) + + if not scale: + return decimal.Decimal(whole) + + max_fractional_value = 10 ** scale - 1 + fractional = random.randint(0, max_fractional_value) + + return decimal.Decimal( + '{}.{}'.format(whole, str(fractional).rjust(scale, '0')) + ) + + +def random_ascii(length): + return bytes(np.random.randint(65, 123, size=length, dtype='i1')) + + +def rands(nchars): + """ + Generate one random string. + """ + RANDS_CHARS = np.array( + list(string.ascii_letters + string.digits), dtype=(np.str_, 1)) + return "".join(np.random.choice(RANDS_CHARS, nchars)) + + +def make_dataframe(): + import pandas as pd + + N = 30 + df = pd.DataFrame( + {col: np.random.randn(N) for col in string.ascii_uppercase[:4]}, + index=pd.Index([rands(10) for _ in range(N)]) + ) + return df + + +def memory_leak_check(f, metric='rss', threshold=1 << 17, iterations=10, + check_interval=1): + """ + Execute the function and try to detect a clear memory leak either internal + to Arrow or caused by a reference counting problem in the Python binding + implementation. Raises exception if a leak detected + + Parameters + ---------- + f : callable + Function to invoke on each iteration + metric : {'rss', 'vms', 'shared'}, default 'rss' + Attribute of psutil.Process.memory_info to use for determining current + memory use + threshold : int, default 128K + Threshold in number of bytes to consider a leak + iterations : int, default 10 + Total number of invocations of f + check_interval : int, default 1 + Number of invocations of f in between each memory use check + """ + import psutil + proc = psutil.Process() + + def _get_use(): + gc.collect() + return getattr(proc.memory_info(), metric) + + baseline_use = _get_use() + + def _leak_check(): + current_use = _get_use() + if current_use - baseline_use > threshold: + raise Exception("Memory leak detected. 
" + "Departure from baseline {} after {} iterations" + .format(current_use - baseline_use, i)) + + for i in range(iterations): + f() + if i % check_interval == 0: + _leak_check() + + +def get_modified_env_with_pythonpath(): + # Prepend pyarrow root directory to PYTHONPATH + env = os.environ.copy() + existing_pythonpath = env.get('PYTHONPATH', '') + + module_path = os.path.abspath( + os.path.dirname(os.path.dirname(pa.__file__))) + + if existing_pythonpath: + new_pythonpath = os.pathsep.join((module_path, existing_pythonpath)) + else: + new_pythonpath = module_path + env['PYTHONPATH'] = new_pythonpath + return env + + +def invoke_script(script_name, *args): + subprocess_env = get_modified_env_with_pythonpath() + + dir_path = os.path.dirname(os.path.realpath(__file__)) + python_file = os.path.join(dir_path, script_name) + + cmd = [sys.executable, python_file] + cmd.extend(args) + + subprocess.check_call(cmd, env=subprocess_env) + + +@contextlib.contextmanager +def changed_environ(name, value): + """ + Temporarily set environment variable *name* to *value*. + """ + orig_value = os.environ.get(name) + os.environ[name] = value + try: + yield + finally: + if orig_value is None: + del os.environ[name] + else: + os.environ[name] = orig_value + + +@contextlib.contextmanager +def change_cwd(path): + curdir = os.getcwd() + os.chdir(str(path)) + try: + yield + finally: + os.chdir(curdir) + + +@contextlib.contextmanager +def disabled_gc(): + gc.disable() + try: + yield + finally: + gc.enable() + + +def _filesystem_uri(path): + # URIs on Windows must follow 'file:///C:...' or 'file:/C:...' patterns. + if os.name == 'nt': + uri = 'file:///{}'.format(path) + else: + uri = 'file://{}'.format(path) + return uri + + +class FSProtocolClass: + def __init__(self, path): + self._path = path + + def __fspath__(self): + return str(self._path) + + +class ProxyHandler(pyarrow.fs.FileSystemHandler): + """ + A dataset handler that proxies to an underlying filesystem. Useful + to partially wrap an existing filesystem with partial changes. 
+ """ + + def __init__(self, fs): + self._fs = fs + + def __eq__(self, other): + if isinstance(other, ProxyHandler): + return self._fs == other._fs + return NotImplemented + + def __ne__(self, other): + if isinstance(other, ProxyHandler): + return self._fs != other._fs + return NotImplemented + + def get_type_name(self): + return "proxy::" + self._fs.type_name + + def normalize_path(self, path): + return self._fs.normalize_path(path) + + def get_file_info(self, paths): + return self._fs.get_file_info(paths) + + def get_file_info_selector(self, selector): + return self._fs.get_file_info(selector) + + def create_dir(self, path, recursive): + return self._fs.create_dir(path, recursive=recursive) + + def delete_dir(self, path): + return self._fs.delete_dir(path) + + def delete_dir_contents(self, path, missing_dir_ok): + return self._fs.delete_dir_contents(path, + missing_dir_ok=missing_dir_ok) + + def delete_root_dir_contents(self): + return self._fs.delete_dir_contents("", accept_root_dir=True) + + def delete_file(self, path): + return self._fs.delete_file(path) + + def move(self, src, dest): + return self._fs.move(src, dest) + + def copy_file(self, src, dest): + return self._fs.copy_file(src, dest) + + def open_input_stream(self, path): + return self._fs.open_input_stream(path) + + def open_input_file(self, path): + return self._fs.open_input_file(path) + + def open_output_stream(self, path, metadata): + return self._fs.open_output_stream(path, metadata=metadata) + + def open_append_stream(self, path, metadata): + return self._fs.open_append_stream(path, metadata=metadata) + + +def get_raise_signal(): + if sys.version_info >= (3, 8): + return signal.raise_signal + elif os.name == 'nt': + # On Windows, os.kill() doesn't actually send a signal, + # it just terminates the process with the given exit code. + pytest.skip("test requires Python 3.8+ on Windows") + else: + # On Unix, emulate raise_signal() with os.kill(). + def raise_signal(signum): + os.kill(os.getpid(), signum) + return raise_signal + + +@contextlib.contextmanager +def signal_wakeup_fd(*, warn_on_full_buffer=False): + # Use a socket pair, rather a self-pipe, so that select() can be used + # on Windows. 
+ r, w = socket.socketpair() + old_fd = None + try: + r.setblocking(False) + w.setblocking(False) + old_fd = signal.set_wakeup_fd( + w.fileno(), warn_on_full_buffer=warn_on_full_buffer) + yield r + finally: + if old_fd is not None: + signal.set_wakeup_fd(old_fd) + r.close() + w.close() + + +def _ensure_minio_component_version(component, minimum_year): + full_args = [component, '--version'] + with subprocess.Popen(full_args, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, encoding='utf-8') as proc: + if proc.wait(10) != 0: + return False + stdout = proc.stdout.read() + pattern = component + r' version RELEASE\.(\d+)-.*' + version_match = re.search(pattern, stdout) + if version_match: + version_year = version_match.group(1) + return int(version_year) >= minimum_year + else: + raise FileNotFoundError( + "minio component older than the minimum year") + + +def _wait_for_minio_startup(mcdir, address, access_key, secret_key): + start = time.time() + while time.time() - start < 10: + try: + _run_mc_command(mcdir, 'alias', 'set', 'myminio', + f'http://{address}', access_key, secret_key) + return + except ChildProcessError: + time.sleep(1) + raise Exception("mc command could not connect to local minio") + + +def _run_mc_command(mcdir, *args): + full_args = ['mc', '-C', mcdir] + list(args) + with subprocess.Popen(full_args, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, encoding='utf-8') as proc: + retval = proc.wait(10) + cmd_str = ' '.join(full_args) + print(f'Cmd: {cmd_str}') + print(f' Return: {retval}') + print(f' Stdout: {proc.stdout.read()}') + print(f' Stderr: {proc.stderr.read()}') + if retval != 0: + raise ChildProcessError("Could not run mc") + + +def _configure_s3_limited_user(s3_server, policy): + """ + Attempts to use the mc command to configure the minio server + with a special user limited:limited123 which does not have + permission to create buckets. This mirrors some real life S3 + configurations where users are given strict permissions. + + Arrow S3 operations should still work in such a configuration + (e.g. 
see ARROW-13685) + """ + + if sys.platform == 'win32': + # Can't rely on FileNotFound check because + # there is sometimes an mc command on Windows + # which is unrelated to the minio mc + pytest.skip('The mc command is not installed on Windows') + + try: + # ensuring version of mc and minio for the capabilities we need + _ensure_minio_component_version('mc', 2021) + _ensure_minio_component_version('minio', 2021) + + tempdir = s3_server['tempdir'] + host, port, access_key, secret_key = s3_server['connection'] + address = '{}:{}'.format(host, port) + + mcdir = os.path.join(tempdir, 'mc') + if os.path.exists(mcdir): + shutil.rmtree(mcdir) + os.mkdir(mcdir) + policy_path = os.path.join(tempdir, 'limited-buckets-policy.json') + with open(policy_path, mode='w') as policy_file: + policy_file.write(policy) + # The s3_server fixture starts the minio process but + # it takes a few moments for the process to become available + _wait_for_minio_startup(mcdir, address, access_key, secret_key) + # These commands create a limited user with a specific + # policy and creates a sample bucket for that user to + # write to + _run_mc_command(mcdir, 'admin', 'policy', 'add', + 'myminio/', 'no-create-buckets', policy_path) + _run_mc_command(mcdir, 'admin', 'user', 'add', + 'myminio/', 'limited', 'limited123') + _run_mc_command(mcdir, 'admin', 'policy', 'set', + 'myminio', 'no-create-buckets', 'user=limited') + _run_mc_command(mcdir, 'mb', 'myminio/existing-bucket', + '--ignore-existing') + + except FileNotFoundError: + pytest.skip("Configuring limited s3 user failed") + + +def windows_has_tzdata(): + """ + This is the default location where tz.cpp will look for (until we make + this configurable at run-time) + """ + tzdata_bool = False + if "PYARROW_TZDATA_PATH" in os.environ: + tzdata_bool = os.path.exists(os.environ['PYARROW_TZDATA_PATH']) + if not tzdata_bool: + tzdata_path = os.path.expandvars(r"%USERPROFILE%\Downloads\tzdata") + tzdata_bool = os.path.exists(tzdata_path) + + return tzdata_bool diff --git a/venv/lib/python3.10/site-packages/pyarrow/vendored/__init__.py b/venv/lib/python3.10/site-packages/pyarrow/vendored/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..13a83393a9124bf6ec36540556b4808abd47e206 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/vendored/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
diff --git a/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f52e40ec00e66a1dce228b599c2ef498ff72643c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/docscrape.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/docscrape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99735fc053ad94d6d8bb53885636ad9cbe61a9ea Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/docscrape.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83c124c3a544718476596e46272ced746e598a79 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pyarrow/vendored/__pycache__/version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pyarrow/vendored/docscrape.py b/venv/lib/python3.10/site-packages/pyarrow/vendored/docscrape.py new file mode 100644 index 0000000000000000000000000000000000000000..6c4d6e01400bfec8e5c7c276d2ec27c4611f2164 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/vendored/docscrape.py @@ -0,0 +1,716 @@ +# Vendored from https://github.com/numpy/numpydoc/, +# changeset 4ae1e00e72e522c126403c1814f0b99dc5978622 + +# This file is licensed under the BSD License. See the LICENSE.txt file +# in the root of the `numpydoc` repository for complete details. + +"""Extract reference documentation from the NumPy source tree. + +""" +import inspect +import textwrap +import re +import pydoc +from warnings import warn +from collections import namedtuple +from collections.abc import Callable, Mapping +import copy +import sys + + +def strip_blank_lines(l): + "Remove leading and trailing blank lines from a list of lines" + while l and not l[0].strip(): + del l[0] + while l and not l[-1].strip(): + del l[-1] + return l + + +class Reader: + """A line-based string reader. + + """ + + def __init__(self, data): + """ + Parameters + ---------- + data : str + String with lines separated by '\\n'. 
+ + """ + if isinstance(data, list): + self._str = data + else: + self._str = data.split('\n') # store string as list of lines + + self.reset() + + def __getitem__(self, n): + return self._str[n] + + def reset(self): + self._l = 0 # current line nr + + def read(self): + if not self.eof(): + out = self[self._l] + self._l += 1 + return out + else: + return '' + + def seek_next_non_empty_line(self): + for l in self[self._l:]: + if l.strip(): + break + else: + self._l += 1 + + def eof(self): + return self._l >= len(self._str) + + def read_to_condition(self, condition_func): + start = self._l + for line in self[start:]: + if condition_func(line): + return self[start:self._l] + self._l += 1 + if self.eof(): + return self[start:self._l+1] + return [] + + def read_to_next_empty_line(self): + self.seek_next_non_empty_line() + + def is_empty(line): + return not line.strip() + + return self.read_to_condition(is_empty) + + def read_to_next_unindented_line(self): + def is_unindented(line): + return (line.strip() and (len(line.lstrip()) == len(line))) + return self.read_to_condition(is_unindented) + + def peek(self, n=0): + if self._l + n < len(self._str): + return self[self._l + n] + else: + return '' + + def is_empty(self): + return not ''.join(self._str).strip() + + +class ParseError(Exception): + def __str__(self): + message = self.args[0] + if hasattr(self, 'docstring'): + message = "%s in %r" % (message, self.docstring) + return message + + +Parameter = namedtuple('Parameter', ['name', 'type', 'desc']) + + +class NumpyDocString(Mapping): + """Parses a numpydoc string to an abstract representation + + Instances define a mapping from section title to structured data. + + """ + + sections = { + 'Signature': '', + 'Summary': [''], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Yields': [], + 'Receives': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + + def __init__(self, docstring, config=None): + orig_docstring = docstring + docstring = textwrap.dedent(docstring).split('\n') + + self._doc = Reader(docstring) + self._parsed_data = copy.deepcopy(self.sections) + + try: + self._parse() + except ParseError as e: + e.docstring = orig_docstring + raise + + def __getitem__(self, key): + return self._parsed_data[key] + + def __setitem__(self, key, val): + if key not in self._parsed_data: + self._error_location("Unknown section %s" % key, error=False) + else: + self._parsed_data[key] = val + + def __iter__(self): + return iter(self._parsed_data) + + def __len__(self): + return len(self._parsed_data) + + def _is_at_section(self): + self._doc.seek_next_non_empty_line() + + if self._doc.eof(): + return False + + l1 = self._doc.peek().strip() # e.g. Parameters + + if l1.startswith('.. index::'): + return True + + l2 = self._doc.peek(1).strip() # ---------- or ========== + if len(l2) >= 3 and (set(l2) in ({'-'}, {'='})) and len(l2) != len(l1): + snip = '\n'.join(self._doc._str[:2])+'...' + self._error_location("potentially wrong underline length... 
\n%s \n%s in \n%s" + % (l1, l2, snip), error=False) + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) + + def _strip(self, doc): + i = 0 + j = 0 + for i, line in enumerate(doc): + if line.strip(): + break + + for j, line in enumerate(doc[::-1]): + if line.strip(): + break + + return doc[i:len(doc)-j] + + def _read_to_next_section(self): + section = self._doc.read_to_next_empty_line() + + while not self._is_at_section() and not self._doc.eof(): + if not self._doc.peek(-1).strip(): # previous line was empty + section += [''] + + section += self._doc.read_to_next_empty_line() + + return section + + def _read_sections(self): + while not self._doc.eof(): + data = self._read_to_next_section() + name = data[0].strip() + + if name.startswith('..'): # index section + yield name, data[1:] + elif len(data) < 2: + yield StopIteration + else: + yield name, self._strip(data[2:]) + + def _parse_param_list(self, content, single_element_is_type=False): + content = dedent_lines(content) + r = Reader(content) + params = [] + while not r.eof(): + header = r.read().strip() + if ' :' in header: + arg_name, arg_type = header.split(' :', maxsplit=1) + arg_name, arg_type = arg_name.strip(), arg_type.strip() + else: + if single_element_is_type: + arg_name, arg_type = '', header + else: + arg_name, arg_type = header, '' + + desc = r.read_to_next_unindented_line() + desc = dedent_lines(desc) + desc = strip_blank_lines(desc) + + params.append(Parameter(arg_name, arg_type, desc)) + + return params + + # See also supports the following formats. + # + # + # SPACE* COLON SPACE+ SPACE* + # ( COMMA SPACE+ )+ (COMMA | PERIOD)? SPACE* + # ( COMMA SPACE+ )* SPACE* COLON SPACE+ SPACE* + + # is one of + # + # COLON COLON BACKTICK BACKTICK + # where + # is a legal function name, and + # is any nonempty sequence of word characters. + # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j` + # is a string describing the function. + + _role = r":(?P(py:)?\w+):" + _funcbacktick = r"`(?P(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`" + _funcplain = r"(?P[a-zA-Z0-9_\.-]+)" + _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")" + _funcnamenext = _funcname.replace('role', 'rolenext') + _funcnamenext = _funcnamenext.replace('name', 'namenext') + _description = r"(?P\s*:(\s+(?P\S+.*))?)?\s*$" + _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*") + _line_rgx = re.compile( + r"^\s*" + + r"(?P" + # group for all function names + _funcname + + r"(?P([,]\s+" + _funcnamenext + r")*)" + + r")" + # end of "allfuncs" + # Some function lists have a trailing comma (or period) '\s*' + r"(?P[,\.])?" + + _description) + + # Empty elements are replaced with '..' + empty_description = '..' 
+ + def _parse_see_also(self, content): + """ + func_name : Descriptive text + continued text + another_func_name : Descriptive text + func_name1, func_name2, :meth:`func_name`, func_name3 + + """ + + content = dedent_lines(content) + + items = [] + + def parse_item_name(text): + """Match ':role:`name`' or 'name'.""" + m = self._func_rgx.match(text) + if not m: + self._error_location(f"Error parsing See Also entry {line!r}") + role = m.group('role') + name = m.group('name') if role else m.group('name2') + return name, role, m.end() + + rest = [] + for line in content: + if not line.strip(): + continue + + line_match = self._line_rgx.match(line) + description = None + if line_match: + description = line_match.group('desc') + if line_match.group('trailing') and description: + self._error_location( + 'Unexpected comma or period after function list at index %d of ' + 'line "%s"' % (line_match.end('trailing'), line), + error=False) + if not description and line.startswith(' '): + rest.append(line.strip()) + elif line_match: + funcs = [] + text = line_match.group('allfuncs') + while True: + if not text.strip(): + break + name, role, match_end = parse_item_name(text) + funcs.append((name, role)) + text = text[match_end:].strip() + if text and text[0] == ',': + text = text[1:].strip() + rest = list(filter(None, [description])) + items.append((funcs, rest)) + else: + self._error_location(f"Error parsing See Also entry {line!r}") + return items + + def _parse_index(self, section, content): + """ + .. index: default + :refguide: something, else, and more + + """ + def strip_each_in(lst): + return [s.strip() for s in lst] + + out = {} + section = section.split('::') + if len(section) > 1: + out['default'] = strip_each_in(section[1].split(','))[0] + for line in content: + line = line.split(':') + if len(line) > 2: + out[line[1]] = strip_each_in(line[2].split(',')) + return out + + def _parse_summary(self): + """Grab signature (if given) and summary""" + if self._is_at_section(): + return + + # If several signatures present, take the last one + while True: + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$') + if compiled.match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + continue + break + + if summary is not None: + self['Summary'] = summary + + if not self._is_at_section(): + self['Extended Summary'] = self._read_to_next_section() + + def _parse(self): + self._doc.reset() + self._parse_summary() + + sections = list(self._read_sections()) + section_names = set([section for section, content in sections]) + + has_returns = 'Returns' in section_names + has_yields = 'Yields' in section_names + # We could do more tests, but we are not. Arbitrarily. + if has_returns and has_yields: + msg = 'Docstring contains both a Returns and Yields section.' + raise ValueError(msg) + if not has_yields and 'Receives' in section_names: + msg = 'Docstring contains a Receives section but not Yields.' 
+ raise ValueError(msg) + + for (section, content) in sections: + if not section.startswith('..'): + section = (s.capitalize() for s in section.split(' ')) + section = ' '.join(section) + if self.get(section): + self._error_location("The section %s appears twice in %s" + % (section, '\n'.join(self._doc._str))) + + if section in ('Parameters', 'Other Parameters', 'Attributes', + 'Methods'): + self[section] = self._parse_param_list(content) + elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'): + self[section] = self._parse_param_list( + content, single_element_is_type=True) + elif section.startswith('.. index::'): + self['index'] = self._parse_index(section, content) + elif section == 'See Also': + self['See Also'] = self._parse_see_also(content) + else: + self[section] = content + + @property + def _obj(self): + if hasattr(self, '_cls'): + return self._cls + elif hasattr(self, '_f'): + return self._f + return None + + def _error_location(self, msg, error=True): + if self._obj is not None: + # we know where the docs came from: + try: + filename = inspect.getsourcefile(self._obj) + except TypeError: + filename = None + msg += f" in the docstring of {self._obj.__name__}" + msg += f" in {filename}." if filename else "" + if error: + raise ValueError(msg) + else: + warn(msg) + + # string conversion routines + + def _str_header(self, name, symbol='-'): + return [name, len(name)*symbol] + + def _str_indent(self, doc, indent=4): + return [' '*indent + line for line in doc] + + def _str_signature(self): + if self['Signature']: + return [self['Signature'].replace('*', r'\*')] + [''] + return [''] + + def _str_summary(self): + if self['Summary']: + return self['Summary'] + [''] + return [] + + def _str_extended_summary(self): + if self['Extended Summary']: + return self['Extended Summary'] + [''] + return [] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_header(name) + for param in self[name]: + parts = [] + if param.name: + parts.append(param.name) + if param.type: + parts.append(param.type) + out += [' : '.join(parts)] + if param.desc and ''.join(param.desc).strip(): + out += self._str_indent(param.desc) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += self[name] + out += [''] + return out + + def _str_see_also(self, func_role): + if not self['See Also']: + return [] + out = [] + out += self._str_header("See Also") + out += [''] + last_had_desc = True + for funcs, desc in self['See Also']: + assert isinstance(funcs, list) + links = [] + for func, role in funcs: + if role: + link = ':%s:`%s`' % (role, func) + elif func_role: + link = ':%s:`%s`' % (func_role, func) + else: + link = "`%s`_" % func + links.append(link) + link = ', '.join(links) + out += [link] + if desc: + out += self._str_indent([' '.join(desc)]) + last_had_desc = True + else: + last_had_desc = False + out += self._str_indent([self.empty_description]) + + if last_had_desc: + out += [''] + out += [''] + return out + + def _str_index(self): + idx = self['index'] + out = [] + output_index = False + default_index = idx.get('default', '') + if default_index: + output_index = True + out += ['.. 
index:: %s' % default_index] + for section, references in idx.items(): + if section == 'default': + continue + output_index = True + out += [' :%s: %s' % (section, ', '.join(references))] + if output_index: + return out + return '' + + def __str__(self, func_role=''): + out = [] + out += self._str_signature() + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Returns', 'Yields', 'Receives', + 'Other Parameters', 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_section('Warnings') + out += self._str_see_also(func_role) + for s in ('Notes', 'References', 'Examples'): + out += self._str_section(s) + for param_list in ('Attributes', 'Methods'): + out += self._str_param_list(param_list) + out += self._str_index() + return '\n'.join(out) + + +def dedent_lines(lines): + """Deindent a list of lines maximally""" + return textwrap.dedent("\n".join(lines)).split("\n") + + +class FunctionDoc(NumpyDocString): + def __init__(self, func, role='func', doc=None, config=None): + self._f = func + self._role = role # e.g. "func" or "meth" + + if doc is None: + if func is None: + raise ValueError("No function or docstring given") + doc = inspect.getdoc(func) or '' + if config is None: + config = {} + NumpyDocString.__init__(self, doc, config) + + def get_func(self): + func_name = getattr(self._f, '__name__', self.__class__.__name__) + if inspect.isclass(self._f): + func = getattr(self._f, '__call__', self._f.__init__) + else: + func = self._f + return func, func_name + + def __str__(self): + out = '' + + func, func_name = self.get_func() + + roles = {'func': 'function', + 'meth': 'method'} + + if self._role: + if self._role not in roles: + print("Warning: invalid role %s" % self._role) + out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), + func_name) + + out += super().__str__(func_role=self._role) + return out + + +class ObjDoc(NumpyDocString): + def __init__(self, obj, doc=None, config=None): + self._f = obj + if config is None: + config = {} + NumpyDocString.__init__(self, doc, config=config) + + +class ClassDoc(NumpyDocString): + + extra_public_methods = ['__call__'] + + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, + config=None): + if not inspect.isclass(cls) and cls is not None: + raise ValueError("Expected a class or None, but got %r" % cls) + self._cls = cls + + if 'sphinx' in sys.modules: + from sphinx.ext.autodoc import ALL + else: + ALL = object() + + if config is None: + config = {} + self.show_inherited_members = config.get( + 'show_inherited_class_members', True) + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + + if doc is None: + if cls is None: + raise ValueError("No class or documentation string given") + doc = pydoc.getdoc(cls) + + NumpyDocString.__init__(self, doc) + + _members = config.get('members', []) + if _members is ALL: + _members = None + _exclude = config.get('exclude-members', []) + + if config.get('show_class_members', True) and _exclude is not ALL: + def splitlines_x(s): + if not s: + return [] + else: + return s.splitlines() + for field, items in [('Methods', self.methods), + ('Attributes', self.properties)]: + if not self[field]: + doc_list = [] + for name in sorted(items): + if (name in _exclude or + (_members and name not in _members)): + continue + try: + doc_item = pydoc.getdoc(getattr(self._cls, name)) + doc_list.append( + Parameter(name, '', splitlines_x(doc_item))) + except AttributeError: + pass # method doesn't exist + self[field] = doc_list + + @property + def methods(self): + if self._cls is None: + return [] + return [name for name, func in inspect.getmembers(self._cls) + if ((not name.startswith('_') or + name in self.extra_public_methods) and + isinstance(func, Callable) and + self._is_show_member(name))] + + @property + def properties(self): + if self._cls is None: + return [] + return [name for name, func in inspect.getmembers(self._cls) + if (not name.startswith('_') and + (func is None or isinstance(func, property) or + inspect.isdatadescriptor(func)) and + self._is_show_member(name))] + + def _is_show_member(self, name): + if self.show_inherited_members: + return True # show all class members + if name not in self._cls.__dict__: + return False # class member is inherited, we do not show it + return True + + +def get_doc_object(obj, what=None, doc=None, config=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif isinstance(obj, Callable): + what = 'function' + else: + what = 'object' + if config is None: + config = {} + + if what == 'class': + return ClassDoc(obj, func_doc=FunctionDoc, doc=doc, config=config) + elif what in ('function', 'method'): + return FunctionDoc(obj, doc=doc, config=config) + else: + if doc is None: + doc = pydoc.getdoc(obj) + return ObjDoc(obj, doc, config=config) diff --git a/venv/lib/python3.10/site-packages/pyarrow/vendored/version.py b/venv/lib/python3.10/site-packages/pyarrow/vendored/version.py new file mode 100644 index 0000000000000000000000000000000000000000..b74f1da978378be1ac15d707132fe6b187d5bd0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/vendored/version.py @@ -0,0 +1,545 @@ +# Vendored from https://github.com/pypa/packaging, +# changeset b5878c977206f60302536db969a8cef420853ade + +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of the +# `packaging` repository for complete details. 
+ +import collections +import itertools +import re +import warnings + +__all__ = ["parse", "Version", "LegacyVersion", + "InvalidVersion", "VERSION_PATTERN"] + + +class InfinityType: + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +NegativeInfinity = NegativeInfinityType() + + +_Version = collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion: + + def __hash__(self): + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. 
+ def __lt__(self, other): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +class LegacyVersion(_BaseVersion): + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release", + DeprecationWarning, + ) + + def __str__(self): + return self._version + + def __repr__(self): + return f"" + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def epoch(self): + return -1 + + @property + def release(self): + return None + + @property + def pre(self): + return None + + @property + def post(self): + return None + + @property + def dev(self): + return None + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + def is_postrelease(self): + return False + + @property + def is_devrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) + +_legacy_version_replacement_map = { + "pre": "c", + "preview": "c", + "-": "final-", + "rc": "c", + "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + + return epoch, tuple(parts) + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN +
+                        r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version):
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group(
+                    "post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return f""
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self):
+        _epoch = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self):
+        _release = self._version.release
+        return _release
+
+    @property
+    def pre(self):
+        _pre = self._version.pre
+        return _pre
+
+    @property
+    def post(self):
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self):
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self):
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self):
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self):
+        return self.post is not None
+
+    @property
+    def is_devrelease(self):
+        return self.dev is not None
+
+    @property
+    def major(self):
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self):
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self):
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(letter, number):
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
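+
+# A few illustrative (hypothetical) normalizations from the helper above:
+# _parse_letter_version("alpha", None) == ("a", 0),
+# _parse_letter_version("rev", "2") == ("post", 2), and
+# _parse_letter_version(None, "1") == ("post", 1), i.e. the implicit
+# "1.0-1" style post release.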
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we reverse the list, drop the now-leading
+    # zeros until we reach something non-zero, reverse the remainder back into
+    # the correct order, and use that tuple as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0,
+                                          reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i)
+            for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
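+
+
+# Minimal usage sketch (hypothetical, not part of the upstream packaging
+# sources): sorting a few version strings via parse(), which exercises the
+# _cmpkey() ordering rules above.
+if __name__ == "__main__":
+    sample = ["1.0.dev0", "1.0a1", "1.0", "1.0.post1", "1.0+local.1", "2.0"]
+    # Expected: ['1.0.dev0', '1.0a1', '1.0', '1.0+local.1', '1.0.post1', '2.0']
+    print(sorted(sample, key=parse))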