diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..183fe3ab886a7f835252f7a530fddea83efd681e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_16597.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_16597.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e2a38c5b367dba99c733aca62d384bf81e535a6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_16597.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_39313.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_39313.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33885dc4e732092a757751cd48c6265222fd120b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_39313.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_7980.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_7980.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fff2aef8c2aa584f8e49220b1c2a9e5cd0af8c2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/arrow_7980.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4576eba49a2c05e1295353d8b304d99202a9312 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/pandas_examples.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/pandas_examples.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e1be5c6eaa91515c6e25582f92609fc36436072 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/pandas_examples.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/pandas_threaded_import.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/pandas_threaded_import.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2647a0548259094fb3c7ccfffff0651cb11e8db0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/pandas_threaded_import.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/read_record_batch.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/read_record_batch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4be0f31f96879e97e9a0aa2b2146ddea6f3abe36 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/read_record_batch.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/strategies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/strategies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80a34823666583bc1fce504357c00f4089cd7eb9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/strategies.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_acero.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_acero.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1d34ded9b788bc23979c9dbc5450dee1d52cb11 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_acero.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_adhoc_memory_leak.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_adhoc_memory_leak.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fe9adb89146bb512d655f6ba0483a8b844a6b0f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_adhoc_memory_leak.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed4af674720722d61632500b568f03c26b4f48ee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_builder.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c94f1e92091693067ab3a844378d64d49475f0c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_builder.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cffi.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cffi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6b1e889c31b707a2f4220c1f87f4ff40d49c9f6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cffi.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_compute.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_compute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb3111e32d93f29753752fa452649d1245c06719 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_compute.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_convert_builtin.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_convert_builtin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cc0feace0ef44661ef805c58f91777b6bc4b067 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_convert_builtin.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cpp_internals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cpp_internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa94f7ee8c39be6c101b94d5c6d0819597901977 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cpp_internals.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_csv.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_csv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b27bdeddf0bc2c46b5f6a419be34e6e945d6947 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_csv.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cuda.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cuda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ae32021208d6ba9737efa67ab84dcfc845c960e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cuda.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cuda_numba_interop.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cuda_numba_interop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67a90368234ff8426ea54782f9125f503c80497e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cuda_numba_interop.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cython.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cython.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..934fb0df24a626bb4ef2cf812d3855ea1b3275fa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_cython.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90dfb3415bc091647a668d06bbe4be36db8dace0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset_encryption.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset_encryption.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8da50d3804f2d9e2a057d559bf5dcdf13d64871b Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dataset_encryption.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_deprecations.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_deprecations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddef76e670ac335334f8b52e3e45f54d9c1c2c74 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_deprecations.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dlpack.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dlpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80e6074c9c6abcca4d12a827fc4f6c0d0f0c56d7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_dlpack.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_exec_plan.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_exec_plan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9b7095455b9052ee37ed68e58fdf2dd0ec91fe4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_exec_plan.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_extension_type.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_extension_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97948f8b48de0fbaaa5c5cefdd40de3f3e3fbb74 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_extension_type.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_feather.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_feather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8415abdc30d9afebad0c73f44961daff434f01de Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_feather.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_filesystem.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_filesystem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1e505761cc48cb0517f7fa2648e657ce3abb3de Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_filesystem.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_flight.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_flight.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e43582228e3ae58b6744f9decc10278607941b9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_flight.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_flight_async.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_flight_async.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4e38852d2b455d15e49f3a8e8dd923305c0e59a6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_flight_async.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_fs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_fs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9324a8050edc2849c8ba7f9d881cfa9e2b17cdf5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_fs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_gandiva.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_gandiva.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..022738739aa03a36919025f185bebca37a5d1272 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_gandiva.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_gdb.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_gdb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5707764f6144a6772a53aa76557543b194140a1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_gdb.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_hdfs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_hdfs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a63ae195412fa3e608954a840f386cfbf16653c0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_hdfs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_io.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91818b67c9b1cc882b39e5aceef90e5239f116bb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_io.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_ipc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_ipc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2523a2d2823d81547f7e92369a0874ea75765be9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_ipc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_json.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_json.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1adaa10ebf9155da202eca3bfe3224e17b7c284 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_json.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_jvm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_jvm.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..98753ca3ba013f475378a145a9489ca365c3857f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_jvm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_memory.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ac86878a3ca05fa1f0aee4a3d802fb20f454d02 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_memory.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_misc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b696bae7de0345f64c15721349340cddcdbe3519 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_misc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_orc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_orc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31365e78dfedef377cd21ba5d700da1aaa28c57f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_orc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_pandas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_pandas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86b5c771b62602752f41ba574d7e05aaf012de10 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_pandas.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_schema.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09187b0c49ea0ce1aa14231d19ed6768ba3e6fd1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_schema.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_sparse_tensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_sparse_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32571a40e6b5b4187de22336a7e7b9ff1825df56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_sparse_tensor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_strategies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_strategies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe19e19cd7bcb533fcde8ef63ea090df2c8f6516 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_strategies.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_substrait.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_substrait.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..926493d887e13cc803c10488196cb5dc6dbb87ae Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_substrait.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_table.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_table.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6594970e94e70752e7745b5f52384ce3e64ebad1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_table.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_tensor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f40fac6803aafade38d0d8ed96375d5d5db1a18f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_tensor.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_types.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1afdb99b8f812c63482ebdf78042cb0f20b4241d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_types.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_udf.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_udf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d2a80f54ef231422d43936d939e952f277d3022 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/test_udf.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea8ae844ab8e3766277fa07071497ad541aad2c2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/__pycache__/util.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/extensions.pyx b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/extensions.pyx new file mode 100644 index 0000000000000000000000000000000000000000..c1bf9aae1ec03053355671431b951d50a42d56f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/extensions.pyx @@ -0,0 +1,94 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# distutils: language=c++ +# cython: language_level = 3 + +from pyarrow.lib cimport * + +cdef extern from * namespace "arrow::py" nogil: + """ + #include "arrow/status.h" + #include "arrow/extension_type.h" + #include "arrow/ipc/json_simple.h" + + namespace arrow { + namespace py { + + class UuidArray : public ExtensionArray { + public: + using ExtensionArray::ExtensionArray; + }; + + class UuidType : public ExtensionType { + public: + UuidType() : ExtensionType(fixed_size_binary(16)) {} + std::string extension_name() const override { return "uuid"; } + + bool ExtensionEquals(const ExtensionType& other) const override { + return other.extension_name() == this->extension_name(); + } + + std::shared_ptr<Array> MakeArray(std::shared_ptr<ArrayData> data) const override { + return std::make_shared<UuidArray>(data); + } + + Result<std::shared_ptr<DataType>> Deserialize( + std::shared_ptr<DataType> storage_type, + const std::string& serialized) const override { + return std::make_shared<UuidType>(); + } + + std::string Serialize() const override { return ""; } + }; + + + std::shared_ptr<DataType> MakeUuidType() { + return std::make_shared<UuidType>(); + } + + std::shared_ptr<Array> MakeUuidArray() { + auto uuid_type = MakeUuidType(); + auto json = "[\\"abcdefghijklmno0\\", \\"0onmlkjihgfedcba\\"]"; + auto result = ipc::internal::json::ArrayFromJSON(fixed_size_binary(16), json); + return ExtensionType::WrapArray(uuid_type, result.ValueOrDie()); + } + + std::once_flag uuid_registered; + + static bool RegisterUuidType() { + std::call_once(uuid_registered, RegisterExtensionType, + std::make_shared<UuidType>()); + return true; + } + + static auto uuid_type_registered = RegisterUuidType(); + + } // namespace py + } // namespace arrow + """ + + cdef shared_ptr[CDataType] CMakeUuidType" arrow::py::MakeUuidType"() + cdef shared_ptr[CArray] CMakeUuidArray" arrow::py::MakeUuidArray"() + + +def _make_uuid_type(): + return pyarrow_wrap_data_type(CMakeUuidType()) + + +def _make_uuid_array(): + return pyarrow_wrap_array(CMakeUuidArray()) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bab187de8cf7c07d01cd55eac1b5f222c52d5b72 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/test_conversion.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/test_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ede974ca7a7e6f1b04f4197bd4eb77905efe5fa9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/test_conversion.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/test_interchange_spec.cpython-310.pyc
b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/test_interchange_spec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c784517bdff181b9e88bdadf3a476aeb639281d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/__pycache__/test_interchange_spec.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/test_interchange_spec.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/test_interchange_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..826089652bca6bbe51f05f7702d5f54ecac93429 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/interchange/test_interchange_spec.py @@ -0,0 +1,288 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import ctypes +import hypothesis as h +import hypothesis.strategies as st + +import numpy as np +import pyarrow as pa +import pyarrow.tests.strategies as past +import pytest + + +all_types = st.deferred( + lambda: ( + past.signed_integer_types | + past.unsigned_integer_types | + past.floating_types | + past.bool_type | + past.string_type | + past.large_string_type + ) +) + + +# datetime is tested in test_extra.py +# dictionary is tested in test_categorical() +@h.given(past.arrays(all_types, size=3)) +def test_dtypes(arr): + table = pa.table([arr], names=["a"]) + df = table.__dataframe__() + + null_count = df.get_column(0).null_count + assert null_count == arr.null_count + assert isinstance(null_count, int) + assert df.get_column(0).size() == 3 + assert df.get_column(0).offset == 0 + + +@pytest.mark.parametrize( + "uint, uint_bw", + [ + (pa.uint8(), 8), + (pa.uint16(), 16), + (pa.uint32(), 32) + ] +) +@pytest.mark.parametrize( + "int, int_bw", [ + (pa.int8(), 8), + (pa.int16(), 16), + (pa.int32(), 32), + (pa.int64(), 64) + ] +) +@pytest.mark.parametrize( + "float, float_bw, np_float", [ + (pa.float16(), 16, np.float16), + (pa.float32(), 32, np.float32), + (pa.float64(), 64, np.float64) + ] +) +@pytest.mark.parametrize("unit", ['s', 'ms', 'us', 'ns']) +@pytest.mark.parametrize("tz", ['', 'America/New_York', '+07:30', '-04:30']) +@pytest.mark.parametrize("use_batch", [False, True]) +def test_mixed_dtypes(uint, uint_bw, int, int_bw, + float, float_bw, np_float, unit, tz, + use_batch): + from datetime import datetime as dt + arr = [1, 2, 3] + dt_arr = [dt(2007, 7, 13), dt(2007, 7, 14), dt(2007, 7, 15)] + table = pa.table( + { + "a": pa.array(arr, type=uint), + "b": pa.array(arr, type=int), + "c": pa.array(np.array(arr, dtype=np_float), type=float), + "d": [True, False, True], + "e": ["a", "", "c"], + "f": pa.array(dt_arr, type=pa.timestamp(unit, tz=tz)) + } + ) + if use_batch: + table = 
table.to_batches()[0] + df = table.__dataframe__() + # 0 = DtypeKind.INT, 1 = DtypeKind.UINT, 2 = DtypeKind.FLOAT, + # 20 = DtypeKind.BOOL, 21 = DtypeKind.STRING, 22 = DtypeKind.DATETIME + # see DtypeKind class in column.py + columns = {"a": 1, "b": 0, "c": 2, "d": 20, "e": 21, "f": 22} + + for column, kind in columns.items(): + col = df.get_column_by_name(column) + + assert col.null_count == 0 + assert col.size() == 3 + assert col.offset == 0 + assert col.dtype[0] == kind + + assert df.get_column_by_name("a").dtype[1] == uint_bw + assert df.get_column_by_name("b").dtype[1] == int_bw + assert df.get_column_by_name("c").dtype[1] == float_bw + + +def test_na_float(): + table = pa.table({"a": [1.0, None, 2.0]}) + df = table.__dataframe__() + col = df.get_column_by_name("a") + assert col.null_count == 1 + assert isinstance(col.null_count, int) + + +def test_noncategorical(): + table = pa.table({"a": [1, 2, 3]}) + df = table.__dataframe__() + col = df.get_column_by_name("a") + with pytest.raises(TypeError, match=".*categorical.*"): + col.describe_categorical + + +@pytest.mark.parametrize("use_batch", [False, True]) +def test_categorical(use_batch): + import pyarrow as pa + arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", None] + table = pa.table( + {"weekday": pa.array(arr).dictionary_encode()} + ) + if use_batch: + table = table.to_batches()[0] + + col = table.__dataframe__().get_column_by_name("weekday") + categorical = col.describe_categorical + assert isinstance(categorical["is_ordered"], bool) + assert isinstance(categorical["is_dictionary"], bool) + + +@pytest.mark.parametrize("use_batch", [False, True]) +def test_dataframe(use_batch): + n = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + a = pa.chunked_array([["Flamingo", "Parrot", "Cow"], + ["Horse", "Brittle stars", "Centipede"]]) + table = pa.table([n, a], names=['n_legs', 'animals']) + if use_batch: + table = table.combine_chunks().to_batches()[0] + df = table.__dataframe__() + + assert df.num_columns() == 2 + assert df.num_rows() == 6 + if use_batch: + assert df.num_chunks() == 1 + else: + assert df.num_chunks() == 2 + assert list(df.column_names()) == ['n_legs', 'animals'] + assert list(df.select_columns((1,)).column_names()) == list( + df.select_columns_by_name(("animals",)).column_names() + ) + + +@pytest.mark.parametrize("use_batch", [False, True]) +@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)]) +def test_df_get_chunks(use_batch, size, n_chunks): + table = pa.table({"x": list(range(size))}) + if use_batch: + table = table.to_batches()[0] + df = table.__dataframe__() + chunks = list(df.get_chunks(n_chunks)) + assert len(chunks) == n_chunks + assert sum(chunk.num_rows() for chunk in chunks) == size + + +@pytest.mark.parametrize("use_batch", [False, True]) +@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)]) +def test_column_get_chunks(use_batch, size, n_chunks): + table = pa.table({"x": list(range(size))}) + if use_batch: + table = table.to_batches()[0] + df = table.__dataframe__() + chunks = list(df.get_column(0).get_chunks(n_chunks)) + assert len(chunks) == n_chunks + assert sum(chunk.size() for chunk in chunks) == size + + +@pytest.mark.pandas +@pytest.mark.parametrize( + "uint", [pa.uint8(), pa.uint16(), pa.uint32()] +) +@pytest.mark.parametrize( + "int", [pa.int8(), pa.int16(), pa.int32(), pa.int64()] +) +@pytest.mark.parametrize( + "float, np_float", [ + (pa.float16(), np.float16), + (pa.float32(), np.float32), + (pa.float64(), np.float64) + ] +) 
+@pytest.mark.parametrize("use_batch", [False, True]) +def test_get_columns(uint, int, float, np_float, use_batch): + arr = [[1, 2, 3], [4, 5]] + arr_float = np.array([1, 2, 3, 4, 5], dtype=np_float) + table = pa.table( + { + "a": pa.chunked_array(arr, type=uint), + "b": pa.chunked_array(arr, type=int), + "c": pa.array(arr_float, type=float) + } + ) + if use_batch: + table = table.combine_chunks().to_batches()[0] + df = table.__dataframe__() + for col in df.get_columns(): + assert col.size() == 5 + assert col.num_chunks() == 1 + + # 0 = DtypeKind.INT, 1 = DtypeKind.UINT, 2 = DtypeKind.FLOAT, + # see DtypeKind class in column.py + assert df.get_column(0).dtype[0] == 1 # UINT + assert df.get_column(1).dtype[0] == 0 # INT + assert df.get_column(2).dtype[0] == 2 # FLOAT + + +@pytest.mark.parametrize( + "int", [pa.int8(), pa.int16(), pa.int32(), pa.int64()] +) +@pytest.mark.parametrize("use_batch", [False, True]) +def test_buffer(int, use_batch): + arr = [0, 1, -1] + table = pa.table({"a": pa.array(arr, type=int)}) + if use_batch: + table = table.to_batches()[0] + df = table.__dataframe__() + col = df.get_column(0) + buf = col.get_buffers() + + dataBuf, dataDtype = buf["data"] + + assert dataBuf.bufsize > 0 + assert dataBuf.ptr != 0 + device, _ = dataBuf.__dlpack_device__() + + # 0 = DtypeKind.INT + # see DtypeKind class in column.py + assert dataDtype[0] == 0 + + if device == 1: # CPU-only as we're going to directly read memory here + bitwidth = dataDtype[1] + ctype = { + 8: ctypes.c_int8, + 16: ctypes.c_int16, + 32: ctypes.c_int32, + 64: ctypes.c_int64, + }[bitwidth] + + for idx, truth in enumerate(arr): + val = ctype.from_address(dataBuf.ptr + idx * (bitwidth // 8)).value + assert val == truth, f"Buffer at index {idx} mismatch" + + +@pytest.mark.parametrize( + "indices_type, bitwidth, f_string", [ + (pa.int8(), 8, "c"), + (pa.int16(), 16, "s"), + (pa.int32(), 32, "i"), + (pa.int64(), 64, "l") + ] +) +def test_categorical_dtype(indices_type, bitwidth, f_string): + type = pa.dictionary(indices_type, pa.string()) + arr = pa.array(["a", "b", None, "d"], type) + table = pa.table({'a': arr}) + + df = table.__dataframe__() + col = df.get_column(0) + assert col.dtype[0] == 23 # + assert col.dtype[1] == bitwidth + assert col.dtype[2] == f_string diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/pandas_examples.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/pandas_examples.py new file mode 100644 index 0000000000000000000000000000000000000000..466c14eeb6f5f80d5ccfe1d4a6bc7f5216b23561 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/pandas_examples.py @@ -0,0 +1,172 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from collections import OrderedDict +from datetime import date, time + +import numpy as np +import pandas as pd +import pyarrow as pa + + +def dataframe_with_arrays(include_index=False): + """ + Dataframe with numpy arrays columns of every possible primitive type. + + Returns + ------- + df: pandas.DataFrame + schema: pyarrow.Schema + Arrow schema definition that is in line with the constructed df. + """ + dtypes = [('i1', pa.int8()), ('i2', pa.int16()), + ('i4', pa.int32()), ('i8', pa.int64()), + ('u1', pa.uint8()), ('u2', pa.uint16()), + ('u4', pa.uint32()), ('u8', pa.uint64()), + ('f4', pa.float32()), ('f8', pa.float64())] + + arrays = OrderedDict() + fields = [] + for dtype, arrow_dtype in dtypes: + fields.append(pa.field(dtype, pa.list_(arrow_dtype))) + arrays[dtype] = [ + np.arange(10, dtype=dtype), + np.arange(5, dtype=dtype), + None, + np.arange(1, dtype=dtype) + ] + + fields.append(pa.field('str', pa.list_(pa.string()))) + arrays['str'] = [ + np.array(["1", "ä"], dtype="object"), + None, + np.array(["1"], dtype="object"), + np.array(["1", "2", "3"], dtype="object") + ] + + fields.append(pa.field('datetime64', pa.list_(pa.timestamp('ms')))) + arrays['datetime64'] = [ + np.array(['2007-07-13T01:23:34.123456789', + None, + '2010-08-13T05:46:57.437699912'], + dtype='datetime64[ms]'), + None, + None, + np.array(['2007-07-13T02', + None, + '2010-08-13T05:46:57.437699912'], + dtype='datetime64[ms]'), + ] + + if include_index: + fields.append(pa.field('__index_level_0__', pa.int64())) + df = pd.DataFrame(arrays) + schema = pa.schema(fields) + + return df, schema + + +def dataframe_with_lists(include_index=False, parquet_compatible=False): + """ + Dataframe with list columns of every possible primitive type. + + Returns + ------- + df: pandas.DataFrame + schema: pyarrow.Schema + Arrow schema definition that is in line with the constructed df. + parquet_compatible: bool + Exclude types not supported by parquet + """ + arrays = OrderedDict() + fields = [] + + fields.append(pa.field('int64', pa.list_(pa.int64()))) + arrays['int64'] = [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + [0, 1, 2, 3, 4], + None, + [], + np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9] * 2, + dtype=np.int64)[::2] + ] + fields.append(pa.field('double', pa.list_(pa.float64()))) + arrays['double'] = [ + [0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], + [0., 1., 2., 3., 4.], + None, + [], + np.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.] 
* 2)[::2], + ] + fields.append(pa.field('bytes_list', pa.list_(pa.binary()))) + arrays['bytes_list'] = [ + [b"1", b"f"], + None, + [b"1"], + [b"1", b"2", b"3"], + [], + ] + fields.append(pa.field('str_list', pa.list_(pa.string()))) + arrays['str_list'] = [ + ["1", "ä"], + None, + ["1"], + ["1", "2", "3"], + [], + ] + + date_data = [ + [], + [date(2018, 1, 1), date(2032, 12, 30)], + [date(2000, 6, 7)], + None, + [date(1969, 6, 9), date(1972, 7, 3)] + ] + time_data = [ + [time(23, 11, 11), time(1, 2, 3), time(23, 59, 59)], + [], + [time(22, 5, 59)], + None, + [time(0, 0, 0), time(18, 0, 2), time(12, 7, 3)] + ] + + temporal_pairs = [ + (pa.date32(), date_data), + (pa.date64(), date_data), + (pa.time32('s'), time_data), + (pa.time32('ms'), time_data), + (pa.time64('us'), time_data) + ] + if not parquet_compatible: + temporal_pairs += [ + (pa.time64('ns'), time_data), + ] + + for value_type, data in temporal_pairs: + field_name = '{}_list'.format(value_type) + field_type = pa.list_(value_type) + field = pa.field(field_name, field_type) + fields.append(field) + arrays[field_name] = data + + if include_index: + fields.append(pa.field('__index_level_0__', pa.int64())) + + df = pd.DataFrame(arrays) + schema = pa.schema(fields) + + return df, schema diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07f63094b07f8bb8ab29e4710e2bab3a0c0f007a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/encryption.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/encryption.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce3bf78b7e8e2d42f5f94c5674f314814a0bc6f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/encryption.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_basic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..497f42c1ad5dfd50ad092321a0c100d82b7d3856 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_basic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_data_types.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_data_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b002b938d8e7c36909200dd36c963c0c911f9a0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_data_types.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_encryption.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_encryption.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52012a446b146ee4f7309ba0f27a96623c405896 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_encryption.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_parquet_file.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_parquet_file.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ded1e7ec5c054bf76b57fa17dd01b1dd90652409 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_parquet_file.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_parquet_writer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_parquet_writer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc51c04aa005a7f199e3967126ec84420089614c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/__pycache__/test_parquet_writer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/conftest.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..461c24af22aa96974617804a62c93635b516c1b3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/conftest.py @@ -0,0 +1,90 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import pytest + +from pyarrow.util import guid + + +@pytest.fixture(scope='module') +def datadir(base_datadir): + return base_datadir / 'parquet' + + +@pytest.fixture +def s3_bucket(s3_server): + boto3 = pytest.importorskip('boto3') + botocore = pytest.importorskip('botocore') + s3_bucket_name = 'test-s3fs' + + host, port, access_key, secret_key = s3_server['connection'] + s3_client = boto3.client( + 's3', + endpoint_url='http://{}:{}'.format(host, port), + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + config=botocore.client.Config(signature_version='s3v4'), + region_name='us-east-1' + ) + + try: + s3_client.create_bucket(Bucket=s3_bucket_name) + except Exception: + pass # we get BucketAlreadyOwnedByYou error with fsspec handler + finally: + s3_client.close() + + return s3_bucket_name + + +@pytest.fixture +def s3_example_s3fs(s3_server, s3_bucket): + s3fs = pytest.importorskip('s3fs') + + host, port, access_key, secret_key = s3_server['connection'] + fs = s3fs.S3FileSystem( + key=access_key, + secret=secret_key, + client_kwargs={ + 'endpoint_url': 'http://{}:{}'.format(host, port) + } + ) + + test_path = '{}/{}'.format(s3_bucket, guid()) + + fs.mkdir(test_path) + yield fs, test_path + try: + fs.rm(test_path, recursive=True) + except FileNotFoundError: + pass + + +@pytest.fixture +def s3_example_fs(s3_server): + from pyarrow.fs import FileSystem + + host, port, access_key, secret_key = s3_server['connection'] + uri = ( + "s3://{}:{}@mybucket/data.parquet?scheme=http&endpoint_override={}:{}" + .format(access_key, secret_key, host, port) + ) + fs, path = FileSystem.from_uri(uri) + + fs.create_dir("mybucket") + + yield fs, uri, path diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_compliant_nested_type.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_compliant_nested_type.py new file mode 100644 index 0000000000000000000000000000000000000000..2345855a3321b6af48acfc0fcba0732e4af2c92c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_compliant_nested_type.py @@ -0,0 +1,109 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +import pyarrow as pa + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import (_read_table, + _check_roundtrip) +except ImportError: + pq = None + +try: + import pandas as pd + import pandas.testing as tm + + from pyarrow.tests.parquet.common import _roundtrip_pandas_dataframe +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... 
-m 'not parquet' +pytestmark = pytest.mark.parquet + + +# Tests for ARROW-11497 +_test_data_simple = [ + {'items': [1, 2]}, + {'items': [0]}, +] + +_test_data_complex = [ + {'items': [{'name': 'elem1', 'value': '1'}, + {'name': 'elem2', 'value': '2'}]}, + {'items': [{'name': 'elem1', 'value': '0'}]}, +] + +parametrize_test_data = pytest.mark.parametrize( + "test_data", [_test_data_simple, _test_data_complex]) + + +@pytest.mark.pandas +@parametrize_test_data +def test_write_compliant_nested_type_enable(tempdir, test_data): + # prepare dataframe for testing + df = pd.DataFrame(data=test_data) + # verify that we can read/write pandas df with new flag (default behaviour) + _roundtrip_pandas_dataframe(df, + write_kwargs={}) + + # Write to a parquet file with compliant nested type + table = pa.Table.from_pandas(df, preserve_index=False) + path = str(tempdir / 'data.parquet') + with pq.ParquetWriter(path, table.schema, + version='2.6') as writer: + writer.write_table(table) + # Read back as a table + new_table = _read_table(path) + # Validate that "items" columns compliant to Parquet nested format + # Should be like this: list<element: ...> + assert isinstance(new_table.schema.types[0], pa.ListType) + assert new_table.schema.types[0].value_field.name == 'element' + + # Verify that the new table can be read/written correctly + _check_roundtrip(new_table) + + +@pytest.mark.pandas +@parametrize_test_data +def test_write_compliant_nested_type_disable(tempdir, test_data): + # prepare dataframe for testing + df = pd.DataFrame(data=test_data) + # verify that we can read/write with new flag disabled + _roundtrip_pandas_dataframe(df, write_kwargs={ + 'use_compliant_nested_type': False}) + + # Write to a parquet file while disabling compliant nested type + table = pa.Table.from_pandas(df, preserve_index=False) + path = str(tempdir / 'data.parquet') + with pq.ParquetWriter(path, table.schema, version='2.6', + use_compliant_nested_type=False) as writer: + writer.write_table(table) + new_table = _read_table(path) + + # Validate that "items" columns is not compliant to Parquet nested format + # Should be like this: list<item: ...> + assert isinstance(new_table.schema.types[0], pa.ListType) + assert new_table.schema.types[0].value_field.name == 'item' + + # Verify that the new table can be read/written correctly + _check_roundtrip(new_table, + use_compliant_nested_type=False) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_datetime.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..6a9cbd4f73d4f26851c449ca75ec2f8ece1732c6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/parquet/test_datetime.py @@ -0,0 +1,457 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied.
See the License for the +# specific language governing permissions and limitations +# under the License. + +import datetime +import io +import warnings + +import numpy as np +import pytest + +import pyarrow as pa +from pyarrow.tests.parquet.common import _check_roundtrip + +try: + import pyarrow.parquet as pq + from pyarrow.tests.parquet.common import _read_table, _write_table +except ImportError: + pq = None + + +try: + import pandas as pd + import pandas.testing as tm + + from pyarrow.tests.parquet.common import _roundtrip_pandas_dataframe +except ImportError: + pd = tm = None + + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not parquet' +pytestmark = pytest.mark.parquet + + +@pytest.mark.pandas +def test_pandas_parquet_datetime_tz(): + # Pandas v2 defaults to [ns], but Arrow defaults to [us] time units + # so we need to cast the pandas dtype. Pandas v1 will always silently + # coerce to [ns] due to lack of non-[ns] support. + s = pd.Series([datetime.datetime(2017, 9, 6)], dtype='datetime64[us]') + s = s.dt.tz_localize('utc') + s.index = s + + # Both a column and an index to hit both use cases + df = pd.DataFrame({'tz_aware': s, + 'tz_eastern': s.dt.tz_convert('US/Eastern')}, + index=s) + + f = io.BytesIO() + + arrow_table = pa.Table.from_pandas(df) + + _write_table(arrow_table, f) + f.seek(0) + + table_read = pq.read_pandas(f) + + df_read = table_read.to_pandas() + tm.assert_frame_equal(df, df_read) + + +@pytest.mark.pandas +def test_datetime_timezone_tzinfo(): + value = datetime.datetime(2018, 1, 1, 1, 23, 45, + tzinfo=datetime.timezone.utc) + df = pd.DataFrame({'foo': [value]}) + + _roundtrip_pandas_dataframe(df, write_kwargs={}) + + +@pytest.mark.pandas +def test_coerce_timestamps(tempdir): + from collections import OrderedDict + + # ARROW-622 + arrays = OrderedDict() + fields = [pa.field('datetime64', + pa.list_(pa.timestamp('ms')))] + arrays['datetime64'] = [ + np.array(['2007-07-13T01:23:34.123456789', + None, + '2010-08-13T05:46:57.437699912'], + dtype='datetime64[ms]'), + None, + None, + np.array(['2007-07-13T02', + None, + '2010-08-13T05:46:57.437699912'], + dtype='datetime64[ms]'), + ] + + df = pd.DataFrame(arrays) + schema = pa.schema(fields) + + filename = tempdir / 'pandas_roundtrip.parquet' + arrow_table = pa.Table.from_pandas(df, schema=schema) + + _write_table(arrow_table, filename, version='2.6', coerce_timestamps='us') + table_read = _read_table(filename) + df_read = table_read.to_pandas() + + df_expected = df.copy() + for i, x in enumerate(df_expected['datetime64']): + if isinstance(x, np.ndarray): + df_expected['datetime64'][i] = x.astype('M8[us]') + + tm.assert_frame_equal(df_expected, df_read) + + with pytest.raises(ValueError): + _write_table(arrow_table, filename, version='2.6', + coerce_timestamps='unknown') + + +@pytest.mark.pandas +def test_coerce_timestamps_truncated(tempdir): + """ + ARROW-2555: Test that we can truncate timestamps when coercing if + explicitly allowed. 
+ """ + dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1, + second=1, microsecond=1) + dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1, + second=1) + + fields_us = [pa.field('datetime64', pa.timestamp('us'))] + arrays_us = {'datetime64': [dt_us, dt_ms]} + + df_us = pd.DataFrame(arrays_us) + schema_us = pa.schema(fields_us) + + filename = tempdir / 'pandas_truncated.parquet' + table_us = pa.Table.from_pandas(df_us, schema=schema_us) + + _write_table(table_us, filename, version='2.6', coerce_timestamps='ms', + allow_truncated_timestamps=True) + table_ms = _read_table(filename) + df_ms = table_ms.to_pandas() + + arrays_expected = {'datetime64': [dt_ms, dt_ms]} + df_expected = pd.DataFrame(arrays_expected, dtype='datetime64[ms]') + tm.assert_frame_equal(df_expected, df_ms) + + +@pytest.mark.pandas +def test_date_time_types(tempdir): + t1 = pa.date32() + data1 = np.array([17259, 17260, 17261], dtype='int32') + a1 = pa.array(data1, type=t1) + + t2 = pa.date64() + data2 = data1.astype('int64') * 86400000 + a2 = pa.array(data2, type=t2) + + t3 = pa.timestamp('us') + start = pd.Timestamp('2001-01-01').value / 1000 + data3 = np.array([start, start + 1, start + 2], dtype='int64') + a3 = pa.array(data3, type=t3) + + t4 = pa.time32('ms') + data4 = np.arange(3, dtype='i4') + a4 = pa.array(data4, type=t4) + + t5 = pa.time64('us') + a5 = pa.array(data4.astype('int64'), type=t5) + + t6 = pa.time32('s') + a6 = pa.array(data4, type=t6) + + ex_t6 = pa.time32('ms') + ex_a6 = pa.array(data4 * 1000, type=ex_t6) + + t7 = pa.timestamp('ns') + start = pd.Timestamp('2001-01-01').value + data7 = np.array([start, start + 1000, start + 2000], + dtype='int64') + a7 = pa.array(data7, type=t7) + + table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7], + ['date32', 'date64', 'timestamp[us]', + 'time32[s]', 'time64[us]', + 'time32_from64[s]', + 'timestamp[ns]']) + + # date64 as date32 + # time32[s] to time32[ms] + expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7], + ['date32', 'date64', 'timestamp[us]', + 'time32[s]', 'time64[us]', + 'time32_from64[s]', + 'timestamp[ns]']) + + _check_roundtrip(table, expected=expected, version='2.6') + + t0 = pa.timestamp('ms') + data0 = np.arange(4, dtype='int64') + a0 = pa.array(data0, type=t0) + + t1 = pa.timestamp('us') + data1 = np.arange(4, dtype='int64') + a1 = pa.array(data1, type=t1) + + t2 = pa.timestamp('ns') + data2 = np.arange(4, dtype='int64') + a2 = pa.array(data2, type=t2) + + table = pa.Table.from_arrays([a0, a1, a2], + ['ts[ms]', 'ts[us]', 'ts[ns]']) + expected = pa.Table.from_arrays([a0, a1, a2], + ['ts[ms]', 'ts[us]', 'ts[ns]']) + + # int64 for all timestamps supported by default + filename = tempdir / 'int64_timestamps.parquet' + _write_table(table, filename, version='2.6') + parquet_schema = pq.ParquetFile(filename).schema + for i in range(3): + assert parquet_schema.column(i).physical_type == 'INT64' + read_table = _read_table(filename) + assert read_table.equals(expected) + + t0_ns = pa.timestamp('ns') + data0_ns = np.array(data0 * 1000000, dtype='int64') + a0_ns = pa.array(data0_ns, type=t0_ns) + + t1_ns = pa.timestamp('ns') + data1_ns = np.array(data1 * 1000, dtype='int64') + a1_ns = pa.array(data1_ns, type=t1_ns) + + expected = pa.Table.from_arrays([a0_ns, a1_ns, a2], + ['ts[ms]', 'ts[us]', 'ts[ns]']) + + # int96 nanosecond timestamps produced upon request + filename = tempdir / 'explicit_int96_timestamps.parquet' + _write_table(table, filename, version='2.6', + use_deprecated_int96_timestamps=True) + 
parquet_schema = pq.ParquetFile(filename).schema + for i in range(3): + assert parquet_schema.column(i).physical_type == 'INT96' + read_table = _read_table(filename) + assert read_table.equals(expected) + + # int96 nanosecond timestamps implied by flavor 'spark' + filename = tempdir / 'spark_int96_timestamps.parquet' + _write_table(table, filename, version='2.6', + flavor='spark') + parquet_schema = pq.ParquetFile(filename).schema + for i in range(3): + assert parquet_schema.column(i).physical_type == 'INT96' + read_table = _read_table(filename) + assert read_table.equals(expected) + + +@pytest.mark.pandas +@pytest.mark.parametrize('unit', ['s', 'ms', 'us', 'ns']) +def test_coerce_int96_timestamp_unit(unit): + i_s = pd.Timestamp('2010-01-01').value / 1000000000 # := 1262304000 + + d_s = np.arange(i_s, i_s + 10, 1, dtype='int64') + d_ms = d_s * 1000 + d_us = d_ms * 1000 + d_ns = d_us * 1000 + + a_s = pa.array(d_s, type=pa.timestamp('s')) + a_ms = pa.array(d_ms, type=pa.timestamp('ms')) + a_us = pa.array(d_us, type=pa.timestamp('us')) + a_ns = pa.array(d_ns, type=pa.timestamp('ns')) + + arrays = {"s": a_s, "ms": a_ms, "us": a_us, "ns": a_ns} + names = ['ts_s', 'ts_ms', 'ts_us', 'ts_ns'] + table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names) + + # For either Parquet version, coercing to nanoseconds is allowed + # if Int96 storage is used + expected = pa.Table.from_arrays([arrays.get(unit)]*4, names) + read_table_kwargs = {"coerce_int96_timestamp_unit": unit} + _check_roundtrip(table, expected, + read_table_kwargs=read_table_kwargs, + use_deprecated_int96_timestamps=True) + _check_roundtrip(table, expected, version='2.6', + read_table_kwargs=read_table_kwargs, + use_deprecated_int96_timestamps=True) + + +@pytest.mark.pandas +@pytest.mark.parametrize('pq_reader_method', ['ParquetFile', 'read_table']) +def test_coerce_int96_timestamp_overflow(pq_reader_method, tempdir): + + def get_table(pq_reader_method, filename, **kwargs): + if pq_reader_method == "ParquetFile": + return pq.ParquetFile(filename, **kwargs).read() + elif pq_reader_method == "read_table": + return pq.read_table(filename, **kwargs) + + # Recreating the initial JIRA issue referenced in ARROW-12096 + oob_dts = [ + datetime.datetime(1000, 1, 1), + datetime.datetime(2000, 1, 1), + datetime.datetime(3000, 1, 1) + ] + df = pd.DataFrame({"a": oob_dts}) + table = pa.table(df) + + filename = tempdir / "test_round_trip_overflow.parquet" + pq.write_table(table, filename, use_deprecated_int96_timestamps=True, + version="1.0") + + # with the default resolution of ns, we get wrong values for INT96 + # that are out of bounds for nanosecond range + tab_error = get_table(pq_reader_method, filename) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", + "Discarding nonzero nanoseconds in conversion", + UserWarning) + assert tab_error["a"].to_pylist() != oob_dts + + # avoid this overflow by specifying the resolution to use for INT96 values + tab_correct = get_table( + pq_reader_method, filename, coerce_int96_timestamp_unit="s" + ) + df_correct = tab_correct.to_pandas(timestamp_as_object=True) + tm.assert_frame_equal(df, df_correct) + + +@pytest.mark.parametrize('unit', ['ms', 'us', 'ns']) +def test_timestamp_restore_timezone(unit): + # ARROW-5888, restore timezone from serialized metadata + ty = pa.timestamp(unit, tz='America/New_York') + arr = pa.array([1, 2, 3], type=ty) + t = pa.table([arr], names=['f0']) + _check_roundtrip(t) + + +def test_timestamp_restore_timezone_nanosecond(): + # ARROW-9634, also restore timezone 
for nanosecond data that get stored + # as microseconds in the parquet file for Parquet ver 2.4 and less + ty = pa.timestamp('ns', tz='America/New_York') + arr = pa.array([1000, 2000, 3000], type=ty) + table = pa.table([arr], names=['f0']) + ty_us = pa.timestamp('us', tz='America/New_York') + expected = pa.table([arr.cast(ty_us)], names=['f0']) + _check_roundtrip(table, expected=expected, version='2.4') + + +@pytest.mark.pandas +def test_list_of_datetime_time_roundtrip(): + # ARROW-4135 + times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00', + '11:30', '12:00'], format="%H:%M") + df = pd.DataFrame({'time': [times.time]}) + _roundtrip_pandas_dataframe(df, write_kwargs={}) + + +@pytest.mark.pandas +def test_parquet_version_timestamp_differences(): + i_s = pd.Timestamp('2010-01-01').value / 1000000000 # := 1262304000 + + d_s = np.arange(i_s, i_s + 10, 1, dtype='int64') + d_ms = d_s * 1000 + d_us = d_ms * 1000 + d_ns = d_us * 1000 + + a_s = pa.array(d_s, type=pa.timestamp('s')) + a_ms = pa.array(d_ms, type=pa.timestamp('ms')) + a_us = pa.array(d_us, type=pa.timestamp('us')) + a_ns = pa.array(d_ns, type=pa.timestamp('ns')) + + all_versions = ['1.0', '2.4', '2.6'] + + names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns'] + table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names) + + # Using Parquet version 1.0 and 2.4, seconds should be coerced to milliseconds + # and nanoseconds should be coerced to microseconds by default + expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names) + _check_roundtrip(table, expected, version='1.0') + _check_roundtrip(table, expected, version='2.4') + + # Using Parquet version 2.6, seconds should be coerced to milliseconds + # and nanoseconds should be retained by default + expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names) + _check_roundtrip(table, expected, version='2.6') + + # For either Parquet version coercing to milliseconds or microseconds + # is allowed + expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names) + for ver in all_versions: + _check_roundtrip(table, expected, coerce_timestamps='ms', version=ver) + + expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names) + for ver in all_versions: + _check_roundtrip(table, expected, version=ver, coerce_timestamps='us') + + # TODO: after pyarrow allows coerce_timestamps='ns', tests like the + # following should pass ... 
+ + # Using Parquet version 1.0, coercing to nanoseconds is not allowed + # expected = None + # with pytest.raises(NotImplementedError): + # _roundtrip_table(table, coerce_timestamps='ns') + + # Using Parquet version 2.0, coercing to nanoseconds is allowed + # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names) + # _check_roundtrip(table, expected, version='2.6', coerce_timestamps='ns') + + # For either Parquet version, coercing to nanoseconds is allowed + # if Int96 storage is used + expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names) + for ver in all_versions: + _check_roundtrip(table, expected, version=ver, + use_deprecated_int96_timestamps=True) + + +@pytest.mark.pandas +def test_noncoerced_nanoseconds_written_without_exception(tempdir): + # ARROW-1957: the Parquet version 2.0 writer preserves Arrow + # nanosecond timestamps by default + n = 9 + df = pd.DataFrame({'x': range(n)}, + index=pd.date_range('2017-01-01', freq='1n', periods=n)) + tb = pa.Table.from_pandas(df) + + filename = tempdir / 'written.parquet' + try: + pq.write_table(tb, filename, version='2.6') + except Exception: + pass + assert filename.exists() + + recovered_table = pq.read_table(filename) + assert tb.equals(recovered_table) + + # Loss of data through coercion (without explicit override) still an error + filename = tempdir / 'not_written.parquet' + with pytest.raises(ValueError): + pq.write_table(tb, filename, coerce_timestamps='ms', version='2.6') + + +def test_duration_type(): + # ARROW-6780 + arrays = [pa.array([0, 1, 2, 3], type=pa.duration(unit)) + for unit in ["s", "ms", "us", "ns"]] + table = pa.Table.from_arrays(arrays, ["d[s]", "d[ms]", "d[us]", "d[ns]"]) + + _check_roundtrip(table) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..d565d254143fb4a1ef54df11843dd225739e5abe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/read_record_batch.py @@ -0,0 +1,25 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# This file is called from a test in test_ipc.py. 
+ +import sys + +import pyarrow as pa + +with open(sys.argv[1], 'rb') as f: + pa.ipc.open_file(f).read_all().to_pandas() diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..83800b77f894b7b348310d032c364d5d1f68948a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cpp_internals.py @@ -0,0 +1,50 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os.path +from os.path import join as pjoin + +from pyarrow._pyarrow_cpp_tests import get_cpp_tests + + +def inject_cpp_tests(ns): + """ + Inject C++ tests as Python functions into namespace `ns` (a dict). + """ + for case in get_cpp_tests(): + def wrapper(case=case): + case() + wrapper.__name__ = wrapper.__qualname__ = case.name + wrapper.__module__ = ns['__name__'] + ns[case.name] = wrapper + + +inject_cpp_tests(globals()) + + +def test_pyarrow_include(): + # We need to make sure that pyarrow/include is always + # created. Either with PyArrow C++ header files or with + # Arrow C++ and PyArrow C++ header files together + + source = os.path.dirname(os.path.abspath(__file__)) + pyarrow_dir = pjoin(source, '..') + pyarrow_include = pjoin(pyarrow_dir, 'include') + pyarrow_cpp_include = pjoin(pyarrow_include, 'arrow', 'python') + + assert os.path.exists(pyarrow_include) + assert os.path.exists(pyarrow_cpp_include) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cython.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cython.py new file mode 100644 index 0000000000000000000000000000000000000000..59875e7b0113279213943d7c987db66e929b798c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_cython.py @@ -0,0 +1,201 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import os +import shutil +import subprocess +import sys + +import pytest + +import pyarrow as pa +import pyarrow.tests.util as test_util + + +here = os.path.dirname(os.path.abspath(__file__)) +test_ld_path = os.environ.get('PYARROW_TEST_LD_PATH', '') +if os.name == 'posix': + compiler_opts = ['-std=c++17'] +elif os.name == 'nt': + compiler_opts = ['-D_ENABLE_EXTENDED_ALIGNED_STORAGE', '/std:c++17'] +else: + compiler_opts = [] + + +setup_template = """if 1: + from setuptools import setup + from Cython.Build import cythonize + + import numpy as np + + import pyarrow as pa + + ext_modules = cythonize({pyx_file!r}) + compiler_opts = {compiler_opts!r} + custom_ld_path = {test_ld_path!r} + + for ext in ext_modules: + # XXX required for numpy/numpyconfig.h, + # included from arrow/python/api.h + ext.include_dirs.append(np.get_include()) + ext.include_dirs.append(pa.get_include()) + ext.libraries.extend(pa.get_libraries()) + ext.library_dirs.extend(pa.get_library_dirs()) + if custom_ld_path: + ext.library_dirs.append(custom_ld_path) + ext.extra_compile_args.extend(compiler_opts) + print("Extension module:", + ext, ext.include_dirs, ext.libraries, ext.library_dirs) + + setup( + ext_modules=ext_modules, + ) +""" + + +def check_cython_example_module(mod): + arr = pa.array([1, 2, 3]) + assert mod.get_array_length(arr) == 3 + with pytest.raises(TypeError, match="not an array"): + mod.get_array_length(None) + + scal = pa.scalar(123) + cast_scal = mod.cast_scalar(scal, pa.utf8()) + assert cast_scal == pa.scalar("123") + with pytest.raises(NotImplementedError, + match="casting scalars of type int64 to type list"): + mod.cast_scalar(scal, pa.list_(pa.int64())) + + +@pytest.mark.cython +def test_cython_api(tmpdir): + """ + Basic test for the Cython API. + """ + # Fail early if cython is not found + import cython # noqa + + with tmpdir.as_cwd(): + # Set up temporary workspace + pyx_file = 'pyarrow_cython_example.pyx' + shutil.copyfile(os.path.join(here, pyx_file), + os.path.join(str(tmpdir), pyx_file)) + # Create setup.py file + setup_code = setup_template.format(pyx_file=pyx_file, + compiler_opts=compiler_opts, + test_ld_path=test_ld_path) + with open('setup.py', 'w') as f: + f.write(setup_code) + + # ARROW-2263: Make environment with this pyarrow/ package first on the + # PYTHONPATH, for local dev environments + subprocess_env = test_util.get_modified_env_with_pythonpath() + + # Compile extension module + subprocess.check_call([sys.executable, 'setup.py', + 'build_ext', '--inplace'], + env=subprocess_env) + + # Check basic functionality + orig_path = sys.path[:] + sys.path.insert(0, str(tmpdir)) + try: + mod = __import__('pyarrow_cython_example') + check_cython_example_module(mod) + finally: + sys.path = orig_path + + # Check the extension module is loadable from a subprocess without + # pyarrow imported first. + code = """if 1: + import sys + import os + + try: + # Add dll directory was added on python 3.8 + # and is required in order to find extra DLLs + # only for win32 + for dir in {library_dirs}: + os.add_dll_directory(dir) + except AttributeError: + pass + + mod = __import__({mod_name!r}) + arr = mod.make_null_array(5) + assert mod.get_array_length(arr) == 5 + assert arr.null_count == 5 + """.format(mod_name='pyarrow_cython_example', + library_dirs=pa.get_library_dirs()) + + path_var = None + if sys.platform == 'win32': + if not hasattr(os, 'add_dll_directory'): + # Python 3.8 onwards don't check extension module DLLs on path + # we have to use os.add_dll_directory instead. 
+ delim, path_var = ';', 'PATH' + elif sys.platform == 'darwin': + delim, path_var = ':', 'DYLD_LIBRARY_PATH' + else: + delim, path_var = ':', 'LD_LIBRARY_PATH' + + if path_var: + paths = sys.path + paths += pa.get_library_dirs() + paths += [subprocess_env.get(path_var, '')] + paths = [path for path in paths if path] + subprocess_env[path_var] = delim.join(paths) + subprocess.check_call([sys.executable, '-c', code], + stdout=subprocess.PIPE, + env=subprocess_env) + + +@pytest.mark.cython +def test_visit_strings(tmpdir): + with tmpdir.as_cwd(): + # Set up temporary workspace + pyx_file = 'bound_function_visit_strings.pyx' + shutil.copyfile(os.path.join(here, pyx_file), + os.path.join(str(tmpdir), pyx_file)) + # Create setup.py file + setup_code = setup_template.format(pyx_file=pyx_file, + compiler_opts=compiler_opts, + test_ld_path=test_ld_path) + with open('setup.py', 'w') as f: + f.write(setup_code) + + subprocess_env = test_util.get_modified_env_with_pythonpath() + + # Compile extension module + subprocess.check_call([sys.executable, 'setup.py', + 'build_ext', '--inplace'], + env=subprocess_env) + + sys.path.insert(0, str(tmpdir)) + mod = __import__('bound_function_visit_strings') + + strings = ['a', 'b', 'c'] + visited = [] + mod._visit_strings(strings, visited.append) + + assert visited == strings + + with pytest.raises(ValueError, match="wtf"): + def raise_on_b(s): + if s == 'b': + raise ValueError('wtf') + + mod._visit_strings(strings, raise_on_b) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py new file mode 100644 index 0000000000000000000000000000000000000000..d25b22990abfb68a29be4e7e343de018245150cd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_dataset_encryption.py @@ -0,0 +1,153 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from datetime import timedelta +import pyarrow.fs as fs +import pyarrow as pa +import pytest + +encryption_unavailable = False + +try: + import pyarrow.dataset as ds +except ImportError: + ds = None + +try: + from pyarrow.tests.parquet.encryption import InMemoryKmsClient + import pyarrow.parquet.encryption as pe +except ImportError: + encryption_unavailable = True + + +# Marks all of the tests in this module +pytestmark = pytest.mark.dataset + + +FOOTER_KEY = b"0123456789112345" +FOOTER_KEY_NAME = "footer_key" +COL_KEY = b"1234567890123450" +COL_KEY_NAME = "col_key" + + +def create_sample_table(): + return pa.table( + { + "year": [2020, 2022, 2021, 2022, 2019, 2021], + "n_legs": [2, 2, 4, 4, 5, 100], + "animal": [ + "Flamingo", + "Parrot", + "Dog", + "Horse", + "Brittle stars", + "Centipede", + ], + } + ) + + +def create_encryption_config(): + return pe.EncryptionConfiguration( + footer_key=FOOTER_KEY_NAME, + plaintext_footer=False, + column_keys={COL_KEY_NAME: ["n_legs", "animal"]}, + encryption_algorithm="AES_GCM_V1", + # requires timedelta or an assertion is raised + cache_lifetime=timedelta(minutes=5.0), + data_key_length_bits=256, + ) + + +def create_decryption_config(): + return pe.DecryptionConfiguration(cache_lifetime=300) + + +def create_kms_connection_config(): + return pe.KmsConnectionConfig( + custom_kms_conf={ + FOOTER_KEY_NAME: FOOTER_KEY.decode("UTF-8"), + COL_KEY_NAME: COL_KEY.decode("UTF-8"), + } + ) + + +def kms_factory(kms_connection_configuration): + return InMemoryKmsClient(kms_connection_configuration) + + +@pytest.mark.skipif( + encryption_unavailable, reason="Parquet Encryption is not currently enabled" +) +def test_dataset_encryption_decryption(): + table = create_sample_table() + + encryption_config = create_encryption_config() + decryption_config = create_decryption_config() + kms_connection_config = create_kms_connection_config() + + crypto_factory = pe.CryptoFactory(kms_factory) + parquet_encryption_cfg = ds.ParquetEncryptionConfig( + crypto_factory, kms_connection_config, encryption_config + ) + parquet_decryption_cfg = ds.ParquetDecryptionConfig( + crypto_factory, kms_connection_config, decryption_config + ) + + # create write_options with dataset encryption config + pformat = pa.dataset.ParquetFileFormat() + write_options = pformat.make_write_options(encryption_config=parquet_encryption_cfg) + + mockfs = fs._MockFileSystem() + mockfs.create_dir("/") + + ds.write_dataset( + data=table, + base_dir="sample_dataset", + format=pformat, + file_options=write_options, + filesystem=mockfs, + ) + + # read without decryption config -> should error if dataset was properly encrypted + pformat = pa.dataset.ParquetFileFormat() + with pytest.raises(IOError, match=r"no decryption"): + ds.dataset("sample_dataset", format=pformat, filesystem=mockfs) + + # set decryption config for parquet fragment scan options + pq_scan_opts = ds.ParquetFragmentScanOptions( + decryption_config=parquet_decryption_cfg + ) + pformat = pa.dataset.ParquetFileFormat(default_fragment_scan_options=pq_scan_opts) + dataset = ds.dataset("sample_dataset", format=pformat, filesystem=mockfs) + + assert table.equals(dataset.to_table()) + + +@pytest.mark.skipif( + not encryption_unavailable, reason="Parquet Encryption is currently enabled" +) +def test_write_dataset_parquet_without_encryption(): + """Test write_dataset with ParquetFileFormat and test if an exception is thrown + if you try to set encryption_config using make_write_options""" + + # Set the encryption configuration using ParquetFileFormat + # 
and make_write_options + pformat = pa.dataset.ParquetFileFormat() + + with pytest.raises(NotImplementedError): + _ = pformat.make_write_options(encryption_config="some value") diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_orc.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_orc.py new file mode 100644 index 0000000000000000000000000000000000000000..1b467d523304c44614ef23f17b5558bad9e26840 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_orc.py @@ -0,0 +1,637 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest +import decimal +import datetime + +import pyarrow as pa +from pyarrow import fs +from pyarrow.tests import util + + +# Marks all of the tests in this module +# Ignore these with pytest ... -m 'not orc' +pytestmark = pytest.mark.orc + + +try: + from pandas.testing import assert_frame_equal + import pandas as pd +except ImportError: + pass + + +@pytest.fixture(scope="module") +def datadir(base_datadir): + return base_datadir / "orc" + + +def fix_example_values(actual_cols, expected_cols): + """ + Fix type of expected values (as read from JSON) according to + actual ORC datatype. + """ + for name in expected_cols: + expected = expected_cols[name] + actual = actual_cols[name] + if (name == "map" and + [d.keys() == {'key', 'value'} for m in expected for d in m]): + # convert [{'key': k, 'value': v}, ...] to [(k, v), ...] + col = expected_cols[name].copy() + for i, m in enumerate(expected): + col[i] = [(d['key'], d['value']) for d in m] + expected_cols[name] = col + continue + + typ = actual[0].__class__ + if issubclass(typ, datetime.datetime): + # timestamp fields are represented as strings in JSON files + expected = pd.to_datetime(expected) + elif issubclass(typ, datetime.date): + # date fields are represented as strings in JSON files + expected = expected.dt.date + elif typ is decimal.Decimal: + converted_decimals = [None] * len(expected) + # decimal fields are represented as reals in JSON files + for i, (d, v) in enumerate(zip(actual, expected)): + if not pd.isnull(v): + exp = d.as_tuple().exponent + factor = 10 ** -exp + converted_decimals[i] = ( + decimal.Decimal(round(v * factor)).scaleb(exp)) + expected = pd.Series(converted_decimals) + + expected_cols[name] = expected + + +def check_example_values(orc_df, expected_df, start=None, stop=None): + if start is not None or stop is not None: + expected_df = expected_df[start:stop].reset_index(drop=True) + assert_frame_equal(orc_df, expected_df, check_dtype=False) + + +def check_example_file(orc_path, expected_df, need_fix=False): + """ + Check a ORC file against the expected columns dictionary. 
+ """ + from pyarrow import orc + + orc_file = orc.ORCFile(orc_path) + # Exercise ORCFile.read() + table = orc_file.read() + assert isinstance(table, pa.Table) + table.validate() + + # This workaround needed because of ARROW-3080 + orc_df = pd.DataFrame(table.to_pydict()) + + assert set(expected_df.columns) == set(orc_df.columns) + + # reorder columns if necessary + if not orc_df.columns.equals(expected_df.columns): + expected_df = expected_df.reindex(columns=orc_df.columns) + + if need_fix: + fix_example_values(orc_df, expected_df) + + check_example_values(orc_df, expected_df) + # Exercise ORCFile.read_stripe() + json_pos = 0 + for i in range(orc_file.nstripes): + batch = orc_file.read_stripe(i) + check_example_values(pd.DataFrame(batch.to_pydict()), + expected_df, + start=json_pos, + stop=json_pos + len(batch)) + json_pos += len(batch) + assert json_pos == orc_file.nrows + + +@pytest.mark.pandas +@pytest.mark.parametrize('filename', [ + 'TestOrcFile.test1.orc', + 'TestOrcFile.testDate1900.orc', + 'decimal.orc' +]) +def test_example_using_json(filename, datadir): + """ + Check a ORC file example against the equivalent JSON file, as given + in the Apache ORC repository (the JSON file has one JSON object per + line, corresponding to one row in the ORC file). + """ + # Read JSON file + path = datadir / filename + table = pd.read_json(str(path.with_suffix('.jsn.gz')), lines=True) + check_example_file(path, table, need_fix=True) + + +def test_orcfile_empty(datadir): + from pyarrow import orc + + table = orc.ORCFile(datadir / "TestOrcFile.emptyFile.orc").read() + assert table.num_rows == 0 + + expected_schema = pa.schema([ + ("boolean1", pa.bool_()), + ("byte1", pa.int8()), + ("short1", pa.int16()), + ("int1", pa.int32()), + ("long1", pa.int64()), + ("float1", pa.float32()), + ("double1", pa.float64()), + ("bytes1", pa.binary()), + ("string1", pa.string()), + ("middle", pa.struct( + [("list", pa.list_( + pa.struct([("int1", pa.int32()), + ("string1", pa.string())]))) + ])), + ("list", pa.list_( + pa.struct([("int1", pa.int32()), + ("string1", pa.string())]) + )), + ("map", pa.map_(pa.string(), + pa.struct([("int1", pa.int32()), + ("string1", pa.string())]) + )), + ]) + assert table.schema == expected_schema + + +def test_filesystem_uri(tmpdir): + from pyarrow import orc + table = pa.table({"a": [1, 2, 3]}) + + directory = tmpdir / "data_dir" + directory.mkdir() + path = directory / "data.orc" + orc.write_table(table, str(path)) + + # filesystem object + result = orc.read_table(path, filesystem=fs.LocalFileSystem()) + assert result.equals(table) + + # filesystem URI + result = orc.read_table( + "data_dir/data.orc", filesystem=util._filesystem_uri(tmpdir)) + assert result.equals(table) + + # use the path only + result = orc.read_table( + util._filesystem_uri(path)) + assert result.equals(table) + + +def test_orcfile_readwrite(tmpdir): + from pyarrow import orc + a = pa.array([1, None, 3, None]) + b = pa.array([None, "Arrow", None, "ORC"]) + table = pa.table({"int64": a, "utf8": b}) + file = tmpdir.join("test.orc") + orc.write_table(table, file) + output_table = orc.read_table(file) + assert table.equals(output_table) + + output_table = orc.read_table(file, []) + assert 4 == output_table.num_rows + assert 0 == output_table.num_columns + + output_table = orc.read_table(file, columns=["int64"]) + assert 4 == output_table.num_rows + assert 1 == output_table.num_columns + + +def test_bytesio_readwrite(): + from pyarrow import orc + from io import BytesIO + + buf = BytesIO() + a = pa.array([1, None, 3, 
None]) + b = pa.array([None, "Arrow", None, "ORC"]) + table = pa.table({"int64": a, "utf8": b}) + orc.write_table(table, buf) + buf.seek(0) + orc_file = orc.ORCFile(buf) + output_table = orc_file.read() + assert table.equals(output_table) + + +def test_buffer_readwrite(): + from pyarrow import orc + + buffer_output_stream = pa.BufferOutputStream() + a = pa.array([1, None, 3, None]) + b = pa.array([None, "Arrow", None, "ORC"]) + table = pa.table({"int64": a, "utf8": b}) + orc.write_table(table, buffer_output_stream) + buffer_reader = pa.BufferReader(buffer_output_stream.getvalue()) + orc_file = orc.ORCFile(buffer_reader) + output_table = orc_file.read() + assert table.equals(output_table) + # Check for default WriteOptions + assert orc_file.compression == 'UNCOMPRESSED' + assert orc_file.file_version == '0.12' + assert orc_file.row_index_stride == 10000 + assert orc_file.compression_size == 65536 + + # deprecated keyword order + buffer_output_stream = pa.BufferOutputStream() + with pytest.warns(FutureWarning): + orc.write_table(buffer_output_stream, table) + buffer_reader = pa.BufferReader(buffer_output_stream.getvalue()) + orc_file = orc.ORCFile(buffer_reader) + output_table = orc_file.read() + assert table.equals(output_table) + # Check for default WriteOptions + assert orc_file.compression == 'UNCOMPRESSED' + assert orc_file.file_version == '0.12' + assert orc_file.row_index_stride == 10000 + assert orc_file.compression_size == 65536 + + +@pytest.mark.snappy +def test_buffer_readwrite_with_writeoptions(): + from pyarrow import orc + + buffer_output_stream = pa.BufferOutputStream() + a = pa.array([1, None, 3, None]) + b = pa.array([None, "Arrow", None, "ORC"]) + table = pa.table({"int64": a, "utf8": b}) + orc.write_table( + table, + buffer_output_stream, + compression='snappy', + file_version='0.11', + row_index_stride=5000, + compression_block_size=32768, + ) + buffer_reader = pa.BufferReader(buffer_output_stream.getvalue()) + orc_file = orc.ORCFile(buffer_reader) + output_table = orc_file.read() + assert table.equals(output_table) + # Check for modified WriteOptions + assert orc_file.compression == 'SNAPPY' + assert orc_file.file_version == '0.11' + assert orc_file.row_index_stride == 5000 + assert orc_file.compression_size == 32768 + + # deprecated keyword order + buffer_output_stream = pa.BufferOutputStream() + with pytest.warns(FutureWarning): + orc.write_table( + buffer_output_stream, + table, + compression='uncompressed', + file_version='0.11', + row_index_stride=20000, + compression_block_size=16384, + ) + buffer_reader = pa.BufferReader(buffer_output_stream.getvalue()) + orc_file = orc.ORCFile(buffer_reader) + output_table = orc_file.read() + assert table.equals(output_table) + # Check for default WriteOptions + assert orc_file.compression == 'UNCOMPRESSED' + assert orc_file.file_version == '0.11' + assert orc_file.row_index_stride == 20000 + assert orc_file.compression_size == 16384 + + +def test_buffer_readwrite_with_bad_writeoptions(): + from pyarrow import orc + buffer_output_stream = pa.BufferOutputStream() + a = pa.array([1, None, 3, None]) + table = pa.table({"int64": a}) + + # batch_size must be a positive integer + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + batch_size=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + batch_size=-100, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + batch_size=1024.23, + ) + + # file_version must be 0.11 
or 0.12 + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + file_version=0.13, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + file_version='1.1', + ) + + # stripe_size must be a positive integer + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + stripe_size=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + stripe_size=-400, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + stripe_size=4096.73, + ) + + # compression must be among the given options + with pytest.raises(TypeError): + orc.write_table( + table, + buffer_output_stream, + compression=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression='none', + ) + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression='zlid', + ) + + # compression_block_size must be a positive integer + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_block_size=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_block_size=-200, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_block_size=1096.73, + ) + + # compression_strategy must be among the given options + with pytest.raises(TypeError): + orc.write_table( + table, + buffer_output_stream, + compression_strategy=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_strategy='no', + ) + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + compression_strategy='large', + ) + + # row_index_stride must be a positive integer + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + row_index_stride=0, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + row_index_stride=-800, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + row_index_stride=3096.29, + ) + + # padding_tolerance must be possible to cast to float + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + padding_tolerance='cat', + ) + + # dictionary_key_size_threshold must be possible to cast to + # float between 0.0 and 1.0 + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + dictionary_key_size_threshold='arrow', + ) + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + dictionary_key_size_threshold=1.2, + ) + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + dictionary_key_size_threshold=-3.2, + ) + + # bloom_filter_columns must be convertible to a list containing + # nonnegative integers + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_columns="string", + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_columns=[0, 1.4], + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_columns={0, 2, -1}, + ) + + # bloom_filter_fpp must be convertible to a float between 0.0 and 1.0 + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_fpp='arrow', + ) + + 
with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_fpp=1.1, + ) + + with pytest.raises(ValueError): + orc.write_table( + table, + buffer_output_stream, + bloom_filter_fpp=-0.1, + ) + + +def test_column_selection(tempdir): + from pyarrow import orc + + # create a table with nested types + inner = pa.field('inner', pa.int64()) + middle = pa.field('middle', pa.struct([inner])) + fields = [ + pa.field('basic', pa.int32()), + pa.field( + 'list', pa.list_(pa.field('item', pa.int32())) + ), + pa.field( + 'struct', pa.struct([middle, pa.field('inner2', pa.int64())]) + ), + pa.field( + 'list-struct', pa.list_(pa.field( + 'item', pa.struct([ + pa.field('inner1', pa.int64()), + pa.field('inner2', pa.int64()) + ]) + )) + ), + pa.field('basic2', pa.int64()), + ] + arrs = [ + [0], [[1, 2]], [{"middle": {"inner": 3}, "inner2": 4}], + [[{"inner1": 5, "inner2": 6}, {"inner1": 7, "inner2": 8}]], [9]] + table = pa.table(arrs, schema=pa.schema(fields)) + + path = str(tempdir / 'test.orc') + orc.write_table(table, path) + orc_file = orc.ORCFile(path) + + # default selecting all columns + result1 = orc_file.read() + assert result1.equals(table) + + # selecting with columns names + result2 = orc_file.read(columns=["basic", "basic2"]) + assert result2.equals(table.select(["basic", "basic2"])) + + result3 = orc_file.read(columns=["list", "struct", "basic2"]) + assert result3.equals(table.select(["list", "struct", "basic2"])) + + # using dotted paths + result4 = orc_file.read(columns=["struct.middle.inner"]) + expected4 = pa.table({"struct": [{"middle": {"inner": 3}}]}) + assert result4.equals(expected4) + + result5 = orc_file.read(columns=["struct.inner2"]) + expected5 = pa.table({"struct": [{"inner2": 4}]}) + assert result5.equals(expected5) + + result6 = orc_file.read( + columns=["list", "struct.middle.inner", "struct.inner2"] + ) + assert result6.equals(table.select(["list", "struct"])) + + result7 = orc_file.read(columns=["list-struct.inner1"]) + expected7 = pa.table({"list-struct": [[{"inner1": 5}, {"inner1": 7}]]}) + assert result7.equals(expected7) + + # selecting with (Arrow-based) field indices + result2 = orc_file.read(columns=[0, 4]) + assert result2.equals(table.select(["basic", "basic2"])) + + result3 = orc_file.read(columns=[1, 2, 3]) + assert result3.equals(table.select(["list", "struct", "list-struct"])) + + # error on non-existing name or index + with pytest.raises(IOError): + # liborc returns ParseError, which gets translated into IOError + # instead of ValueError + orc_file.read(columns=["wrong"]) + + with pytest.raises(ValueError): + orc_file.read(columns=[5]) + + +def test_wrong_usage_orc_writer(tempdir): + from pyarrow import orc + + path = str(tempdir / 'test.orc') + with orc.ORCWriter(path) as writer: + with pytest.raises(AttributeError): + writer.test() + + +def test_orc_writer_with_null_arrays(tempdir): + from pyarrow import orc + + path = str(tempdir / 'test.orc') + a = pa.array([1, None, 3, None]) + b = pa.array([None, None, None, None]) + table = pa.table({"int64": a, "utf8": b}) + with pytest.raises(pa.ArrowNotImplementedError): + orc.write_table(table, path) diff --git a/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_udf.py b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_udf.py new file mode 100644 index 0000000000000000000000000000000000000000..c8e376fefb3b8a52f102002a89bc79f114c6bc10 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pyarrow/tests/test_udf.py @@ -0,0 +1,869 @@ +# Licensed to the 
Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +import pytest + +import numpy as np + +import pyarrow as pa +from pyarrow import compute as pc + +# UDFs are all tested with a dataset scan +pytestmark = pytest.mark.dataset + +# For convenience, most of the tests here don't care about udf func docs +empty_udf_doc = {"summary": "", "description": ""} + +try: + import pyarrow.dataset as ds +except ImportError: + ds = None + + +def mock_udf_context(batch_length=10): + from pyarrow._compute import _get_udf_context + return _get_udf_context(pa.default_memory_pool(), batch_length) + + +class MyError(RuntimeError): + pass + + +@pytest.fixture(scope="session") +def sum_agg_func_fixture(): + """ + Register a unary aggregate function (sum) + """ + def func(ctx, x, *args): + return pa.scalar(np.nansum(x)) + + func_name = "sum_udf" + func_doc = empty_udf_doc + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.float64(), + }, + pa.float64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def exception_agg_func_fixture(): + def func(ctx, x): + raise RuntimeError("Oops") + return pa.scalar(len(x)) + + func_name = "y=exception_len(x)" + func_doc = empty_udf_doc + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.int64(), + }, + pa.int64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def wrong_output_dtype_agg_func_fixture(scope="session"): + def func(ctx, x): + return pa.scalar(len(x), pa.int32()) + + func_name = "y=wrong_output_dtype(x)" + func_doc = empty_udf_doc + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.int64(), + }, + pa.int64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def wrong_output_type_agg_func_fixture(scope="session"): + def func(ctx, x): + return len(x) + + func_name = "y=wrong_output_type(x)" + func_doc = empty_udf_doc + + pc.register_aggregate_function(func, + func_name, + func_doc, + { + "x": pa.int64(), + }, + pa.int64() + ) + return func, func_name + + +@pytest.fixture(scope="session") +def binary_func_fixture(): + """ + Register a binary scalar function. + """ + def binary_function(ctx, m, x): + return pc.call_function("multiply", [m, x], + memory_pool=ctx.memory_pool) + func_name = "y=mx" + binary_doc = {"summary": "y=mx", + "description": "find y from y = mx"} + pc.register_scalar_function(binary_function, + func_name, + binary_doc, + {"m": pa.int64(), + "x": pa.int64(), + }, + pa.int64()) + return binary_function, func_name + + +@pytest.fixture(scope="session") +def ternary_func_fixture(): + """ + Register a ternary scalar function. 
+ """ + def ternary_function(ctx, m, x, c): + mx = pc.call_function("multiply", [m, x], + memory_pool=ctx.memory_pool) + return pc.call_function("add", [mx, c], + memory_pool=ctx.memory_pool) + ternary_doc = {"summary": "y=mx+c", + "description": "find y from y = mx + c"} + func_name = "y=mx+c" + pc.register_scalar_function(ternary_function, + func_name, + ternary_doc, + { + "array1": pa.int64(), + "array2": pa.int64(), + "array3": pa.int64(), + }, + pa.int64()) + return ternary_function, func_name + + +@pytest.fixture(scope="session") +def varargs_func_fixture(): + """ + Register a varargs scalar function with at least two arguments. + """ + def varargs_function(ctx, first, *values): + acc = first + for val in values: + acc = pc.call_function("add", [acc, val], + memory_pool=ctx.memory_pool) + return acc + func_name = "z=ax+by+c" + varargs_doc = {"summary": "z=ax+by+c", + "description": "find z from z = ax + by + c" + } + pc.register_scalar_function(varargs_function, + func_name, + varargs_doc, + { + "array1": pa.int64(), + "array2": pa.int64(), + }, + pa.int64()) + return varargs_function, func_name + + +@pytest.fixture(scope="session") +def nullary_func_fixture(): + """ + Register a nullary scalar function. + """ + def nullary_func(context): + return pa.array([42] * context.batch_length, type=pa.int64(), + memory_pool=context.memory_pool) + + func_doc = { + "summary": "random function", + "description": "generates a random value" + } + func_name = "test_nullary_func" + pc.register_scalar_function(nullary_func, + func_name, + func_doc, + {}, + pa.int64()) + + return nullary_func, func_name + + +@pytest.fixture(scope="session") +def wrong_output_type_func_fixture(): + """ + Register a scalar function which returns something that is neither + a Arrow scalar or array. + """ + def wrong_output_type(ctx): + return 42 + + func_name = "test_wrong_output_type" + in_types = {} + out_type = pa.int64() + doc = { + "summary": "return wrong output type", + "description": "" + } + pc.register_scalar_function(wrong_output_type, func_name, doc, + in_types, out_type) + return wrong_output_type, func_name + + +@pytest.fixture(scope="session") +def wrong_output_datatype_func_fixture(): + """ + Register a scalar function whose actual output DataType doesn't + match the declared output DataType. + """ + def wrong_output_datatype(ctx, array): + return pc.call_function("add", [array, 1]) + func_name = "test_wrong_output_datatype" + in_types = {"array": pa.int64()} + # The actual output DataType will be int64. + out_type = pa.int16() + doc = { + "summary": "return wrong output datatype", + "description": "" + } + pc.register_scalar_function(wrong_output_datatype, func_name, doc, + in_types, out_type) + return wrong_output_datatype, func_name + + +@pytest.fixture(scope="session") +def wrong_signature_func_fixture(): + """ + Register a scalar function with the wrong signature. + """ + # Missing the context argument + def wrong_signature(): + return pa.scalar(1, type=pa.int64()) + + func_name = "test_wrong_signature" + in_types = {} + out_type = pa.int64() + doc = { + "summary": "UDF with wrong signature", + "description": "" + } + pc.register_scalar_function(wrong_signature, func_name, doc, + in_types, out_type) + return wrong_signature, func_name + + +@pytest.fixture(scope="session") +def raising_func_fixture(): + """ + Register a scalar function which raises a custom exception. 
+ """ + def raising_func(ctx): + raise MyError("error raised by scalar UDF") + func_name = "test_raise" + doc = { + "summary": "raising function", + "description": "" + } + pc.register_scalar_function(raising_func, func_name, doc, + {}, pa.int64()) + return raising_func, func_name + + +@pytest.fixture(scope="session") +def unary_vector_func_fixture(): + """ + Register a vector function + """ + def pct_rank(ctx, x): + # copy here to get around pandas 1.0 issue + return pa.array(x.to_pandas().copy().rank(pct=True)) + + func_name = "y=pct_rank(x)" + doc = empty_udf_doc + pc.register_vector_function(pct_rank, func_name, doc, { + 'x': pa.float64()}, pa.float64()) + + return pct_rank, func_name + + +@pytest.fixture(scope="session") +def struct_vector_func_fixture(): + """ + Register a vector function that returns a struct array + """ + def pivot(ctx, k, v, c): + df = pa.RecordBatch.from_arrays([k, v, c], names=['k', 'v', 'c']).to_pandas() + df_pivot = df.pivot(columns='c', values='v', index='k').reset_index() + return pa.RecordBatch.from_pandas(df_pivot).to_struct_array() + + func_name = "y=pivot(x)" + doc = empty_udf_doc + pc.register_vector_function( + pivot, func_name, doc, + {'k': pa.int64(), 'v': pa.float64(), 'c': pa.utf8()}, + pa.struct([('k', pa.int64()), ('v1', pa.float64()), ('v2', pa.float64())]) + ) + + return pivot, func_name + + +def check_scalar_function(func_fixture, + inputs, *, + run_in_dataset=True, + batch_length=None): + function, name = func_fixture + if batch_length is None: + all_scalar = True + for arg in inputs: + if isinstance(arg, pa.Array): + all_scalar = False + batch_length = len(arg) + if all_scalar: + batch_length = 1 + + func = pc.get_function(name) + assert func.name == name + + result = pc.call_function(name, inputs, length=batch_length) + expected_output = function(mock_udf_context(batch_length), *inputs) + assert result == expected_output + # At the moment there is an issue when handling nullary functions. + # See: ARROW-15286 and ARROW-16290. 
if run_in_dataset: + field_names = [f'field{index}' for index, in_arr in enumerate(inputs)] + table = pa.Table.from_arrays(inputs, field_names) + dataset = ds.dataset(table) + func_args = [ds.field(field_name) for field_name in field_names] + result_table = dataset.to_table( + columns={'result': ds.field('')._call(name, func_args)}) + assert result_table.column(0).chunks[0] == expected_output + + +def test_udf_array_unary(unary_func_fixture): + check_scalar_function(unary_func_fixture, + [ + pa.array([10, 20], pa.int64()) + ] + ) + + +def test_udf_array_binary(binary_func_fixture): + check_scalar_function(binary_func_fixture, + [ + pa.array([10, 20], pa.int64()), + pa.array([2, 4], pa.int64()) + ] + ) + + +def test_udf_array_ternary(ternary_func_fixture): + check_scalar_function(ternary_func_fixture, + [ + pa.array([10, 20], pa.int64()), + pa.array([2, 4], pa.int64()), + pa.array([5, 10], pa.int64()) + ] + ) + + +def test_udf_array_varargs(varargs_func_fixture): + check_scalar_function(varargs_func_fixture, + [ + pa.array([2, 3], pa.int64()), + pa.array([10, 20], pa.int64()), + pa.array([3, 7], pa.int64()), + pa.array([20, 30], pa.int64()), + pa.array([5, 10], pa.int64()) + ] + ) + + +def test_registration_errors(): + # validate function name + doc = { + "summary": "test udf input", + "description": "parameters are validated" + } + in_types = {"scalar": pa.int64()} + out_type = pa.int64() + + def test_reg_function(context): + return pa.array([10]) + + with pytest.raises(TypeError): + pc.register_scalar_function(test_reg_function, + None, doc, in_types, + out_type) + + # validate function + with pytest.raises(TypeError, match="func must be a callable"): + pc.register_scalar_function(None, "test_none_function", doc, in_types, + out_type) + + # validate output type + expected_expr = "DataType expected, got " + with pytest.raises(TypeError, match=expected_expr): + pc.register_scalar_function(test_reg_function, + "test_output_function", doc, in_types, + None) + + # validate input type + expected_expr = "in_types must be a dictionary of DataType" + with pytest.raises(TypeError, match=expected_expr): + pc.register_scalar_function(test_reg_function, + "test_input_function", doc, None, + out_type) + + # register an already registered function + # first registration + pc.register_scalar_function(test_reg_function, + "test_reg_function", doc, {}, + out_type) + # second registration + expected_expr = "Already have a function registered with name:" \ + + " test_reg_function" + with pytest.raises(KeyError, match=expected_expr): + pc.register_scalar_function(test_reg_function, + "test_reg_function", doc, {}, + out_type) + + +def test_varargs_function_validation(varargs_func_fixture): + _, func_name = varargs_func_fixture + + error_msg = r"VarArgs function 'z=ax\+by\+c' needs at least 2 arguments" + + with pytest.raises(ValueError, match=error_msg): + pc.call_function(func_name, [42]) + + +def test_function_doc_validation(): + # validate arity + in_types = {"scalar": pa.int64()} + out_type = pa.int64() + + # doc with no summary + func_doc = { + "description": "desc" + } + + def add_const(ctx, scalar): + return pc.call_function("add", [scalar, 1]) + + with pytest.raises(ValueError, + match="Function doc must contain a summary"): + pc.register_scalar_function(add_const, "test_no_summary", + func_doc, in_types, + out_type) + + # doc with no description + func_doc = { + "summary": "test summary" + } + + with pytest.raises(ValueError, + match="Function doc must contain a description"): + 
pc.register_scalar_function(add_const, "test_no_desc", + func_doc, in_types, + out_type) + + +def test_nullary_function(nullary_func_fixture): + # XXX the Python compute layer API doesn't let us override batch_length, + # so only test with the default value of 1. + check_scalar_function(nullary_func_fixture, [], run_in_dataset=False, + batch_length=1) + + +def test_wrong_output_type(wrong_output_type_func_fixture): + _, func_name = wrong_output_type_func_fixture + + with pytest.raises(TypeError, + match="Unexpected output type: int"): + pc.call_function(func_name, [], length=1) + + +def test_wrong_output_datatype(wrong_output_datatype_func_fixture): + _, func_name = wrong_output_datatype_func_fixture + + expected_expr = ("Expected output datatype int16, " + "but function returned datatype int64") + + with pytest.raises(TypeError, match=expected_expr): + pc.call_function(func_name, [pa.array([20, 30])]) + + +def test_wrong_signature(wrong_signature_func_fixture): + _, func_name = wrong_signature_func_fixture + + expected_expr = (r"wrong_signature\(\) takes 0 positional arguments " + "but 1 was given") + + with pytest.raises(TypeError, match=expected_expr): + pc.call_function(func_name, [], length=1) + + +def test_wrong_datatype_declaration(): + def identity(ctx, val): + return val + + func_name = "test_wrong_datatype_declaration" + in_types = {"array": pa.int64()} + out_type = {} + doc = { + "summary": "test output value", + "description": "test output" + } + with pytest.raises(TypeError, + match="DataType expected, got "): + pc.register_scalar_function(identity, func_name, + doc, in_types, out_type) + + +def test_wrong_input_type_declaration(): + def identity(ctx, val): + return val + + func_name = "test_wrong_input_type_declaration" + in_types = {"array": None} + out_type = pa.int64() + doc = { + "summary": "test invalid input type", + "description": "invalid input function" + } + with pytest.raises(TypeError, + match="DataType expected, got "): + pc.register_scalar_function(identity, func_name, doc, + in_types, out_type) + + +def test_scalar_udf_context(unary_func_fixture): + # Check the memory_pool argument is properly propagated + proxy_pool = pa.proxy_memory_pool(pa.default_memory_pool()) + _, func_name = unary_func_fixture + + res = pc.call_function(func_name, + [pa.array([1] * 1000, type=pa.int64())], + memory_pool=proxy_pool) + assert res == pa.array([2] * 1000, type=pa.int64()) + assert proxy_pool.bytes_allocated() == 1000 * 8 + # Destroying Python array should destroy underlying C++ memory + res = None + assert proxy_pool.bytes_allocated() == 0 + + +def test_raising_func(raising_func_fixture): + _, func_name = raising_func_fixture + with pytest.raises(MyError, match="error raised by scalar UDF"): + pc.call_function(func_name, [], length=1) + + +def test_scalar_input(unary_func_fixture): + function, func_name = unary_func_fixture + res = pc.call_function(func_name, [pa.scalar(10)]) + assert res == pa.scalar(11) + + +def test_input_lifetime(unary_func_fixture): + function, func_name = unary_func_fixture + + proxy_pool = pa.proxy_memory_pool(pa.default_memory_pool()) + assert proxy_pool.bytes_allocated() == 0 + + v = pa.array([1] * 1000, type=pa.int64(), memory_pool=proxy_pool) + assert proxy_pool.bytes_allocated() == 1000 * 8 + pc.call_function(func_name, [v]) + assert proxy_pool.bytes_allocated() == 1000 * 8 + # Calling a UDF should not have kept `v` alive longer than required + v = None + assert proxy_pool.bytes_allocated() == 0 + + +def _record_batch_from_iters(schema, *iters): 
+ arrays = [pa.array(list(v), type=schema[i].type) + for i, v in enumerate(iters)] + return pa.RecordBatch.from_arrays(arrays=arrays, schema=schema) + + +def _record_batch_for_range(schema, n): + return _record_batch_from_iters(schema, + range(n, n + 10), + range(n + 1, n + 11)) + + +def make_udt_func(schema, batch_gen): + def udf_func(ctx): + class UDT: + def __init__(self): + self.caller = None + + def __call__(self, ctx): + try: + if self.caller is None: + self.caller, ctx = batch_gen(ctx).send, None + batch = self.caller(ctx) + except StopIteration: + arrays = [pa.array([], type=field.type) + for field in schema] + batch = pa.RecordBatch.from_arrays( + arrays=arrays, schema=schema) + return batch.to_struct_array() + return UDT() + return udf_func + + +def datasource1_direct(): + """A short dataset""" + schema = datasource1_schema() + + class Generator: + def __init__(self): + self.n = 3 + + def __call__(self, ctx): + if self.n == 0: + batch = _record_batch_from_iters(schema, [], []) + else: + self.n -= 1 + batch = _record_batch_for_range(schema, self.n) + return batch.to_struct_array() + return lambda ctx: Generator() + + +def datasource1_generator(): + schema = datasource1_schema() + + def batch_gen(ctx): + for n in range(3, 0, -1): + # ctx = + yield _record_batch_for_range(schema, n - 1) + return make_udt_func(schema, batch_gen) + + +def datasource1_exception(): + schema = datasource1_schema() + + def batch_gen(ctx): + for n in range(3, 0, -1): + # ctx = + yield _record_batch_for_range(schema, n - 1) + raise RuntimeError("datasource1_exception") + return make_udt_func(schema, batch_gen) + + +def datasource1_schema(): + return pa.schema([('', pa.int32()), ('', pa.int32())]) + + +def datasource1_args(func, func_name): + func_doc = {"summary": f"{func_name} UDT", + "description": "test {func_name} UDT"} + in_types = {} + out_type = pa.struct([("", pa.int32()), ("", pa.int32())]) + return func, func_name, func_doc, in_types, out_type + + +def _test_datasource1_udt(func_maker): + schema = datasource1_schema() + func = func_maker() + func_name = func_maker.__name__ + func_args = datasource1_args(func, func_name) + pc.register_tabular_function(*func_args) + n = 3 + for item in pc.call_tabular_function(func_name): + n -= 1 + assert item == _record_batch_for_range(schema, n) + + +def test_udt_datasource1_direct(): + _test_datasource1_udt(datasource1_direct) + + +def test_udt_datasource1_generator(): + _test_datasource1_udt(datasource1_generator) + + +def test_udt_datasource1_exception(): + with pytest.raises(RuntimeError, match='datasource1_exception'): + _test_datasource1_udt(datasource1_exception) + + +def test_scalar_agg_basic(unary_agg_func_fixture): + arr = pa.array([10.0, 20.0, 30.0, 40.0, 50.0], pa.float64()) + result = pc.call_function("mean_udf", [arr]) + expected = pa.scalar(30.0) + assert result == expected + + +def test_scalar_agg_empty(unary_agg_func_fixture): + empty = pa.array([], pa.float64()) + + with pytest.raises(pa.ArrowInvalid, match='empty inputs'): + pc.call_function("mean_udf", [empty]) + + +def test_scalar_agg_wrong_output_dtype(wrong_output_dtype_agg_func_fixture): + arr = pa.array([10, 20, 30, 40, 50], pa.int64()) + with pytest.raises(pa.ArrowTypeError, match="output datatype"): + pc.call_function("y=wrong_output_dtype(x)", [arr]) + + +def test_scalar_agg_wrong_output_type(wrong_output_type_agg_func_fixture): + arr = pa.array([10, 20, 30, 40, 50], pa.int64()) + with pytest.raises(pa.ArrowTypeError, match="output type"): + pc.call_function("y=wrong_output_type(x)", 
+
+
+def test_scalar_agg_varargs(varargs_agg_func_fixture):
+    arr1 = pa.array([10, 20, 30, 40, 50], pa.int64())
+    arr2 = pa.array([1.0, 2.0, 3.0, 4.0, 5.0], pa.float64())
+
+    result = pc.call_function(
+        "sum_mean", [arr1, arr2]
+    )
+    expected = pa.scalar(33.0)
+    assert result == expected
+
+
+def test_scalar_agg_exception(exception_agg_func_fixture):
+    arr = pa.array([10, 20, 30, 40, 50, 60], pa.int64())
+
+    with pytest.raises(RuntimeError, match='Oops'):
+        pc.call_function("y=exception_len(x)", [arr])
+
+
+def test_hash_agg_basic(unary_agg_func_fixture):
+    arr1 = pa.array([10.0, 20.0, 30.0, 40.0, 50.0], pa.float64())
+    arr2 = pa.array([4, 2, 1, 2, 1], pa.int32())
+
+    arr3 = pa.array([60.0, 70.0, 80.0, 90.0, 100.0], pa.float64())
+    arr4 = pa.array([5, 1, 1, 4, 1], pa.int32())
+
+    table1 = pa.table([arr2, arr1], names=["id", "value"])
+    table2 = pa.table([arr4, arr3], names=["id", "value"])
+    table = pa.concat_tables([table1, table2])
+
+    result = table.group_by("id").aggregate([("value", "mean_udf")])
+    expected = table.group_by("id").aggregate(
+        [("value", "mean")]).rename_columns(['id', 'value_mean_udf'])
+
+    assert result.sort_by('id') == expected.sort_by('id')
+
+
+def test_hash_agg_empty(unary_agg_func_fixture):
+    arr1 = pa.array([], pa.float64())
+    arr2 = pa.array([], pa.int32())
+    table = pa.table([arr2, arr1], names=["id", "value"])
+
+    result = table.group_by("id").aggregate([("value", "mean_udf")])
+    expected = pa.table([pa.array([], pa.int32()), pa.array(
+        [], pa.float64())], names=['id', 'value_mean_udf'])
+
+    assert result == expected
+
+
+def test_hash_agg_wrong_output_dtype(wrong_output_dtype_agg_func_fixture):
+    arr1 = pa.array([10, 20, 30, 40, 50], pa.int64())
+    arr2 = pa.array([4, 2, 1, 2, 1], pa.int32())
+
+    table = pa.table([arr2, arr1], names=["id", "value"])
+    with pytest.raises(pa.ArrowTypeError, match="output datatype"):
+        table.group_by("id").aggregate([("value", "y=wrong_output_dtype(x)")])
+
+
+def test_hash_agg_wrong_output_type(wrong_output_type_agg_func_fixture):
+    arr1 = pa.array([10, 20, 30, 40, 50], pa.int64())
+    arr2 = pa.array([4, 2, 1, 2, 1], pa.int32())
+    table = pa.table([arr2, arr1], names=["id", "value"])
+
+    with pytest.raises(pa.ArrowTypeError, match="output type"):
+        table.group_by("id").aggregate([("value", "y=wrong_output_type(x)")])
+
+
+def test_hash_agg_exception(exception_agg_func_fixture):
+    arr1 = pa.array([10, 20, 30, 40, 50], pa.int64())
+    arr2 = pa.array([4, 2, 1, 2, 1], pa.int32())
+    table = pa.table([arr2, arr1], names=["id", "value"])
+
+    with pytest.raises(RuntimeError, match='Oops'):
+        table.group_by("id").aggregate([("value", "y=exception_len(x)")])
+
+
+def test_hash_agg_random(sum_agg_func_fixture):
+    """Test hash aggregate udf with randomly sampled data"""
+
+    value_num = 1000000
+    group_num = 1000
+
+    arr1 = pa.array(np.repeat(1, value_num), pa.float64())
+    arr2 = pa.array(np.random.choice(group_num, value_num), pa.int32())
+
+    table = pa.table([arr2, arr1], names=['id', 'value'])
+
+    result = table.group_by("id").aggregate([("value", "sum_udf")])
+    expected = table.group_by("id").aggregate(
+        [("value", "sum")]).rename_columns(['id', 'value_sum_udf'])
+
+    assert result.sort_by('id') == expected.sort_by('id')
+
+
+@pytest.mark.pandas
+def test_vector_basic(unary_vector_func_fixture):
+    arr = pa.array([10.0, 20.0, 30.0, 40.0, 50.0], pa.float64())
+    result = pc.call_function("y=pct_rank(x)", [arr])
+    expected = unary_vector_func_fixture[0](None, arr)
+    assert result == expected
+
+
+@pytest.mark.pandas
+def test_vector_empty(unary_vector_func_fixture):
+    arr = pa.array([1], pa.float64())
+    result = pc.call_function("y=pct_rank(x)", [arr])
+    expected = unary_vector_func_fixture[0](None, arr)
+    assert result == expected
+
+
+@pytest.mark.pandas
+def test_vector_struct(struct_vector_func_fixture):
+    k = pa.array(
+        [1, 1, 2, 2], pa.int64()
+    )
+    v = pa.array(
+        [1.0, 2.0, 3.0, 4.0], pa.float64()
+    )
+    c = pa.array(
+        ['v1', 'v2', 'v1', 'v2']
+    )
+    result = pc.call_function("y=pivot(x)", [k, v, c])
+    expected = struct_vector_func_fixture[0](None, k, v, c)
+    assert result == expected