diff --git a/.gitattributes b/.gitattributes index 833c91110e7b629d16118be103546f87fdedb7ef..6271c832984471ecf8d4c5a0f15f775768d56ace 100644 --- a/.gitattributes +++ b/.gitattributes @@ -197,3 +197,4 @@ llmeval-env/lib/python3.10/site-packages/pandas/_libs/algos.cpython-310-x86_64-l llmeval-env/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 b/llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 new file mode 100644 index 0000000000000000000000000000000000000000..8dddd9e617fa191976f52acc907cae5eab57a67a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f97bfc2cf75dc40da650eb97aff63d3e195f500cc623c74f3fe33ce2ce2b71f4 +size 515090264 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c92af48eace20ed38d70469dc15c202f92826408 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6bf72735fd9f3e67e5b3b0ae077f6f0da29cf4f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa147b283a80f3d6b23c8233e14532152f391571 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec1419d2ef84d8f4234c89200fbb00b07e3e6f32 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e1a43b2da4bf82dd5bfedd13b7ceb7a382147a6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..879be6d617794c2f4e91962710735453b9dfdeb1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f51f7d173c47667e18eb748e0f24b0aada83e23f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff824ccd30f53dd1d56b8efc31f93a2ef6929e30 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8843426b8630a4f0a493b3b7f981f1ab0dfaaf45 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ba342114da4386ae000d3676cc4e43a3511bab8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c28b40b8b56ed1630d08d5b6cc5f1ffd7c1c785b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9a067e2b9ef0f2cdb0f227c894d3ff108f9fe6be Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e15e967e0fc25e5570fe24d8c93843b7127ce5e4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..4e848cd48b42d70033af233bf71c6904b9d069f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py @@ -0,0 +1,9 @@ +import pytest + + +@pytest.fixture(params=["split", "records", "index", "columns", "values"]) +def orient(request): + """ + Fixture for orients excluding the table format. + """ + return request.param diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7d34c85c01599707e648c4d9964773d16a13fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py @@ -0,0 +1,130 @@ +from io import ( + BytesIO, + StringIO, +) + +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + + +def test_compression_roundtrip(compression): + df = pd.DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + + with tm.ensure_clean() as path: + df.to_json(path, compression=compression) + tm.assert_frame_equal(df, pd.read_json(path, compression=compression)) + + # explicitly ensure file was compressed. 
+ with tm.decompress_file(path, compression) as fh: + result = fh.read().decode("utf8") + data = StringIO(result) + tm.assert_frame_equal(df, pd.read_json(data)) + + +def test_read_zipped_json(datapath): + uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json") + uncompressed_df = pd.read_json(uncompressed_path) + + compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip") + compressed_df = pd.read_json(compressed_path, compression="zip") + + tm.assert_frame_equal(uncompressed_df, compressed_df) + + +@td.skip_if_not_us_locale +@pytest.mark.single_cpu +def test_with_s3_url(compression, s3_public_bucket, s3so): + # Bucket created in tests/io/conftest.py + df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}')) + + with tm.ensure_clean() as path: + df.to_json(path, compression=compression) + with open(path, "rb") as f: + s3_public_bucket.put_object(Key="test-1", Body=f) + + roundtripped_df = pd.read_json( + f"s3://{s3_public_bucket.name}/test-1", + compression=compression, + storage_options=s3so, + ) + tm.assert_frame_equal(df, roundtripped_df) + + +def test_lines_with_compression(compression): + with tm.ensure_clean() as path: + df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}')) + df.to_json(path, orient="records", lines=True, compression=compression) + roundtripped_df = pd.read_json(path, lines=True, compression=compression) + tm.assert_frame_equal(df, roundtripped_df) + + +def test_chunksize_with_compression(compression): + with tm.ensure_clean() as path: + df = pd.read_json(StringIO('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')) + df.to_json(path, orient="records", lines=True, compression=compression) + + with pd.read_json( + path, lines=True, chunksize=1, compression=compression + ) as res: + roundtripped_df = pd.concat(res) + tm.assert_frame_equal(df, roundtripped_df) + + +def test_write_unsupported_compression_type(): + df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}')) + with tm.ensure_clean() as path: + msg = "Unrecognized compression type: unsupported" + with pytest.raises(ValueError, match=msg): + df.to_json(path, compression="unsupported") + + +def test_read_unsupported_compression_type(): + with tm.ensure_clean() as path: + msg = "Unrecognized compression type: unsupported" + with pytest.raises(ValueError, match=msg): + pd.read_json(path, compression="unsupported") + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +@pytest.mark.parametrize("to_infer", [True, False]) +@pytest.mark.parametrize("read_infer", [True, False]) +def test_to_json_compression( + compression_only, read_infer, to_infer, compression_to_extension, infer_string +): + with pd.option_context("future.infer_string", infer_string): + # see gh-15008 + compression = compression_only + + # We'll complete file extension subsequently. + filename = "test." 
+ filename += compression_to_extension[compression] + + df = pd.DataFrame({"A": [1]}) + + to_compression = "infer" if to_infer else compression + read_compression = "infer" if read_infer else compression + + with tm.ensure_clean(filename) as path: + df.to_json(path, compression=to_compression) + result = pd.read_json(path, compression=read_compression) + tm.assert_frame_equal(result, df) + + +def test_to_json_compression_mode(compression): + # GH 39985 (read_json does not support user-provided binary files) + expected = pd.DataFrame({"A": [1]}) + + with BytesIO() as buffer: + expected.to_json(buffer, compression=compression) + # df = pd.read_json(buffer, compression=compression) + # tm.assert_frame_equal(expected, df) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py new file mode 100644 index 0000000000000000000000000000000000000000..cc88fc3ba18263ac78f7057bfb5950f0420646b4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py @@ -0,0 +1,21 @@ +""" +Tests for the deprecated keyword arguments for `read_json`. +""" +from io import StringIO + +import pandas as pd +import pandas._testing as tm + +from pandas.io.json import read_json + + +def test_good_kwargs(): + df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2]) + + with tm.assert_produces_warning(None): + data1 = StringIO(df.to_json(orient="split")) + tm.assert_frame_equal(df, read_json(data1, orient="split")) + data2 = StringIO(df.to_json(orient="columns")) + tm.assert_frame_equal(df, read_json(data2, orient="columns")) + data3 = StringIO(df.to_json(orient="index")) + tm.assert_frame_equal(df, read_json(data3, orient="index")) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..cc101bb9c8b6d7c5f230b408ecf060a4af520106 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py @@ -0,0 +1,873 @@ +"""Tests for Table Schema integration.""" +from collections import OrderedDict +from io import StringIO +import json + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + PeriodDtype, +) + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.json._table_schema import ( + as_json_table_type, + build_table_schema, + convert_json_field_to_pandas_type, + convert_pandas_type_to_json_field, + set_default_names, +) + + +@pytest.fixture +def df_schema(): + return DataFrame( + { + "A": [1, 2, 3, 4], + "B": ["a", "b", "c", "c"], + "C": pd.date_range("2016-01-01", freq="d", periods=4), + "D": pd.timedelta_range("1h", periods=4, freq="min"), + }, + index=pd.Index(range(4), name="idx"), + ) + + +@pytest.fixture +def df_table(): + return DataFrame( + { + "A": [1, 2, 3, 4], + "B": ["a", "b", "c", "c"], + "C": pd.date_range("2016-01-01", freq="d", periods=4), + "D": pd.timedelta_range("1h", periods=4, freq="min"), + "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])), + "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)), + "G": [1.0, 2.0, 3, 4.0], + "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"), + }, + index=pd.Index(range(4), name="idx"), + ) + + 
+class TestBuildSchema: + def test_build_table_schema(self, df_schema, using_infer_string): + result = build_table_schema(df_schema, version=False) + expected = { + "fields": [ + {"name": "idx", "type": "integer"}, + {"name": "A", "type": "integer"}, + {"name": "B", "type": "string"}, + {"name": "C", "type": "datetime"}, + {"name": "D", "type": "duration"}, + ], + "primaryKey": ["idx"], + } + if using_infer_string: + expected["fields"][2] = {"name": "B", "type": "any", "extDtype": "string"} + assert result == expected + result = build_table_schema(df_schema) + assert "pandas_version" in result + + def test_series(self): + s = pd.Series([1, 2, 3], name="foo") + result = build_table_schema(s, version=False) + expected = { + "fields": [ + {"name": "index", "type": "integer"}, + {"name": "foo", "type": "integer"}, + ], + "primaryKey": ["index"], + } + assert result == expected + result = build_table_schema(s) + assert "pandas_version" in result + + def test_series_unnamed(self): + result = build_table_schema(pd.Series([1, 2, 3]), version=False) + expected = { + "fields": [ + {"name": "index", "type": "integer"}, + {"name": "values", "type": "integer"}, + ], + "primaryKey": ["index"], + } + assert result == expected + + def test_multiindex(self, df_schema, using_infer_string): + df = df_schema + idx = pd.MultiIndex.from_product([("a", "b"), (1, 2)]) + df.index = idx + + result = build_table_schema(df, version=False) + expected = { + "fields": [ + {"name": "level_0", "type": "string"}, + {"name": "level_1", "type": "integer"}, + {"name": "A", "type": "integer"}, + {"name": "B", "type": "string"}, + {"name": "C", "type": "datetime"}, + {"name": "D", "type": "duration"}, + ], + "primaryKey": ["level_0", "level_1"], + } + if using_infer_string: + expected["fields"][0] = { + "name": "level_0", + "type": "any", + "extDtype": "string", + } + expected["fields"][3] = {"name": "B", "type": "any", "extDtype": "string"} + assert result == expected + + df.index.names = ["idx0", None] + expected["fields"][0]["name"] = "idx0" + expected["primaryKey"] = ["idx0", "level_1"] + result = build_table_schema(df, version=False) + assert result == expected + + +class TestTableSchemaType: + @pytest.mark.parametrize("int_type", [int, np.int16, np.int32, np.int64]) + def test_as_json_table_type_int_data(self, int_type): + int_data = [1, 2, 3] + assert as_json_table_type(np.array(int_data, dtype=int_type).dtype) == "integer" + + @pytest.mark.parametrize("float_type", [float, np.float16, np.float32, np.float64]) + def test_as_json_table_type_float_data(self, float_type): + float_data = [1.0, 2.0, 3.0] + assert ( + as_json_table_type(np.array(float_data, dtype=float_type).dtype) == "number" + ) + + @pytest.mark.parametrize("bool_type", [bool, np.bool_]) + def test_as_json_table_type_bool_data(self, bool_type): + bool_data = [True, False] + assert ( + as_json_table_type(np.array(bool_data, dtype=bool_type).dtype) == "boolean" + ) + + @pytest.mark.parametrize( + "date_data", + [ + pd.to_datetime(["2016"]), + pd.to_datetime(["2016"], utc=True), + pd.Series(pd.to_datetime(["2016"])), + pd.Series(pd.to_datetime(["2016"], utc=True)), + pd.period_range("2016", freq="Y", periods=3), + ], + ) + def test_as_json_table_type_date_data(self, date_data): + assert as_json_table_type(date_data.dtype) == "datetime" + + @pytest.mark.parametrize( + "str_data", + [pd.Series(["a", "b"], dtype=object), pd.Index(["a", "b"], dtype=object)], + ) + def test_as_json_table_type_string_data(self, str_data): + assert as_json_table_type(str_data.dtype) 
== "string" + + @pytest.mark.parametrize( + "cat_data", + [ + pd.Categorical(["a"]), + pd.Categorical([1]), + pd.Series(pd.Categorical([1])), + pd.CategoricalIndex([1]), + pd.Categorical([1]), + ], + ) + def test_as_json_table_type_categorical_data(self, cat_data): + assert as_json_table_type(cat_data.dtype) == "any" + + # ------ + # dtypes + # ------ + @pytest.mark.parametrize("int_dtype", [int, np.int16, np.int32, np.int64]) + def test_as_json_table_type_int_dtypes(self, int_dtype): + assert as_json_table_type(int_dtype) == "integer" + + @pytest.mark.parametrize("float_dtype", [float, np.float16, np.float32, np.float64]) + def test_as_json_table_type_float_dtypes(self, float_dtype): + assert as_json_table_type(float_dtype) == "number" + + @pytest.mark.parametrize("bool_dtype", [bool, np.bool_]) + def test_as_json_table_type_bool_dtypes(self, bool_dtype): + assert as_json_table_type(bool_dtype) == "boolean" + + @pytest.mark.parametrize( + "date_dtype", + [ + np.dtype(" None: + self.hexed = hexed + self.binary = bytes.fromhex(hexed) + + def __str__(self) -> str: + return self.hexed + + hexed = "574b4454ba8c5eb4f98a8f45" + binthing = BinaryThing(hexed) + + # verify the proper conversion of printable content + df_printable = DataFrame({"A": [binthing.hexed]}) + assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}' + + # check if non-printable content throws appropriate Exception + df_nonprintable = DataFrame({"A": [binthing]}) + msg = "Unsupported UTF-8 sequence length when encoding string" + with pytest.raises(OverflowError, match=msg): + df_nonprintable.to_json() + + # the same with multiple columns threw segfaults + df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"]) + with pytest.raises(OverflowError, match=msg): + df_mixed.to_json() + + # default_handler should resolve exceptions for non-string types + result = df_nonprintable.to_json(default_handler=str) + expected = f'{{"A":{{"0":"{hexed}"}}}}' + assert result == expected + assert ( + df_mixed.to_json(default_handler=str) + == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}' + ) + + def test_label_overflow(self): + # GH14256: buffer length not checked when writing label + result = DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json() + expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}' + assert result == expected + + def test_series_non_unique_index(self): + s = Series(["a", "b"], index=[1, 1]) + + msg = "Series index must be unique for orient='index'" + with pytest.raises(ValueError, match=msg): + s.to_json(orient="index") + + tm.assert_series_equal( + s, + read_json( + StringIO(s.to_json(orient="split")), orient="split", typ="series" + ), + ) + unserialized = read_json( + StringIO(s.to_json(orient="records")), orient="records", typ="series" + ) + tm.assert_equal(s.values, unserialized.values) + + def test_series_default_orient(self, string_series): + assert string_series.to_json() == string_series.to_json(orient="index") + + def test_series_roundtrip_simple(self, orient, string_series, using_infer_string): + data = StringIO(string_series.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient) + + expected = string_series + if using_infer_string and orient in ("split", "index", "columns"): + # These schemas don't contain dtypes, so we infer string + expected.index = expected.index.astype("string[pyarrow_numpy]") + if orient in ("values", "records"): + expected = expected.reset_index(drop=True) + if orient != "split": + expected.name = None + + 
tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [False, None]) + def test_series_roundtrip_object(self, orient, dtype, object_series): + data = StringIO(object_series.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient, dtype=dtype) + + expected = object_series + if orient in ("values", "records"): + expected = expected.reset_index(drop=True) + if orient != "split": + expected.name = None + + tm.assert_series_equal(result, expected) + + def test_series_roundtrip_empty(self, orient): + empty_series = Series([], index=[], dtype=np.float64) + data = StringIO(empty_series.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient) + + expected = empty_series.reset_index(drop=True) + if orient in ("split"): + expected.index = expected.index.astype(np.float64) + + tm.assert_series_equal(result, expected) + + def test_series_roundtrip_timeseries(self, orient, datetime_series): + data = StringIO(datetime_series.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient) + + expected = datetime_series + if orient in ("values", "records"): + expected = expected.reset_index(drop=True) + if orient != "split": + expected.name = None + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [np.float64, int]) + def test_series_roundtrip_numeric(self, orient, dtype): + s = Series(range(6), index=["a", "b", "c", "d", "e", "f"]) + data = StringIO(s.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient) + + expected = s.copy() + if orient in ("values", "records"): + expected = expected.reset_index(drop=True) + + tm.assert_series_equal(result, expected) + + def test_series_to_json_except(self): + s = Series([1, 2, 3]) + msg = "Invalid value 'garbage' for option 'orient'" + with pytest.raises(ValueError, match=msg): + s.to_json(orient="garbage") + + def test_series_from_json_precise_float(self): + s = Series([4.56, 4.56, 4.56]) + result = read_json(StringIO(s.to_json()), typ="series", precise_float=True) + tm.assert_series_equal(result, s, check_index_type=False) + + def test_series_with_dtype(self): + # GH 21986 + s = Series([4.56, 4.56, 4.56]) + result = read_json(StringIO(s.to_json()), typ="series", dtype=np.int64) + expected = Series([4] * 3) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dtype,expected", + [ + (True, Series(["2000-01-01"], dtype="datetime64[ns]")), + (False, Series([946684800000])), + ], + ) + def test_series_with_dtype_datetime(self, dtype, expected): + s = Series(["2000-01-01"], dtype="datetime64[ns]") + data = StringIO(s.to_json()) + result = read_json(data, typ="series", dtype=dtype) + tm.assert_series_equal(result, expected) + + def test_frame_from_json_precise_float(self): + df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]]) + result = read_json(StringIO(df.to_json()), precise_float=True) + tm.assert_frame_equal(result, df) + + def test_typ(self): + s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64") + result = read_json(StringIO(s.to_json()), typ=None) + tm.assert_series_equal(result, s) + + def test_reconstruction_index(self): + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + result = read_json(StringIO(df.to_json())) + tm.assert_frame_equal(result, df) + + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"]) + result = read_json(StringIO(df.to_json())) + tm.assert_frame_equal(result, df) + + def test_path(self, float_frame, int_frame, datetime_frame): + with 
tm.ensure_clean("test.json") as path: + for df in [float_frame, int_frame, datetime_frame]: + df.to_json(path) + read_json(path) + + def test_axis_dates(self, datetime_series, datetime_frame): + # frame + json = StringIO(datetime_frame.to_json()) + result = read_json(json) + tm.assert_frame_equal(result, datetime_frame) + + # series + json = StringIO(datetime_series.to_json()) + result = read_json(json, typ="series") + tm.assert_series_equal(result, datetime_series, check_names=False) + assert result.name is None + + def test_convert_dates(self, datetime_series, datetime_frame): + # frame + df = datetime_frame + df["date"] = Timestamp("20130101").as_unit("ns") + + json = StringIO(df.to_json()) + result = read_json(json) + tm.assert_frame_equal(result, df) + + df["foo"] = 1.0 + json = StringIO(df.to_json(date_unit="ns")) + + result = read_json(json, convert_dates=False) + expected = df.copy() + expected["date"] = expected["date"].values.view("i8") + expected["foo"] = expected["foo"].astype("int64") + tm.assert_frame_equal(result, expected) + + # series + ts = Series(Timestamp("20130101").as_unit("ns"), index=datetime_series.index) + json = StringIO(ts.to_json()) + result = read_json(json, typ="series") + tm.assert_series_equal(result, ts) + + @pytest.mark.parametrize("date_format", ["epoch", "iso"]) + @pytest.mark.parametrize("as_object", [True, False]) + @pytest.mark.parametrize("date_typ", [datetime.date, datetime.datetime, Timestamp]) + def test_date_index_and_values(self, date_format, as_object, date_typ): + data = [date_typ(year=2020, month=1, day=1), pd.NaT] + if as_object: + data.append("a") + + ser = Series(data, index=data) + result = ser.to_json(date_format=date_format) + + if date_format == "epoch": + expected = '{"1577836800000":1577836800000,"null":null}' + else: + expected = ( + '{"2020-01-01T00:00:00.000":"2020-01-01T00:00:00.000","null":null}' + ) + + if as_object: + expected = expected.replace("}", ',"a":"a"}') + + assert result == expected + + @pytest.mark.parametrize( + "infer_word", + [ + "trade_time", + "date", + "datetime", + "sold_at", + "modified", + "timestamp", + "timestamps", + ], + ) + def test_convert_dates_infer(self, infer_word): + # GH10747 + + data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}] + expected = DataFrame( + [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word] + ) + + result = read_json(StringIO(ujson_dumps(data)))[["id", infer_word]] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "date,date_unit", + [ + ("20130101 20:43:42.123", None), + ("20130101 20:43:42", "s"), + ("20130101 20:43:42.123", "ms"), + ("20130101 20:43:42.123456", "us"), + ("20130101 20:43:42.123456789", "ns"), + ], + ) + def test_date_format_frame(self, date, date_unit, datetime_frame): + df = datetime_frame + + df["date"] = Timestamp(date).as_unit("ns") + df.iloc[1, df.columns.get_loc("date")] = pd.NaT + df.iloc[5, df.columns.get_loc("date")] = pd.NaT + if date_unit: + json = df.to_json(date_format="iso", date_unit=date_unit) + else: + json = df.to_json(date_format="iso") + + result = read_json(StringIO(json)) + expected = df.copy() + tm.assert_frame_equal(result, expected) + + def test_date_format_frame_raises(self, datetime_frame): + df = datetime_frame + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + df.to_json(date_format="iso", date_unit="foo") + + @pytest.mark.parametrize( + "date,date_unit", + [ + ("20130101 20:43:42.123", None), + ("20130101 20:43:42", "s"), + 
("20130101 20:43:42.123", "ms"), + ("20130101 20:43:42.123456", "us"), + ("20130101 20:43:42.123456789", "ns"), + ], + ) + def test_date_format_series(self, date, date_unit, datetime_series): + ts = Series(Timestamp(date).as_unit("ns"), index=datetime_series.index) + ts.iloc[1] = pd.NaT + ts.iloc[5] = pd.NaT + if date_unit: + json = ts.to_json(date_format="iso", date_unit=date_unit) + else: + json = ts.to_json(date_format="iso") + + result = read_json(StringIO(json), typ="series") + expected = ts.copy() + tm.assert_series_equal(result, expected) + + def test_date_format_series_raises(self, datetime_series): + ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index) + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + ts.to_json(date_format="iso", date_unit="foo") + + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_date_unit(self, unit, datetime_frame): + df = datetime_frame + df["date"] = Timestamp("20130101 20:43:42").as_unit("ns") + dl = df.columns.get_loc("date") + df.iloc[1, dl] = Timestamp("19710101 20:43:42") + df.iloc[2, dl] = Timestamp("21460101 20:43:42") + df.iloc[4, dl] = pd.NaT + + json = df.to_json(date_format="epoch", date_unit=unit) + + # force date unit + result = read_json(StringIO(json), date_unit=unit) + tm.assert_frame_equal(result, df) + + # detect date unit + result = read_json(StringIO(json), date_unit=None) + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize("unit", ["s", "ms", "us"]) + def test_iso_non_nano_datetimes(self, unit): + # Test that numpy datetimes + # in an Index or a column with non-nano resolution can be serialized + # correctly + # GH53686 + index = DatetimeIndex( + [np.datetime64("2023-01-01T11:22:33.123456", unit)], + dtype=f"datetime64[{unit}]", + ) + df = DataFrame( + { + "date": Series( + [np.datetime64("2022-01-01T11:22:33.123456", unit)], + dtype=f"datetime64[{unit}]", + index=index, + ), + "date_obj": Series( + [np.datetime64("2023-01-01T11:22:33.123456", unit)], + dtype=object, + index=index, + ), + }, + ) + + buf = StringIO() + df.to_json(buf, date_format="iso", date_unit=unit) + buf.seek(0) + + # read_json always reads datetimes in nanosecond resolution + # TODO: check_dtype/check_index_type should be removable + # once read_json gets non-nano support + tm.assert_frame_equal( + read_json(buf, convert_dates=["date", "date_obj"]), + df, + check_index_type=False, + check_dtype=False, + ) + + def test_weird_nested_json(self): + # this used to core dump the parser + s = r"""{ + "status": "success", + "data": { + "posts": [ + { + "id": 1, + "title": "A blog post", + "body": "Some useful content" + }, + { + "id": 2, + "title": "Another blog post", + "body": "More content" + } + ] + } + }""" + read_json(StringIO(s)) + + def test_doc_example(self): + dfj2 = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("AB") + ) + dfj2["date"] = Timestamp("20130101") + dfj2["ints"] = range(5) + dfj2["bools"] = True + dfj2.index = date_range("20130101", periods=5) + + json = StringIO(dfj2.to_json()) + result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_}) + tm.assert_frame_equal(result, result) + + def test_round_trip_exception(self, datapath): + # GH 3867 + path = datapath("io", "json", "data", "teams.csv") + df = pd.read_csv(path) + s = df.to_json() + + result = read_json(StringIO(s)) + res = result.reindex(index=df.index, columns=df.columns) + msg = "The 'downcast' keyword in fillna is deprecated" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + res = res.fillna(np.nan, downcast=False) + tm.assert_frame_equal(res, df) + + @pytest.mark.network + @pytest.mark.single_cpu + @pytest.mark.parametrize( + "field,dtype", + [ + ["created_at", pd.DatetimeTZDtype(tz="UTC")], + ["closed_at", "datetime64[ns]"], + ["updated_at", pd.DatetimeTZDtype(tz="UTC")], + ], + ) + def test_url(self, field, dtype, httpserver): + data = '{"created_at": ["2023-06-23T18:21:36Z"], "closed_at": ["2023-06-23T18:21:36"], "updated_at": ["2023-06-23T18:21:36Z"]}\n' # noqa: E501 + httpserver.serve_content(content=data) + result = read_json(httpserver.url, convert_dates=True) + assert result[field].dtype == dtype + + def test_timedelta(self): + converter = lambda x: pd.to_timedelta(x, unit="ms") + + ser = Series([timedelta(23), timedelta(seconds=5)]) + assert ser.dtype == "timedelta64[ns]" + + result = read_json(StringIO(ser.to_json()), typ="series").apply(converter) + tm.assert_series_equal(result, ser) + + ser = Series([timedelta(23), timedelta(seconds=5)], index=Index([0, 1])) + assert ser.dtype == "timedelta64[ns]" + result = read_json(StringIO(ser.to_json()), typ="series").apply(converter) + tm.assert_series_equal(result, ser) + + frame = DataFrame([timedelta(23), timedelta(seconds=5)]) + assert frame[0].dtype == "timedelta64[ns]" + tm.assert_frame_equal( + frame, read_json(StringIO(frame.to_json())).apply(converter) + ) + + def test_timedelta2(self): + frame = DataFrame( + { + "a": [timedelta(days=23), timedelta(seconds=5)], + "b": [1, 2], + "c": date_range(start="20130101", periods=2), + } + ) + data = StringIO(frame.to_json(date_unit="ns")) + result = read_json(data) + result["a"] = pd.to_timedelta(result.a, unit="ns") + result["c"] = pd.to_datetime(result.c) + tm.assert_frame_equal(frame, result) + + def test_mixed_timedelta_datetime(self): + td = timedelta(23) + ts = Timestamp("20130101") + frame = DataFrame({"a": [td, ts]}, dtype=object) + + expected = DataFrame( + {"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]} + ) + data = StringIO(frame.to_json(date_unit="ns")) + result = read_json(data, dtype={"a": "int64"}) + tm.assert_frame_equal(result, expected, check_index_type=False) + + @pytest.mark.parametrize("as_object", [True, False]) + @pytest.mark.parametrize("date_format", ["iso", "epoch"]) + @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta]) + def test_timedelta_to_json(self, as_object, date_format, timedelta_typ): + # GH28156: to_json not correctly formatting Timedelta + data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT] + if as_object: + data.append("a") + + ser = Series(data, index=data) + if date_format == "iso": + expected = ( + '{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}' + ) + else: + expected = '{"86400000":86400000,"172800000":172800000,"null":null}' + + if as_object: + expected = expected.replace("}", ',"a":"a"}') + + result = ser.to_json(date_format=date_format) + assert result == expected + + @pytest.mark.parametrize("as_object", [True, False]) + @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta]) + def test_timedelta_to_json_fractional_precision(self, as_object, timedelta_typ): + data = [timedelta_typ(milliseconds=42)] + ser = Series(data, index=data) + if as_object: + ser = ser.astype(object) + + result = ser.to_json() + expected = '{"42":42}' + assert result == expected + + def test_default_handler(self): + value = object() + frame = DataFrame({"a": [7, value]}) + expected = DataFrame({"a": [7, 
str(value)]}) + result = read_json(StringIO(frame.to_json(default_handler=str))) + tm.assert_frame_equal(expected, result, check_index_type=False) + + def test_default_handler_indirect(self): + def default(obj): + if isinstance(obj, complex): + return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)] + return str(obj) + + df_list = [ + 9, + DataFrame( + {"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]}, + columns=["a", "b"], + ), + ] + expected = ( + '[9,[[1,null],["STR",null],[[["mathjs","Complex"],' + '["re",4.0],["im",-5.0]],"N\\/A"]]]' + ) + assert ( + ujson_dumps(df_list, default_handler=default, orient="values") == expected + ) + + def test_default_handler_numpy_unsupported_dtype(self): + # GH12554 to_json raises 'Unhandled numpy dtype 15' + df = DataFrame( + {"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]}, + columns=["a", "b"], + ) + expected = ( + '[["(1+0j)","(nan+0j)"],' + '["(2.3+0j)","(nan+0j)"],' + '["(4-5j)","(1.2+0j)"]]' + ) + assert df.to_json(default_handler=str, orient="values") == expected + + def test_default_handler_raises(self): + msg = "raisin" + + def my_handler_raises(obj): + raise TypeError(msg) + + with pytest.raises(TypeError, match=msg): + DataFrame({"a": [1, 2, object()]}).to_json( + default_handler=my_handler_raises + ) + with pytest.raises(TypeError, match=msg): + DataFrame({"a": [1, 2, complex(4, -5)]}).to_json( + default_handler=my_handler_raises + ) + + def test_categorical(self): + # GH4377 df.to_json segfaults with non-ndarray blocks + df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]}) + df["B"] = df["A"] + expected = df.to_json() + + df["B"] = df["A"].astype("category") + assert expected == df.to_json() + + s = df["A"] + sc = df["B"] + assert s.to_json() == sc.to_json() + + def test_datetime_tz(self): + # GH4377 df.to_json segfaults with non-ndarray blocks + tz_range = date_range("20130101", periods=3, tz="US/Eastern") + tz_naive = tz_range.tz_convert("utc").tz_localize(None) + + df = DataFrame({"A": tz_range, "B": date_range("20130101", periods=3)}) + + df_naive = df.copy() + df_naive["A"] = tz_naive + expected = df_naive.to_json() + assert expected == df.to_json() + + stz = Series(tz_range) + s_naive = Series(tz_naive) + assert stz.to_json() == s_naive.to_json() + + def test_sparse(self): + # GH4377 df.to_json segfaults with non-ndarray blocks + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + df.loc[:8] = np.nan + + sdf = df.astype("Sparse") + expected = df.to_json() + assert expected == sdf.to_json() + + s = Series(np.random.default_rng(2).standard_normal(10)) + s.loc[:8] = np.nan + ss = s.astype("Sparse") + + expected = s.to_json() + assert expected == ss.to_json() + + @pytest.mark.parametrize( + "ts", + [ + Timestamp("2013-01-10 05:00:00Z"), + Timestamp("2013-01-10 00:00:00", tz="US/Eastern"), + Timestamp("2013-01-10 00:00:00-0500"), + ], + ) + def test_tz_is_utc(self, ts): + exp = '"2013-01-10T05:00:00.000Z"' + + assert ujson_dumps(ts, iso_dates=True) == exp + dt = ts.to_pydatetime() + assert ujson_dumps(dt, iso_dates=True) == exp + + def test_tz_is_naive(self): + ts = Timestamp("2013-01-10 05:00:00") + exp = '"2013-01-10T05:00:00.000"' + + assert ujson_dumps(ts, iso_dates=True) == exp + dt = ts.to_pydatetime() + assert ujson_dumps(dt, iso_dates=True) == exp + + @pytest.mark.parametrize( + "tz_range", + [ + date_range("2013-01-01 05:00:00Z", periods=2), + date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"), + date_range("2013-01-01 00:00:00-0500", 
periods=2), + ], + ) + def test_tz_range_is_utc(self, tz_range): + exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]' + dfexp = ( + '{"DT":{' + '"0":"2013-01-01T05:00:00.000Z",' + '"1":"2013-01-02T05:00:00.000Z"}}' + ) + + assert ujson_dumps(tz_range, iso_dates=True) == exp + dti = DatetimeIndex(tz_range) + # Ensure datetimes in object array are serialized correctly + # in addition to the normal DTI case + assert ujson_dumps(dti, iso_dates=True) == exp + assert ujson_dumps(dti.astype(object), iso_dates=True) == exp + df = DataFrame({"DT": dti}) + result = ujson_dumps(df, iso_dates=True) + assert result == dfexp + assert ujson_dumps(df.astype({"DT": object}), iso_dates=True) + + def test_tz_range_is_naive(self): + dti = date_range("2013-01-01 05:00:00", periods=2) + + exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]' + dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}' + + # Ensure datetimes in object array are serialized correctly + # in addition to the normal DTI case + assert ujson_dumps(dti, iso_dates=True) == exp + assert ujson_dumps(dti.astype(object), iso_dates=True) == exp + df = DataFrame({"DT": dti}) + result = ujson_dumps(df, iso_dates=True) + assert result == dfexp + assert ujson_dumps(df.astype({"DT": object}), iso_dates=True) + + def test_read_inline_jsonl(self): + # GH9180 + + result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.single_cpu + @td.skip_if_not_us_locale + def test_read_s3_jsonl(self, s3_public_bucket_with_data, s3so): + # GH17200 + + result = read_json( + f"s3n://{s3_public_bucket_with_data.name}/items.jsonl", + lines=True, + storage_options=s3so, + ) + expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_read_local_jsonl(self): + # GH17200 + with tm.ensure_clean("tmp_items.json") as path: + with open(path, "w", encoding="utf-8") as infile: + infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n') + result = read_json(path, lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_read_jsonl_unicode_chars(self): + # GH15132: non-ascii unicode characters + # \u201d == RIGHT DOUBLE QUOTATION MARK + + # simulate file handle + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + json = StringIO(json) + result = read_json(json, lines=True) + expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + # simulate string + json = StringIO('{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n') + result = read_json(json, lines=True) + expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) + def test_to_json_large_numbers(self, bigNum): + # GH34473 + series = Series(bigNum, dtype=object, index=["articleId"]) + json = series.to_json() + expected = '{"articleId":' + str(bigNum) + "}" + assert json == expected + + df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0]) + json = df.to_json() + expected = '{"0":{"articleId":' + str(bigNum) + "}}" + assert json == expected + + @pytest.mark.parametrize("bigNum", [-(2**63) - 1, 2**64]) + def test_read_json_large_numbers(self, bigNum): + # GH20599, 26068 + json = 
StringIO('{"articleId":' + str(bigNum) + "}") + msg = r"Value is too small|Value is too big" + with pytest.raises(ValueError, match=msg): + read_json(json) + + json = StringIO('{"0":{"articleId":' + str(bigNum) + "}}") + with pytest.raises(ValueError, match=msg): + read_json(json) + + def test_read_json_large_numbers2(self): + # GH18842 + json = '{"articleId": "1404366058080022500245"}' + json = StringIO(json) + result = read_json(json, typ="series") + expected = Series(1.404366e21, index=["articleId"]) + tm.assert_series_equal(result, expected) + + json = '{"0": {"articleId": "1404366058080022500245"}}' + json = StringIO(json) + result = read_json(json) + expected = DataFrame(1.404366e21, index=["articleId"], columns=[0]) + tm.assert_frame_equal(result, expected) + + def test_to_jsonl(self): + # GH9180 + df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n' + assert result == expected + + df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n' + assert result == expected + tm.assert_frame_equal(read_json(StringIO(result), lines=True), df) + + # GH15096: escaped characters in columns and data + df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n' + assert result == expected + + tm.assert_frame_equal(read_json(StringIO(result), lines=True), df) + + # TODO: there is a near-identical test for pytables; can we share? + @pytest.mark.xfail(reason="GH#13774 encoding kwarg not supported", raises=TypeError) + @pytest.mark.parametrize( + "val", + [ + [b"E\xc9, 17", b"", b"a", b"b", b"c"], + [b"E\xc9, 17", b"a", b"b", b"c"], + [b"EE, 17", b"", b"a", b"b", b"c"], + [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"], + [b"", b"a", b"b", b"c"], + [b"\xf8\xfc", b"a", b"b", b"c"], + [b"A\xf8\xfc", b"", b"a", b"b", b"c"], + [np.nan, b"", b"b", b"c"], + [b"A\xf8\xfc", np.nan, b"", b"b", b"c"], + ], + ) + @pytest.mark.parametrize("dtype", ["category", object]) + def test_latin_encoding(self, dtype, val): + # GH 13774 + ser = Series( + [x.decode("latin-1") if isinstance(x, bytes) else x for x in val], + dtype=dtype, + ) + encoding = "latin-1" + with tm.ensure_clean("test.json") as path: + ser.to_json(path, encoding=encoding) + retr = read_json(StringIO(path), encoding=encoding) + tm.assert_series_equal(ser, retr, check_categorical=False) + + def test_data_frame_size_after_to_json(self): + # GH15344 + df = DataFrame({"a": [str(1)]}) + + size_before = df.memory_usage(index=True, deep=True).sum() + df.to_json() + size_after = df.memory_usage(index=True, deep=True).sum() + + assert size_before == size_after + + @pytest.mark.parametrize( + "index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]] + ) + @pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]]) + def test_from_json_to_json_table_index_and_columns(self, index, columns): + # GH25433 GH25435 + expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns) + dfjson = expected.to_json(orient="table") + + result = read_json(StringIO(dfjson), orient="table") + tm.assert_frame_equal(result, expected) + + def test_from_json_to_json_table_dtypes(self): + # GH21345 + expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]}) + dfjson = 
expected.to_json(orient="table") + result = read_json(StringIO(dfjson), orient="table") + tm.assert_frame_equal(result, expected) + + # TODO: We are casting to string which coerces None to NaN before casting back + # to object, ending up with incorrect na values + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="incorrect na conversion") + @pytest.mark.parametrize("orient", ["split", "records", "index", "columns"]) + def test_to_json_from_json_columns_dtypes(self, orient): + # GH21892 GH33205 + expected = DataFrame.from_dict( + { + "Integer": Series([1, 2, 3], dtype="int64"), + "Float": Series([None, 2.0, 3.0], dtype="float64"), + "Object": Series([None, "", "c"], dtype="object"), + "Bool": Series([True, False, True], dtype="bool"), + "Category": Series(["a", "b", None], dtype="category"), + "Datetime": Series( + ["2020-01-01", None, "2020-01-03"], dtype="datetime64[ns]" + ), + } + ) + dfjson = expected.to_json(orient=orient) + + result = read_json( + StringIO(dfjson), + orient=orient, + dtype={ + "Integer": "int64", + "Float": "float64", + "Object": "object", + "Bool": "bool", + "Category": "category", + "Datetime": "datetime64[ns]", + }, + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}]) + def test_read_json_table_dtype_raises(self, dtype): + # GH21345 + df = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]}) + dfjson = df.to_json(orient="table") + msg = "cannot pass both dtype and orient='table'" + with pytest.raises(ValueError, match=msg): + read_json(dfjson, orient="table", dtype=dtype) + + @pytest.mark.parametrize("orient", ["index", "columns", "records", "values"]) + def test_read_json_table_empty_axes_dtype(self, orient): + # GH28558 + + expected = DataFrame() + result = read_json(StringIO("{}"), orient=orient, convert_axes=True) + tm.assert_index_equal(result.index, expected.index) + tm.assert_index_equal(result.columns, expected.columns) + + def test_read_json_table_convert_axes_raises(self): + # GH25433 GH25435 + df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."]) + dfjson = df.to_json(orient="table") + msg = "cannot pass both convert_axes and orient='table'" + with pytest.raises(ValueError, match=msg): + read_json(dfjson, orient="table", convert_axes=True) + + @pytest.mark.parametrize( + "data, expected", + [ + ( + DataFrame([[1, 2], [4, 5]], columns=["a", "b"]), + {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, + ), + ( + DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"), + {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, + ), + ( + DataFrame( + [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]] + ), + {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, + ), + (Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}), + ( + Series([1, 2, 3], name="A").rename_axis("foo"), + {"name": "A", "data": [1, 2, 3]}, + ), + ( + Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]), + {"name": "A", "data": [1, 2]}, + ), + ], + ) + def test_index_false_to_json_split(self, data, expected): + # GH 17394 + # Testing index=False in to_json with orient='split' + + result = data.to_json(orient="split", index=False) + result = json.loads(result) + + assert result == expected + + @pytest.mark.parametrize( + "data", + [ + (DataFrame([[1, 2], [4, 5]], columns=["a", "b"])), + (DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")), + ( + DataFrame( + [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]] + ) + ), + (Series([1, 
2, 3], name="A")), + (Series([1, 2, 3], name="A").rename_axis("foo")), + (Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])), + ], + ) + def test_index_false_to_json_table(self, data): + # GH 17394 + # Testing index=False in to_json with orient='table' + + result = data.to_json(orient="table", index=False) + result = json.loads(result) + + expected = { + "schema": pd.io.json.build_table_schema(data, index=False), + "data": DataFrame(data).to_dict(orient="records"), + } + + assert result == expected + + @pytest.mark.parametrize("orient", ["index", "columns"]) + def test_index_false_error_to_json(self, orient): + # GH 17394, 25513 + # Testing error message from to_json with index=False + + df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"]) + + msg = ( + "'index=False' is only valid when 'orient' is 'split', " + "'table', 'records', or 'values'" + ) + with pytest.raises(ValueError, match=msg): + df.to_json(orient=orient, index=False) + + @pytest.mark.parametrize("orient", ["records", "values"]) + def test_index_true_error_to_json(self, orient): + # GH 25513 + # Testing error message from to_json with index=True + + df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"]) + + msg = ( + "'index=True' is only valid when 'orient' is 'split', " + "'table', 'index', or 'columns'" + ) + with pytest.raises(ValueError, match=msg): + df.to_json(orient=orient, index=True) + + @pytest.mark.parametrize("orient", ["split", "table"]) + @pytest.mark.parametrize("index", [True, False]) + def test_index_false_from_json_to_json(self, orient, index): + # GH25170 + # Test index=False in from_json to_json + expected = DataFrame({"a": [1, 2], "b": [3, 4]}) + dfjson = expected.to_json(orient=orient, index=index) + result = read_json(StringIO(dfjson), orient=orient) + tm.assert_frame_equal(result, expected) + + def test_read_timezone_information(self): + # GH 25546 + result = read_json( + StringIO('{"2019-01-01T11:00:00.000Z":88}'), typ="series", orient="index" + ) + exp_dti = DatetimeIndex(["2019-01-01 11:00:00"], dtype="M8[ns, UTC]") + expected = Series([88], index=exp_dti) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "url", + [ + "s3://example-fsspec/", + "gcs://another-fsspec/file.json", + "https://example-site.com/data", + "some-protocol://data.txt", + ], + ) + def test_read_json_with_url_value(self, url): + # GH 36271 + result = read_json(StringIO(f'{{"url":{{"0":"{url}"}}}}')) + expected = DataFrame({"url": [url]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "compression", + ["", ".gz", ".bz2", ".tar"], + ) + def test_read_json_with_very_long_file_path(self, compression): + # GH 46718 + long_json_path = f'{"a" * 1000}.json{compression}' + with pytest.raises( + FileNotFoundError, match=f"File {long_json_path} does not exist" + ): + # path too long for Windows is handled in file_exists() but raises in + # _get_data_from_filepath() + read_json(long_json_path) + + @pytest.mark.parametrize( + "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")] + ) + def test_timedelta_as_label(self, date_format, key): + df = DataFrame([[1]], columns=[pd.Timedelta("1D")]) + expected = f'{{"{key}":{{"0":1}}}}' + result = df.to_json(date_format=date_format) + + assert result == expected + + @pytest.mark.parametrize( + "orient,expected", + [ + ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"), + ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"), + # TODO: the below have separate encoding procedures + pytest.param( + "split", + "", + marks=pytest.mark.xfail( + 
reason="Produces JSON but not in a consistent manner" + ), + ), + pytest.param( + "table", + "", + marks=pytest.mark.xfail( + reason="Produces JSON but not in a consistent manner" + ), + ), + ], + ) + def test_tuple_labels(self, orient, expected): + # GH 20500 + df = DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")]) + result = df.to_json(orient=orient) + assert result == expected + + @pytest.mark.parametrize("indent", [1, 2, 4]) + def test_to_json_indent(self, indent): + # GH 12004 + df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"]) + + result = df.to_json(indent=indent) + spaces = " " * indent + expected = f"""{{ +{spaces}"a":{{ +{spaces}{spaces}"0":"foo", +{spaces}{spaces}"1":"baz" +{spaces}}}, +{spaces}"b":{{ +{spaces}{spaces}"0":"bar", +{spaces}{spaces}"1":"qux" +{spaces}}} +}}""" + + assert result == expected + + @pytest.mark.skipif( + using_pyarrow_string_dtype(), + reason="Adjust expected when infer_string is default, no bug here, " + "just a complicated parametrization", + ) + @pytest.mark.parametrize( + "orient,expected", + [ + ( + "split", + """{ + "columns":[ + "a", + "b" + ], + "index":[ + 0, + 1 + ], + "data":[ + [ + "foo", + "bar" + ], + [ + "baz", + "qux" + ] + ] +}""", + ), + ( + "records", + """[ + { + "a":"foo", + "b":"bar" + }, + { + "a":"baz", + "b":"qux" + } +]""", + ), + ( + "index", + """{ + "0":{ + "a":"foo", + "b":"bar" + }, + "1":{ + "a":"baz", + "b":"qux" + } +}""", + ), + ( + "columns", + """{ + "a":{ + "0":"foo", + "1":"baz" + }, + "b":{ + "0":"bar", + "1":"qux" + } +}""", + ), + ( + "values", + """[ + [ + "foo", + "bar" + ], + [ + "baz", + "qux" + ] +]""", + ), + ( + "table", + """{ + "schema":{ + "fields":[ + { + "name":"index", + "type":"integer" + }, + { + "name":"a", + "type":"string" + }, + { + "name":"b", + "type":"string" + } + ], + "primaryKey":[ + "index" + ], + "pandas_version":"1.4.0" + }, + "data":[ + { + "index":0, + "a":"foo", + "b":"bar" + }, + { + "index":1, + "a":"baz", + "b":"qux" + } + ] +}""", + ), + ], + ) + def test_json_indent_all_orients(self, orient, expected): + # GH 12004 + df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"]) + result = df.to_json(orient=orient, indent=4) + assert result == expected + + def test_json_negative_indent_raises(self): + with pytest.raises(ValueError, match="must be a nonnegative integer"): + DataFrame().to_json(indent=-1) + + def test_emca_262_nan_inf_support(self): + # GH 12213 + data = StringIO( + '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]' + ) + result = read_json(data) + expected = DataFrame( + ["a", None, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"] + ) + tm.assert_frame_equal(result, expected) + + def test_frame_int_overflow(self): + # GH 30320 + encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}]) + expected = DataFrame({"col": ["31900441201190696999", "Text"]}) + result = read_json(StringIO(encoded_json)) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dataframe,expected", + [ + ( + DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}), + '{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,' + '"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}', + ) + ], + ) + def test_json_multiindex(self, dataframe, expected): + series = dataframe.stack(future_stack=True) + result = series.to_json(orient="index") + assert result == expected + + @pytest.mark.single_cpu + def test_to_s3(self, s3_public_bucket, s3so): + # GH 28375 + mock_bucket_name, target_file = s3_public_bucket.name, "test.json" + 
df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) + df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so) + timeout = 5 + while True: + if target_file in (obj.key for obj in s3_public_bucket.objects.all()): + break + time.sleep(0.1) + timeout -= 0.1 + assert timeout > 0, "Timed out waiting for file to appear on moto" + + def test_json_pandas_nulls(self, nulls_fixture, request): + # GH 31615 + if isinstance(nulls_fixture, Decimal): + mark = pytest.mark.xfail(reason="not implemented") + request.applymarker(mark) + + result = DataFrame([[nulls_fixture]]).to_json() + assert result == '{"0":{"0":null}}' + + def test_readjson_bool_series(self): + # GH31464 + result = read_json(StringIO("[true, true, false]"), typ="series") + expected = Series([True, True, False]) + tm.assert_series_equal(result, expected) + + def test_to_json_multiindex_escape(self): + # GH 15273 + df = DataFrame( + True, + index=date_range("2017-01-20", "2017-01-23"), + columns=["foo", "bar"], + ).stack(future_stack=True) + result = df.to_json() + expected = ( + "{\"(Timestamp('2017-01-20 00:00:00'), 'foo')\":true," + "\"(Timestamp('2017-01-20 00:00:00'), 'bar')\":true," + "\"(Timestamp('2017-01-21 00:00:00'), 'foo')\":true," + "\"(Timestamp('2017-01-21 00:00:00'), 'bar')\":true," + "\"(Timestamp('2017-01-22 00:00:00'), 'foo')\":true," + "\"(Timestamp('2017-01-22 00:00:00'), 'bar')\":true," + "\"(Timestamp('2017-01-23 00:00:00'), 'foo')\":true," + "\"(Timestamp('2017-01-23 00:00:00'), 'bar')\":true}" + ) + assert result == expected + + def test_to_json_series_of_objects(self): + class _TestObject: + def __init__(self, a, b, _c, d) -> None: + self.a = a + self.b = b + self._c = _c + self.d = d + + def e(self): + return 5 + + # JSON keys should be all non-callable non-underscore attributes, see GH-42768 + series = Series([_TestObject(a=1, b=2, _c=3, d=4)]) + assert json.loads(series.to_json()) == {"0": {"a": 1, "b": 2, "d": 4}} + + @pytest.mark.parametrize( + "data,expected", + [ + ( + Series({0: -6 + 8j, 1: 0 + 1j, 2: 9 - 5j}), + '{"0":{"imag":8.0,"real":-6.0},' + '"1":{"imag":1.0,"real":0.0},' + '"2":{"imag":-5.0,"real":9.0}}', + ), + ( + Series({0: -9.39 + 0.66j, 1: 3.95 + 9.32j, 2: 4.03 - 0.17j}), + '{"0":{"imag":0.66,"real":-9.39},' + '"1":{"imag":9.32,"real":3.95},' + '"2":{"imag":-0.17,"real":4.03}}', + ), + ( + DataFrame([[-2 + 3j, -1 - 0j], [4 - 3j, -0 - 10j]]), + '{"0":{"0":{"imag":3.0,"real":-2.0},' + '"1":{"imag":-3.0,"real":4.0}},' + '"1":{"0":{"imag":0.0,"real":-1.0},' + '"1":{"imag":-10.0,"real":0.0}}}', + ), + ( + DataFrame( + [[-0.28 + 0.34j, -1.08 - 0.39j], [0.41 - 0.34j, -0.78 - 1.35j]] + ), + '{"0":{"0":{"imag":0.34,"real":-0.28},' + '"1":{"imag":-0.34,"real":0.41}},' + '"1":{"0":{"imag":-0.39,"real":-1.08},' + '"1":{"imag":-1.35,"real":-0.78}}}', + ), + ], + ) + def test_complex_data_tojson(self, data, expected): + # GH41174 + result = data.to_json() + assert result == expected + + def test_json_uint64(self): + # GH21073 + expected = ( + '{"columns":["col1"],"index":[0,1],' + '"data":[[13342205958987758245],[12388075603347835679]]}' + ) + df = DataFrame(data={"col1": [13342205958987758245, 12388075603347835679]}) + result = df.to_json(orient="split") + assert result == expected + + @pytest.mark.parametrize( + "orient", ["split", "records", "values", "index", "columns"] + ) + def test_read_json_dtype_backend( + self, string_storage, dtype_backend, orient, using_infer_string + ): + # GH#50750 + pa = pytest.importorskip("pyarrow") + df = DataFrame( + { + "a": Series([1, np.nan, 3], 
dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) + + if using_infer_string: + string_array = ArrowStringArrayNumpySemantics(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArrayNumpySemantics(pa.array(["a", "b", None])) + elif string_storage == "python": + string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_)) + string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) + + else: + string_array = ArrowStringArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArray(pa.array(["a", "b", None])) + + out = df.to_json(orient=orient) + with pd.option_context("mode.string_storage", string_storage): + result = read_json( + StringIO(out), dtype_backend=dtype_backend, orient=orient + ) + + expected = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": Series([True, False, NA], dtype="boolean"), + "f": Series([True, False, True], dtype="boolean"), + "g": string_array, + "h": string_array_na, + } + ) + + if dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + + if orient == "values": + expected.columns = list(range(8)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("orient", ["split", "records", "index"]) + def test_read_json_nullable_series(self, string_storage, dtype_backend, orient): + # GH#50750 + pa = pytest.importorskip("pyarrow") + ser = Series([1, np.nan, 3], dtype="Int64") + + out = ser.to_json(orient=orient) + with pd.option_context("mode.string_storage", string_storage): + result = read_json( + StringIO(out), dtype_backend=dtype_backend, orient=orient, typ="series" + ) + + expected = Series([1, np.nan, 3], dtype="Int64") + + if dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + expected = Series(ArrowExtensionArray(pa.array(expected, from_pandas=True))) + + tm.assert_series_equal(result, expected) + + def test_invalid_dtype_backend(self): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + with pytest.raises(ValueError, match=msg): + read_json("test", dtype_backend="numpy") + + +def test_invalid_engine(): + # GH 48893 + ser = Series(range(1)) + out = ser.to_json() + with pytest.raises(ValueError, match="The engine type foo"): + read_json(out, engine="foo") + + +def test_pyarrow_engine_lines_false(): + # GH 48893 + ser = Series(range(1)) + out = ser.to_json() + with pytest.raises(ValueError, match="currently pyarrow engine only supports"): + read_json(out, engine="pyarrow", lines=False) + + +def test_json_roundtrip_string_inference(orient): + pytest.importorskip("pyarrow") + df = DataFrame( + [["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"] + ) + out = df.to_json() + with pd.option_context("future.infer_string", True): + result = read_json(StringIO(out)) + expected = DataFrame( + [["a", "b"], ["c", "d"]], + dtype="string[pyarrow_numpy]", + index=Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"), + columns=Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) + + +def test_json_pos_args_deprecation(): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_json except for the " + r"argument 'path_or_buf' will be keyword-only." + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buf = BytesIO() + df.to_json(buf, "split") + + +@td.skip_if_no("pyarrow") +def test_to_json_ea_null(): + # GH#57224 + df = DataFrame( + { + "a": Series([1, NA], dtype="int64[pyarrow]"), + "b": Series([2, NA], dtype="Int64"), + } + ) + result = df.to_json(orient="records", lines=True) + expected = """{"a":1,"b":2} +{"a":null,"b":null} +""" + assert result == expected + + +def test_read_json_lines_rangeindex(): + # GH 57429 + data = """ +{"a": 1, "b": 2} +{"a": 3, "b": 4} +""" + result = read_json(StringIO(data), lines=True).index + expected = RangeIndex(2) + tm.assert_index_equal(result, expected, exact=True) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py new file mode 100644 index 0000000000000000000000000000000000000000..d96ccb4b94cc2c567c8d7e59cad3227017f9200d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py @@ -0,0 +1,543 @@ +from collections.abc import Iterator +from io import StringIO +from pathlib import Path + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + read_json, +) +import pandas._testing as tm + +from pandas.io.json._json import JsonReader + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def lines_json_df(): + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + return df.to_json(lines=True, orient="records") + + +@pytest.fixture(params=["ujson", "pyarrow"]) +def engine(request): + if request.param == "pyarrow": + pytest.importorskip("pyarrow.json") + return request.param + + +def test_read_jsonl(): + # GH9180 + result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + +def test_read_jsonl_engine_pyarrow(datapath, engine): + result = read_json( + datapath("io", "json", "data", "line_delimited.json"), + lines=True, + engine=engine, + ) + expected = DataFrame({"a": [1, 3, 5], "b": [2, 4, 6]}) + 
tm.assert_frame_equal(result, expected) + + +def test_read_datetime(request, engine): + # GH33787 + if engine == "pyarrow": + # GH 48893 + reason = "Pyarrow only supports a file path as an input and line delimited json" + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + df = DataFrame( + [([1, 2], ["2020-03-05", "2020-04-08T09:58:49+00:00"], "hector")], + columns=["accounts", "date", "name"], + ) + json_line = df.to_json(lines=True, orient="records") + + if engine == "pyarrow": + result = read_json(StringIO(json_line), engine=engine) + else: + result = read_json(StringIO(json_line), engine=engine) + expected = DataFrame( + [[1, "2020-03-05", "hector"], [2, "2020-04-08T09:58:49+00:00", "hector"]], + columns=["accounts", "date", "name"], + ) + tm.assert_frame_equal(result, expected) + + +def test_read_jsonl_unicode_chars(): + # GH15132: non-ascii unicode characters + # \u201d == RIGHT DOUBLE QUOTATION MARK + + # simulate file handle + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + json = StringIO(json) + result = read_json(json, lines=True) + expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + # simulate string + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + result = read_json(StringIO(json), lines=True) + expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + +def test_to_jsonl(): + # GH9180 + df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n' + assert result == expected + + df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n' + assert result == expected + tm.assert_frame_equal(read_json(StringIO(result), lines=True), df) + + # GH15096: escaped characters in columns and data + df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n' + assert result == expected + tm.assert_frame_equal(read_json(StringIO(result), lines=True), df) + + +def test_to_jsonl_count_new_lines(): + # GH36888 + df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + actual_new_lines_count = df.to_json(orient="records", lines=True).count("\n") + expected_new_lines_count = 2 + assert actual_new_lines_count == expected_new_lines_count + + +@pytest.mark.parametrize("chunksize", [1, 1.0]) +def test_readjson_chunks(request, lines_json_df, chunksize, engine): + # Basic test that read_json(chunks=True) gives the same result as + # read_json(chunks=False) + # GH17048: memory usage when lines=True + + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." 
+ ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + unchunked = read_json(StringIO(lines_json_df), lines=True) + with read_json( + StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine + ) as reader: + chunked = pd.concat(reader) + + tm.assert_frame_equal(chunked, unchunked) + + +def test_readjson_chunksize_requires_lines(lines_json_df, engine): + msg = "chunksize can only be passed if lines=True" + with pytest.raises(ValueError, match=msg): + with read_json( + StringIO(lines_json_df), lines=False, chunksize=2, engine=engine + ) as _: + pass + + +def test_readjson_chunks_series(request, engine): + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." + ) + request.applymarker(pytest.mark.xfail(reason=reason)) + + # Test reading line-format JSON to Series with chunksize param + s = pd.Series({"A": 1, "B": 2}) + + strio = StringIO(s.to_json(lines=True, orient="records")) + unchunked = read_json(strio, lines=True, typ="Series", engine=engine) + + strio = StringIO(s.to_json(lines=True, orient="records")) + with read_json( + strio, lines=True, typ="Series", chunksize=1, engine=engine + ) as reader: + chunked = pd.concat(reader) + + tm.assert_series_equal(chunked, unchunked) + + +def test_readjson_each_chunk(request, lines_json_df, engine): + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." + ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + # Other tests check that the final result of read_json(chunksize=True) + # is correct. This checks the intermediate chunks. + with read_json( + StringIO(lines_json_df), lines=True, chunksize=2, engine=engine + ) as reader: + chunks = list(reader) + assert chunks[0].shape == (2, 2) + assert chunks[1].shape == (1, 2) + + +def test_readjson_chunks_from_file(request, engine): + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." 
+ ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + with tm.ensure_clean("test.json") as path: + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + df.to_json(path, lines=True, orient="records") + with read_json(path, lines=True, chunksize=1, engine=engine) as reader: + chunked = pd.concat(reader) + unchunked = read_json(path, lines=True, engine=engine) + tm.assert_frame_equal(unchunked, chunked) + + +@pytest.mark.parametrize("chunksize", [None, 1]) +def test_readjson_chunks_closes(chunksize): + with tm.ensure_clean("test.json") as path: + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + df.to_json(path, lines=True, orient="records") + reader = JsonReader( + path, + orient=None, + typ="frame", + dtype=True, + convert_axes=True, + convert_dates=True, + keep_default_dates=True, + precise_float=False, + date_unit=None, + encoding=None, + lines=True, + chunksize=chunksize, + compression=None, + nrows=None, + ) + with reader: + reader.read() + assert ( + reader.handles.handle.closed + ), f"didn't close stream with chunksize = {chunksize}" + + +@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"]) +def test_readjson_invalid_chunksize(lines_json_df, chunksize, engine): + msg = r"'chunksize' must be an integer >=1" + + with pytest.raises(ValueError, match=msg): + with read_json( + StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine + ) as _: + pass + + +@pytest.mark.parametrize("chunksize", [None, 1, 2]) +def test_readjson_chunks_multiple_empty_lines(chunksize): + j = """ + + {"A":1,"B":4} + + + + {"A":2,"B":5} + + + + + + + + {"A":3,"B":6} + """ + orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) + test = read_json(StringIO(j), lines=True, chunksize=chunksize) + if chunksize is not None: + with test: + test = pd.concat(test) + tm.assert_frame_equal(orig, test, obj=f"chunksize: {chunksize}") + + +def test_readjson_unicode(request, monkeypatch, engine): + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." + ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + with tm.ensure_clean("test.json") as path: + monkeypatch.setattr("locale.getpreferredencoding", lambda do_setlocale: "cp949") + with open(path, "w", encoding="utf-8") as f: + f.write('{"£©µÀÆÖÞßéöÿ":["АБВГДабвгд가"]}') + + result = read_json(path, engine=engine) + expected = DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("nrows", [1, 2]) +def test_readjson_nrows(nrows, engine): + # GH 33916 + # Test reading line-format JSON to Series with nrows param + jsonl = """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}""" + result = read_json(StringIO(jsonl), lines=True, nrows=nrows) + expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("nrows,chunksize", [(2, 2), (4, 2)]) +def test_readjson_nrows_chunks(request, nrows, chunksize, engine): + # GH 33916 + # Test reading line-format JSON to Series with nrows and chunksize param + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." 
+ ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + jsonl = """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}""" + + if engine != "pyarrow": + with read_json( + StringIO(jsonl), lines=True, nrows=nrows, chunksize=chunksize, engine=engine + ) as reader: + chunked = pd.concat(reader) + else: + with read_json( + jsonl, lines=True, nrows=nrows, chunksize=chunksize, engine=engine + ) as reader: + chunked = pd.concat(reader) + expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows] + tm.assert_frame_equal(chunked, expected) + + +def test_readjson_nrows_requires_lines(engine): + # GH 33916 + # Test ValueError raised if nrows is set without setting lines in read_json + jsonl = """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}""" + msg = "nrows can only be passed if lines=True" + with pytest.raises(ValueError, match=msg): + read_json(jsonl, lines=False, nrows=2, engine=engine) + + +def test_readjson_lines_chunks_fileurl(request, datapath, engine): + # GH 27135 + # Test reading line-format JSON from file url + if engine == "pyarrow": + # GH 48893 + reason = ( + "Pyarrow only supports a file path as an input and line delimited json" + "and doesn't support chunksize parameter." + ) + request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError)) + + df_list_expected = [ + DataFrame([[1, 2]], columns=["a", "b"], index=[0]), + DataFrame([[3, 4]], columns=["a", "b"], index=[1]), + DataFrame([[5, 6]], columns=["a", "b"], index=[2]), + ] + os_path = datapath("io", "json", "data", "line_delimited.json") + file_url = Path(os_path).as_uri() + with read_json(file_url, lines=True, chunksize=1, engine=engine) as url_reader: + for index, chuck in enumerate(url_reader): + tm.assert_frame_equal(chuck, df_list_expected[index]) + + +def test_chunksize_is_incremental(): + # See https://github.com/pandas-dev/pandas/issues/34548 + jsonl = ( + """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}\n""" + * 1000 + ) + + class MyReader: + def __init__(self, contents) -> None: + self.read_count = 0 + self.stringio = StringIO(contents) + + def read(self, *args): + self.read_count += 1 + return self.stringio.read(*args) + + def __iter__(self) -> Iterator: + self.read_count += 1 + return iter(self.stringio) + + reader = MyReader(jsonl) + assert len(list(read_json(reader, lines=True, chunksize=100))) > 1 + assert reader.read_count > 10 + + +@pytest.mark.parametrize("orient_", ["split", "index", "table"]) +def test_to_json_append_orient(orient_): + # GH 35849 + # Test ValueError when orient is not 'records' + df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + msg = ( + r"mode='a' \(append\) is only supported when " + "lines is True and orient is 'records'" + ) + with pytest.raises(ValueError, match=msg): + df.to_json(mode="a", orient=orient_) + + +def test_to_json_append_lines(): + # GH 35849 + # Test ValueError when lines is not True + df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + msg = ( + r"mode='a' \(append\) is only supported when " + "lines is True and orient is 'records'" + ) + with pytest.raises(ValueError, match=msg): + df.to_json(mode="a", lines=False, orient="records") + + +@pytest.mark.parametrize("mode_", ["r", "x"]) +def test_to_json_append_mode(mode_): + # GH 35849 + # Test ValueError when mode is not supported option + df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + msg = ( + f"mode={mode_} is not a valid option." + "Only 'w' and 'a' are currently supported." 
+ ) + with pytest.raises(ValueError, match=msg): + df.to_json(mode=mode_, lines=False, orient="records") + + +def test_to_json_append_output_consistent_columns(): + # GH 35849 + # Testing that resulting output reads in as expected. + # Testing same columns, new rows + df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]}) + + expected = DataFrame({"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]}) + with tm.ensure_clean("test.json") as path: + # Save dataframes to the same file + df1.to_json(path, lines=True, orient="records") + df2.to_json(path, mode="a", lines=True, orient="records") + + # Read path file + result = read_json(path, lines=True) + tm.assert_frame_equal(result, expected) + + +def test_to_json_append_output_inconsistent_columns(): + # GH 35849 + # Testing that resulting output reads in as expected. + # Testing one new column, one old column, new rows + df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]}) + + expected = DataFrame( + { + "col1": [1, 2, None, None], + "col2": ["a", "b", "e", "f"], + "col3": [np.nan, np.nan, "!", "#"], + } + ) + with tm.ensure_clean("test.json") as path: + # Save dataframes to the same file + df1.to_json(path, mode="a", lines=True, orient="records") + df3.to_json(path, mode="a", lines=True, orient="records") + + # Read path file + result = read_json(path, lines=True) + tm.assert_frame_equal(result, expected) + + +def test_to_json_append_output_different_columns(): + # GH 35849 + # Testing that resulting output reads in as expected. + # Testing same, differing and new columns + df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]}) + df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]}) + df4 = DataFrame({"col4": [True, False]}) + + expected = DataFrame( + { + "col1": [1, 2, 3, 4, None, None, None, None], + "col2": ["a", "b", "c", "d", "e", "f", np.nan, np.nan], + "col3": [np.nan, np.nan, np.nan, np.nan, "!", "#", np.nan, np.nan], + "col4": [None, None, None, None, None, None, True, False], + } + ).astype({"col4": "float"}) + with tm.ensure_clean("test.json") as path: + # Save dataframes to the same file + df1.to_json(path, mode="a", lines=True, orient="records") + df2.to_json(path, mode="a", lines=True, orient="records") + df3.to_json(path, mode="a", lines=True, orient="records") + df4.to_json(path, mode="a", lines=True, orient="records") + + # Read path file + result = read_json(path, lines=True) + tm.assert_frame_equal(result, expected) + + +def test_to_json_append_output_different_columns_reordered(): + # GH 35849 + # Testing that resulting output reads in as expected. + # Testing specific result column order. 
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]}) + df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]}) + df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]}) + df4 = DataFrame({"col4": [True, False]}) + + # df4, df3, df2, df1 (in that order) + expected = DataFrame( + { + "col4": [True, False, None, None, None, None, None, None], + "col2": [np.nan, np.nan, "e", "f", "c", "d", "a", "b"], + "col3": [np.nan, np.nan, "!", "#", np.nan, np.nan, np.nan, np.nan], + "col1": [None, None, None, None, 3, 4, 1, 2], + } + ).astype({"col4": "float"}) + with tm.ensure_clean("test.json") as path: + # Save dataframes to the same file + df4.to_json(path, mode="a", lines=True, orient="records") + df3.to_json(path, mode="a", lines=True, orient="records") + df2.to_json(path, mode="a", lines=True, orient="records") + df1.to_json(path, mode="a", lines=True, orient="records") + + # Read path file + result = read_json(path, lines=True) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py new file mode 100644 index 0000000000000000000000000000000000000000..56ea9ea625dff721a2e989d858ae89c22aa69f4d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py @@ -0,0 +1,1087 @@ +import calendar +import datetime +import decimal +import json +import locale +import math +import re +import time + +import dateutil +import numpy as np +import pytest +import pytz + +import pandas._libs.json as ujson +from pandas.compat import IS64 + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + NaT, + PeriodIndex, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm + + +def _clean_dict(d): + """ + Sanitize dictionary for JSON by converting all keys to strings. + + Parameters + ---------- + d : dict + The dictionary to convert. + + Returns + ------- + cleaned_dict : dict + """ + return {str(k): v for k, v in d.items()} + + +@pytest.fixture( + params=[None, "split", "records", "values", "index"] # Column indexed by default. 
+) +def orient(request): + return request.param + + +class TestUltraJSONTests: + @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865") + def test_encode_decimal(self): + sut = decimal.Decimal("1337.1337") + encoded = ujson.ujson_dumps(sut, double_precision=15) + decoded = ujson.ujson_loads(encoded) + assert decoded == 1337.1337 + + sut = decimal.Decimal("0.95") + encoded = ujson.ujson_dumps(sut, double_precision=1) + assert encoded == "1.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 1.0 + + sut = decimal.Decimal("0.94") + encoded = ujson.ujson_dumps(sut, double_precision=1) + assert encoded == "0.9" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 0.9 + + sut = decimal.Decimal("1.95") + encoded = ujson.ujson_dumps(sut, double_precision=1) + assert encoded == "2.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 2.0 + + sut = decimal.Decimal("-1.95") + encoded = ujson.ujson_dumps(sut, double_precision=1) + assert encoded == "-2.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == -2.0 + + sut = decimal.Decimal("0.995") + encoded = ujson.ujson_dumps(sut, double_precision=2) + assert encoded == "1.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 1.0 + + sut = decimal.Decimal("0.9995") + encoded = ujson.ujson_dumps(sut, double_precision=3) + assert encoded == "1.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 1.0 + + sut = decimal.Decimal("0.99999999999999944") + encoded = ujson.ujson_dumps(sut, double_precision=15) + assert encoded == "1.0" + + decoded = ujson.ujson_loads(encoded) + assert decoded == 1.0 + + @pytest.mark.parametrize("ensure_ascii", [True, False]) + def test_encode_string_conversion(self, ensure_ascii): + string_input = "A string \\ / \b \f \n \r \t &" + not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"' + html_encoded = ( + '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"' + ) + + def helper(expected_output, **encode_kwargs): + output = ujson.ujson_dumps( + string_input, ensure_ascii=ensure_ascii, **encode_kwargs + ) + + assert output == expected_output + assert string_input == json.loads(output) + assert string_input == ujson.ujson_loads(output) + + # Default behavior assumes encode_html_chars=False. + helper(not_html_encoded) + + # Make sure explicit encode_html_chars=False works. + helper(not_html_encoded, encode_html_chars=False) + + # Make sure explicit encode_html_chars=True does the encoding. + helper(html_encoded, encode_html_chars=True) + + @pytest.mark.parametrize( + "long_number", [-4342969734183514, -12345678901234.56789012, -528656961.4399388] + ) + def test_double_long_numbers(self, long_number): + sut = {"a": long_number} + encoded = ujson.ujson_dumps(sut, double_precision=15) + + decoded = ujson.ujson_loads(encoded) + assert sut == decoded + + def test_encode_non_c_locale(self): + lc_category = locale.LC_NUMERIC + + # We just need one of these locales to work. 
+ for new_locale in ("it_IT.UTF-8", "Italian_Italy"): + if tm.can_set_locale(new_locale, lc_category): + with tm.set_locale(new_locale, lc_category): + assert ujson.ujson_loads(ujson.ujson_dumps(4.78e60)) == 4.78e60 + assert ujson.ujson_loads("4.78", precise_float=True) == 4.78 + break + + def test_decimal_decode_test_precise(self): + sut = {"a": 4.56} + encoded = ujson.ujson_dumps(sut) + decoded = ujson.ujson_loads(encoded, precise_float=True) + assert sut == decoded + + def test_encode_double_tiny_exponential(self): + num = 1e-40 + assert num == ujson.ujson_loads(ujson.ujson_dumps(num)) + num = 1e-100 + assert num == ujson.ujson_loads(ujson.ujson_dumps(num)) + num = -1e-45 + assert num == ujson.ujson_loads(ujson.ujson_dumps(num)) + num = -1e-145 + assert np.allclose(num, ujson.ujson_loads(ujson.ujson_dumps(num))) + + @pytest.mark.parametrize("unicode_key", ["key1", "بن"]) + def test_encode_dict_with_unicode_keys(self, unicode_key): + unicode_dict = {unicode_key: "value1"} + assert unicode_dict == ujson.ujson_loads(ujson.ujson_dumps(unicode_dict)) + + @pytest.mark.parametrize( + "double_input", [math.pi, -math.pi] # Should work with negatives too. + ) + def test_encode_double_conversion(self, double_input): + output = ujson.ujson_dumps(double_input) + assert round(double_input, 5) == round(json.loads(output), 5) + assert round(double_input, 5) == round(ujson.ujson_loads(output), 5) + + def test_encode_with_decimal(self): + decimal_input = 1.0 + output = ujson.ujson_dumps(decimal_input) + + assert output == "1.0" + + def test_encode_array_of_nested_arrays(self): + nested_input = [[[[]]]] * 20 + output = ujson.ujson_dumps(nested_input) + + assert nested_input == json.loads(output) + assert nested_input == ujson.ujson_loads(output) + + def test_encode_array_of_doubles(self): + doubles_input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10 + output = ujson.ujson_dumps(doubles_input) + + assert doubles_input == json.loads(output) + assert doubles_input == ujson.ujson_loads(output) + + def test_double_precision(self): + double_input = 30.012345678901234 + output = ujson.ujson_dumps(double_input, double_precision=15) + + assert double_input == json.loads(output) + assert double_input == ujson.ujson_loads(output) + + for double_precision in (3, 9): + output = ujson.ujson_dumps(double_input, double_precision=double_precision) + rounded_input = round(double_input, double_precision) + + assert rounded_input == json.loads(output) + assert rounded_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize( + "invalid_val", + [ + 20, + -1, + "9", + None, + ], + ) + def test_invalid_double_precision(self, invalid_val): + double_input = 30.12345678901234567890 + expected_exception = ValueError if isinstance(invalid_val, int) else TypeError + msg = ( + r"Invalid value '.*' for option 'double_precision', max is '15'|" + r"an integer is required \(got type |" + r"object cannot be interpreted as an integer" + ) + with pytest.raises(expected_exception, match=msg): + ujson.ujson_dumps(double_input, double_precision=invalid_val) + + def test_encode_string_conversion2(self): + string_input = "A string \\ / \b \f \n \r \t" + output = ujson.ujson_dumps(string_input) + + assert string_input == json.loads(output) + assert string_input == ujson.ujson_loads(output) + assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"' + + @pytest.mark.parametrize( + "unicode_input", + ["Räksmörgås اسامة بن محمد بن عوض بن لادن", "\xe6\x97\xa5\xd1\x88"], + ) + def test_encode_unicode_conversion(self, 
unicode_input): + enc = ujson.ujson_dumps(unicode_input) + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(unicode_input) + assert dec == json.loads(enc) + + def test_encode_control_escaping(self): + escaped_input = "\x19" + enc = ujson.ujson_dumps(escaped_input) + dec = ujson.ujson_loads(enc) + + assert escaped_input == dec + assert enc == json.dumps(escaped_input) + + def test_encode_unicode_surrogate_pair(self): + surrogate_input = "\xf0\x90\x8d\x86" + enc = ujson.ujson_dumps(surrogate_input) + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(surrogate_input) + assert dec == json.loads(enc) + + def test_encode_unicode_4bytes_utf8(self): + four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL" + enc = ujson.ujson_dumps(four_bytes_input) + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(four_bytes_input) + assert dec == json.loads(enc) + + def test_encode_unicode_4bytes_utf8highest(self): + four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL" + enc = ujson.ujson_dumps(four_bytes_input) + + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(four_bytes_input) + assert dec == json.loads(enc) + + def test_encode_unicode_error(self): + string = "'\udac0'" + msg = ( + r"'utf-8' codec can't encode character '\\udac0' " + r"in position 1: surrogates not allowed" + ) + with pytest.raises(UnicodeEncodeError, match=msg): + ujson.ujson_dumps([string]) + + def test_encode_array_in_array(self): + arr_in_arr_input = [[[[]]]] + output = ujson.ujson_dumps(arr_in_arr_input) + + assert arr_in_arr_input == json.loads(output) + assert output == json.dumps(arr_in_arr_input) + assert arr_in_arr_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize( + "num_input", + [ + 31337, + -31337, # Negative number. + -9223372036854775808, # Large negative number. 
+ ], + ) + def test_encode_num_conversion(self, num_input): + output = ujson.ujson_dumps(num_input) + assert num_input == json.loads(output) + assert output == json.dumps(num_input) + assert num_input == ujson.ujson_loads(output) + + def test_encode_list_conversion(self): + list_input = [1, 2, 3, 4] + output = ujson.ujson_dumps(list_input) + + assert list_input == json.loads(output) + assert list_input == ujson.ujson_loads(output) + + def test_encode_dict_conversion(self): + dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4} + output = ujson.ujson_dumps(dict_input) + + assert dict_input == json.loads(output) + assert dict_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize("builtin_value", [None, True, False]) + def test_encode_builtin_values_conversion(self, builtin_value): + output = ujson.ujson_dumps(builtin_value) + assert builtin_value == json.loads(output) + assert output == json.dumps(builtin_value) + assert builtin_value == ujson.ujson_loads(output) + + def test_encode_datetime_conversion(self): + datetime_input = datetime.datetime.fromtimestamp(time.time()) + output = ujson.ujson_dumps(datetime_input, date_unit="s") + expected = calendar.timegm(datetime_input.utctimetuple()) + + assert int(expected) == json.loads(output) + assert int(expected) == ujson.ujson_loads(output) + + def test_encode_date_conversion(self): + date_input = datetime.date.fromtimestamp(time.time()) + output = ujson.ujson_dumps(date_input, date_unit="s") + + tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0) + expected = calendar.timegm(tup) + + assert int(expected) == json.loads(output) + assert int(expected) == ujson.ujson_loads(output) + + @pytest.mark.parametrize( + "test", + [datetime.time(), datetime.time(1, 2, 3), datetime.time(10, 12, 15, 343243)], + ) + def test_encode_time_conversion_basic(self, test): + output = ujson.ujson_dumps(test) + expected = f'"{test.isoformat()}"' + assert expected == output + + def test_encode_time_conversion_pytz(self): + # see gh-11473: to_json segfaults with timezone-aware datetimes + test = datetime.time(10, 12, 15, 343243, pytz.utc) + output = ujson.ujson_dumps(test) + expected = f'"{test.isoformat()}"' + assert expected == output + + def test_encode_time_conversion_dateutil(self): + # see gh-11473: to_json segfaults with timezone-aware datetimes + test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc()) + output = ujson.ujson_dumps(test) + expected = f'"{test.isoformat()}"' + assert expected == output + + @pytest.mark.parametrize( + "decoded_input", [NaT, np.datetime64("NaT"), np.nan, np.inf, -np.inf] + ) + def test_encode_as_null(self, decoded_input): + assert ujson.ujson_dumps(decoded_input) == "null", "Expected null" + + def test_datetime_units(self): + val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504) + stamp = Timestamp(val).as_unit("ns") + + roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="s")) + assert roundtrip == stamp._value // 10**9 + + roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ms")) + assert roundtrip == stamp._value // 10**6 + + roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="us")) + assert roundtrip == stamp._value // 10**3 + + roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ns")) + assert roundtrip == stamp._value + + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + ujson.ujson_dumps(val, date_unit="foo") + + def test_encode_to_utf8(self): + unencoded = "\xe6\x97\xa5\xd1\x88" + + enc = 
ujson.ujson_dumps(unencoded, ensure_ascii=False) + dec = ujson.ujson_loads(enc) + + assert enc == json.dumps(unencoded, ensure_ascii=False) + assert dec == json.loads(enc) + + def test_decode_from_unicode(self): + unicode_input = '{"obj": 31337}' + + dec1 = ujson.ujson_loads(unicode_input) + dec2 = ujson.ujson_loads(str(unicode_input)) + + assert dec1 == dec2 + + def test_encode_recursion_max(self): + # 8 is the max recursion depth + + class O2: + member = 0 + + class O1: + member = 0 + + decoded_input = O1() + decoded_input.member = O2() + decoded_input.member.member = decoded_input + + with pytest.raises(OverflowError, match="Maximum recursion level reached"): + ujson.ujson_dumps(decoded_input) + + def test_decode_jibberish(self): + jibberish = "fdsa sda v9sa fdsa" + msg = "Unexpected character found when decoding 'false'" + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(jibberish) + + @pytest.mark.parametrize( + "broken_json", + [ + "[", # Broken array start. + "{", # Broken object start. + "]", # Broken array end. + "}", # Broken object end. + ], + ) + def test_decode_broken_json(self, broken_json): + msg = "Expected object or value" + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(broken_json) + + @pytest.mark.parametrize("too_big_char", ["[", "{"]) + def test_decode_depth_too_big(self, too_big_char): + with pytest.raises(ValueError, match="Reached object decoding depth limit"): + ujson.ujson_loads(too_big_char * (1024 * 1024)) + + @pytest.mark.parametrize( + "bad_string", + [ + '"TESTING', # Unterminated. + '"TESTING\\"', # Unterminated escape. + "tru", # Broken True. + "fa", # Broken False. + "n", # Broken None. + ], + ) + def test_decode_bad_string(self, bad_string): + msg = ( + "Unexpected character found when decoding|" + "Unmatched ''\"' when when decoding 'string'" + ) + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(bad_string) + + @pytest.mark.parametrize( + "broken_json, err_msg", + [ + ( + '{{1337:""}}', + "Key name of object must be 'string' when decoding 'object'", + ), + ('{{"key":"}', "Unmatched ''\"' when when decoding 'string'"), + ("[[[true", "Unexpected character found when decoding array value (2)"), + ], + ) + def test_decode_broken_json_leak(self, broken_json, err_msg): + for _ in range(1000): + with pytest.raises(ValueError, match=re.escape(err_msg)): + ujson.ujson_loads(broken_json) + + @pytest.mark.parametrize( + "invalid_dict", + [ + "{{{{31337}}}}", # No key. + '{{{{"key":}}}}', # No value. + '{{{{"key"}}}}', # No colon or value. + ], + ) + def test_decode_invalid_dict(self, invalid_dict): + msg = ( + "Key name of object must be 'string' when decoding 'object'|" + "No ':' found when decoding object value|" + "Expected object or value" + ) + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(invalid_dict) + + @pytest.mark.parametrize( + "numeric_int_as_str", ["31337", "-31337"] # Should work with negatives. 
+ ) + def test_decode_numeric_int(self, numeric_int_as_str): + assert int(numeric_int_as_str) == ujson.ujson_loads(numeric_int_as_str) + + def test_encode_null_character(self): + wrapped_input = "31337 \x00 1337" + output = ujson.ujson_dumps(wrapped_input) + + assert wrapped_input == json.loads(output) + assert output == json.dumps(wrapped_input) + assert wrapped_input == ujson.ujson_loads(output) + + alone_input = "\x00" + output = ujson.ujson_dumps(alone_input) + + assert alone_input == json.loads(output) + assert output == json.dumps(alone_input) + assert alone_input == ujson.ujson_loads(output) + assert '" \\u0000\\r\\n "' == ujson.ujson_dumps(" \u0000\r\n ") + + def test_decode_null_character(self): + wrapped_input = '"31337 \\u0000 31337"' + assert ujson.ujson_loads(wrapped_input) == json.loads(wrapped_input) + + def test_encode_list_long_conversion(self): + long_input = [ + 9223372036854775807, + 9223372036854775807, + 9223372036854775807, + 9223372036854775807, + 9223372036854775807, + 9223372036854775807, + ] + output = ujson.ujson_dumps(long_input) + + assert long_input == json.loads(output) + assert long_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize("long_input", [9223372036854775807, 18446744073709551615]) + def test_encode_long_conversion(self, long_input): + output = ujson.ujson_dumps(long_input) + + assert long_input == json.loads(output) + assert output == json.dumps(long_input) + assert long_input == ujson.ujson_loads(output) + + @pytest.mark.parametrize("bigNum", [2**64, -(2**63) - 1]) + def test_dumps_ints_larger_than_maxsize(self, bigNum): + encoding = ujson.ujson_dumps(bigNum) + assert str(bigNum) == encoding + + with pytest.raises( + ValueError, + match="Value is too big|Value is too small", + ): + assert ujson.ujson_loads(encoding) == bigNum + + @pytest.mark.parametrize( + "int_exp", ["1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"] + ) + def test_decode_numeric_int_exp(self, int_exp): + assert ujson.ujson_loads(int_exp) == json.loads(int_exp) + + def test_loads_non_str_bytes_raises(self): + msg = "a bytes-like object is required, not 'NoneType'" + with pytest.raises(TypeError, match=msg): + ujson.ujson_loads(None) + + @pytest.mark.parametrize("val", [3590016419, 2**31, 2**32, (2**32) - 1]) + def test_decode_number_with_32bit_sign_bit(self, val): + # Test that numbers that fit within 32 bits but would have the + # sign bit set (2**31 <= x < 2**32) are decoded properly. + doc = f'{{"id": {val}}}' + assert ujson.ujson_loads(doc)["id"] == val + + def test_encode_big_escape(self): + # Make sure no Exception is raised. + for _ in range(10): + base = "\u00e5".encode() + escape_input = base * 1024 * 1024 * 2 + ujson.ujson_dumps(escape_input) + + def test_decode_big_escape(self): + # Make sure no Exception is raised. 
+ for _ in range(10): + base = "\u00e5".encode() + quote = b'"' + + escape_input = quote + (base * 1024 * 1024 * 2) + quote + ujson.ujson_loads(escape_input) + + def test_to_dict(self): + d = {"key": 31337} + + class DictTest: + def toDict(self): + return d + + o = DictTest() + output = ujson.ujson_dumps(o) + + dec = ujson.ujson_loads(output) + assert dec == d + + def test_default_handler(self): + class _TestObject: + def __init__(self, val) -> None: + self.val = val + + @property + def recursive_attr(self): + return _TestObject("recursive_attr") + + def __str__(self) -> str: + return str(self.val) + + msg = "Maximum recursion level reached" + with pytest.raises(OverflowError, match=msg): + ujson.ujson_dumps(_TestObject("foo")) + assert '"foo"' == ujson.ujson_dumps(_TestObject("foo"), default_handler=str) + + def my_handler(_): + return "foobar" + + assert '"foobar"' == ujson.ujson_dumps( + _TestObject("foo"), default_handler=my_handler + ) + + def my_handler_raises(_): + raise TypeError("I raise for anything") + + with pytest.raises(TypeError, match="I raise for anything"): + ujson.ujson_dumps(_TestObject("foo"), default_handler=my_handler_raises) + + def my_int_handler(_): + return 42 + + assert ( + ujson.ujson_loads( + ujson.ujson_dumps(_TestObject("foo"), default_handler=my_int_handler) + ) + == 42 + ) + + def my_obj_handler(_): + return datetime.datetime(2013, 2, 3) + + assert ujson.ujson_loads( + ujson.ujson_dumps(datetime.datetime(2013, 2, 3)) + ) == ujson.ujson_loads( + ujson.ujson_dumps(_TestObject("foo"), default_handler=my_obj_handler) + ) + + obj_list = [_TestObject("foo"), _TestObject("bar")] + assert json.loads(json.dumps(obj_list, default=str)) == ujson.ujson_loads( + ujson.ujson_dumps(obj_list, default_handler=str) + ) + + def test_encode_object(self): + class _TestObject: + def __init__(self, a, b, _c, d) -> None: + self.a = a + self.b = b + self._c = _c + self.d = d + + def e(self): + return 5 + + # JSON keys should be all non-callable non-underscore attributes, see GH-42768 + test_object = _TestObject(a=1, b=2, _c=3, d=4) + assert ujson.ujson_loads(ujson.ujson_dumps(test_object)) == { + "a": 1, + "b": 2, + "d": 4, + } + + def test_ujson__name__(self): + # GH 52898 + assert ujson.__name__ == "pandas._libs.json" + + +class TestNumpyJSONTests: + @pytest.mark.parametrize("bool_input", [True, False]) + def test_bool(self, bool_input): + b = bool(bool_input) + assert ujson.ujson_loads(ujson.ujson_dumps(b)) == b + + def test_bool_array(self): + bool_array = np.array( + [True, False, True, True, False, True, False, False], dtype=bool + ) + output = np.array(ujson.ujson_loads(ujson.ujson_dumps(bool_array)), dtype=bool) + tm.assert_numpy_array_equal(bool_array, output) + + def test_int(self, any_int_numpy_dtype): + klass = np.dtype(any_int_numpy_dtype).type + num = klass(1) + + assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num + + def test_int_array(self, any_int_numpy_dtype): + arr = np.arange(100, dtype=int) + arr_input = arr.astype(any_int_numpy_dtype) + + arr_output = np.array( + ujson.ujson_loads(ujson.ujson_dumps(arr_input)), dtype=any_int_numpy_dtype + ) + tm.assert_numpy_array_equal(arr_input, arr_output) + + def test_int_max(self, any_int_numpy_dtype): + if any_int_numpy_dtype in ("int64", "uint64") and not IS64: + pytest.skip("Cannot test 64-bit integer on 32-bit platform") + + klass = np.dtype(any_int_numpy_dtype).type + + # uint64 max will always overflow, + # as it's encoded to signed. 
+ if any_int_numpy_dtype == "uint64": + num = np.iinfo("int64").max + else: + num = np.iinfo(any_int_numpy_dtype).max + + assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num + + def test_float(self, float_numpy_dtype): + klass = np.dtype(float_numpy_dtype).type + num = klass(256.2013) + + assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num + + def test_float_array(self, float_numpy_dtype): + arr = np.arange(12.5, 185.72, 1.7322, dtype=float) + float_input = arr.astype(float_numpy_dtype) + + float_output = np.array( + ujson.ujson_loads(ujson.ujson_dumps(float_input, double_precision=15)), + dtype=float_numpy_dtype, + ) + tm.assert_almost_equal(float_input, float_output) + + def test_float_max(self, float_numpy_dtype): + klass = np.dtype(float_numpy_dtype).type + num = klass(np.finfo(float_numpy_dtype).max / 10) + + tm.assert_almost_equal( + klass(ujson.ujson_loads(ujson.ujson_dumps(num, double_precision=15))), num + ) + + def test_array_basic(self): + arr = np.arange(96) + arr = arr.reshape((2, 2, 2, 2, 3, 2)) + + tm.assert_numpy_array_equal( + np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr + ) + + @pytest.mark.parametrize("shape", [(10, 10), (5, 5, 4), (100, 1)]) + def test_array_reshaped(self, shape): + arr = np.arange(100) + arr = arr.reshape(shape) + + tm.assert_numpy_array_equal( + np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr + ) + + def test_array_list(self): + arr_list = [ + "a", + [], + {}, + {}, + [], + 42, + 97.8, + ["a", "b"], + {"key": "val"}, + ] + arr = np.array(arr_list, dtype=object) + result = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=object) + tm.assert_numpy_array_equal(result, arr) + + def test_array_float(self): + dtype = np.float32 + + arr = np.arange(100.202, 200.202, 1, dtype=dtype) + arr = arr.reshape((5, 5, 4)) + + arr_out = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=dtype) + tm.assert_almost_equal(arr, arr_out) + + def test_0d_array(self): + # gh-18878 + msg = re.escape( + "array(1) (numpy-scalar) is not JSON serializable at the moment" + ) + with pytest.raises(TypeError, match=msg): + ujson.ujson_dumps(np.array(1)) + + def test_array_long_double(self): + msg = re.compile( + "1234.5.* \\(numpy-scalar\\) is not JSON serializable at the moment" + ) + with pytest.raises(TypeError, match=msg): + ujson.ujson_dumps(np.longdouble(1234.5)) + + +class TestPandasJSONTests: + def test_dataframe(self, orient): + dtype = np.int64 + + df = DataFrame( + [[1, 2, 3], [4, 5, 6]], + index=["a", "b"], + columns=["x", "y", "z"], + dtype=dtype, + ) + encode_kwargs = {} if orient is None else {"orient": orient} + assert (df.dtypes == dtype).all() + + output = ujson.ujson_loads(ujson.ujson_dumps(df, **encode_kwargs)) + assert (df.dtypes == dtype).all() + + # Ensure proper DataFrame initialization. + if orient == "split": + dec = _clean_dict(output) + output = DataFrame(**dec) + else: + output = DataFrame(output) + + # Corrections to enable DataFrame comparison. 
+ if orient == "values": + df.columns = [0, 1, 2] + df.index = [0, 1] + elif orient == "records": + df.index = [0, 1] + elif orient == "index": + df = df.transpose() + + assert (df.dtypes == dtype).all() + tm.assert_frame_equal(output, df) + + def test_dataframe_nested(self, orient): + df = DataFrame( + [[1, 2, 3], [4, 5, 6]], index=["a", "b"], columns=["x", "y", "z"] + ) + + nested = {"df1": df, "df2": df.copy()} + kwargs = {} if orient is None else {"orient": orient} + + exp = { + "df1": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)), + "df2": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)), + } + assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp + + def test_series(self, orient): + dtype = np.int64 + s = Series( + [10, 20, 30, 40, 50, 60], + name="series", + index=[6, 7, 8, 9, 10, 15], + dtype=dtype, + ).sort_values() + assert s.dtype == dtype + + encode_kwargs = {} if orient is None else {"orient": orient} + + output = ujson.ujson_loads(ujson.ujson_dumps(s, **encode_kwargs)) + assert s.dtype == dtype + + if orient == "split": + dec = _clean_dict(output) + output = Series(**dec) + else: + output = Series(output) + + if orient in (None, "index"): + s.name = None + output = output.sort_values() + s.index = ["6", "7", "8", "9", "10", "15"] + elif orient in ("records", "values"): + s.name = None + s.index = [0, 1, 2, 3, 4, 5] + + assert s.dtype == dtype + tm.assert_series_equal(output, s) + + def test_series_nested(self, orient): + s = Series( + [10, 20, 30, 40, 50, 60], name="series", index=[6, 7, 8, 9, 10, 15] + ).sort_values() + nested = {"s1": s, "s2": s.copy()} + kwargs = {} if orient is None else {"orient": orient} + + exp = { + "s1": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)), + "s2": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)), + } + assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp + + def test_index(self): + i = Index([23, 45, 18, 98, 43, 11], name="index") + + # Column indexed. + output = Index(ujson.ujson_loads(ujson.ujson_dumps(i)), name="index") + tm.assert_index_equal(i, output) + + dec = _clean_dict(ujson.ujson_loads(ujson.ujson_dumps(i, orient="split"))) + output = Index(**dec) + + tm.assert_index_equal(i, output) + assert i.name == output.name + + tm.assert_index_equal(i, output) + assert i.name == output.name + + output = Index( + ujson.ujson_loads(ujson.ujson_dumps(i, orient="values")), name="index" + ) + tm.assert_index_equal(i, output) + + output = Index( + ujson.ujson_loads(ujson.ujson_dumps(i, orient="records")), name="index" + ) + tm.assert_index_equal(i, output) + + output = Index( + ujson.ujson_loads(ujson.ujson_dumps(i, orient="index")), name="index" + ) + tm.assert_index_equal(i, output) + + def test_datetime_index(self): + date_unit = "ns" + + # freq doesn't round-trip + rng = DatetimeIndex(list(date_range("1/1/2000", periods=20)), freq=None) + encoded = ujson.ujson_dumps(rng, date_unit=date_unit) + + decoded = DatetimeIndex(np.array(ujson.ujson_loads(encoded))) + tm.assert_index_equal(rng, decoded) + + ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng) + decoded = Series(ujson.ujson_loads(ujson.ujson_dumps(ts, date_unit=date_unit))) + + idx_values = decoded.index.values.astype(np.int64) + decoded.index = DatetimeIndex(idx_values) + tm.assert_series_equal(ts, decoded) + + @pytest.mark.parametrize( + "invalid_arr", + [ + "[31337,]", # Trailing comma. + "[,31337]", # Leading comma. + "[]]", # Unmatched bracket. + "[,]", # Only comma. 
+ ], + ) + def test_decode_invalid_array(self, invalid_arr): + msg = ( + "Expected object or value|Trailing data|" + "Unexpected character found when decoding array value" + ) + with pytest.raises(ValueError, match=msg): + ujson.ujson_loads(invalid_arr) + + @pytest.mark.parametrize("arr", [[], [31337]]) + def test_decode_array(self, arr): + assert arr == ujson.ujson_loads(str(arr)) + + @pytest.mark.parametrize("extreme_num", [9223372036854775807, -9223372036854775808]) + def test_decode_extreme_numbers(self, extreme_num): + assert extreme_num == ujson.ujson_loads(str(extreme_num)) + + @pytest.mark.parametrize("too_extreme_num", [f"{2**64}", f"{-2**63-1}"]) + def test_decode_too_extreme_numbers(self, too_extreme_num): + with pytest.raises( + ValueError, + match="Value is too big|Value is too small", + ): + ujson.ujson_loads(too_extreme_num) + + def test_decode_with_trailing_whitespaces(self): + assert {} == ujson.ujson_loads("{}\n\t ") + + def test_decode_with_trailing_non_whitespaces(self): + with pytest.raises(ValueError, match="Trailing data"): + ujson.ujson_loads("{}\n\t a") + + @pytest.mark.parametrize("value", [f"{2**64}", f"{-2**63-1}"]) + def test_decode_array_with_big_int(self, value): + with pytest.raises( + ValueError, + match="Value is too big|Value is too small", + ): + ujson.ujson_loads(value) + + @pytest.mark.parametrize( + "float_number", + [ + 1.1234567893, + 1.234567893, + 1.34567893, + 1.4567893, + 1.567893, + 1.67893, + 1.7893, + 1.893, + 1.3, + ], + ) + @pytest.mark.parametrize("sign", [-1, 1]) + def test_decode_floating_point(self, sign, float_number): + float_number *= sign + tm.assert_almost_equal( + float_number, ujson.ujson_loads(str(float_number)), rtol=1e-15 + ) + + def test_encode_big_set(self): + s = set() + + for x in range(100000): + s.add(x) + + # Make sure no Exception is raised. 
+ ujson.ujson_dumps(s) + + def test_encode_empty_set(self): + assert "[]" == ujson.ujson_dumps(set()) + + def test_encode_set(self): + s = {1, 2, 3, 4, 5, 6, 7, 8, 9} + enc = ujson.ujson_dumps(s) + dec = ujson.ujson_loads(enc) + + for v in dec: + assert v in s + + @pytest.mark.parametrize( + "td", + [ + Timedelta(days=366), + Timedelta(days=-1), + Timedelta(hours=13, minutes=5, seconds=5), + Timedelta(hours=13, minutes=20, seconds=30), + Timedelta(days=-1, nanoseconds=5), + Timedelta(nanoseconds=1), + Timedelta(microseconds=1, nanoseconds=1), + Timedelta(milliseconds=1, microseconds=1, nanoseconds=1), + Timedelta(milliseconds=999, microseconds=999, nanoseconds=999), + ], + ) + def test_encode_timedelta_iso(self, td): + # GH 28256 + result = ujson.ujson_dumps(td, iso_dates=True) + expected = f'"{td.isoformat()}"' + + assert result == expected + + def test_encode_periodindex(self): + # GH 46683 + p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D") + df = DataFrame(index=p) + assert df.to_json() == "{}" diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..590cb6447cf8f0a05a2aba492cb36377cca69a82 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_unsupported.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_unsupported.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50fbc3b3d6f8f8031b429d04f9d1babba92bf198 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_unsupported.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18bdcba97e37d46dcf137facb9c5e9f94b52725f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..830f93ce9b4c653a6b73b009d78c40b527bf557d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f895fafd3cb546017da50ab230a7f5f32a0fb298 Binary files /dev/null 
and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a40bca83964ccb2d67e3addd07d265419ce8a0c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caa08179ee45ed93afeea2da63ef95ca1b0c8ff0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3abc5256ff6759ec27ca270a192cd7fc6deb59e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29ecb07be655a308e7e3c485f64c1e9185f48e9b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fa6da95d87733eb24a72a0a88ee4ee8899aeabc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e08fb914d4738fb925073498b4dd59d7fe24dbd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c03ec773c5262f9a869e68997b2be8d6e149df7a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5cad9bcdd3f4287662ce2c9ccf3c0f9dfe9c997 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac322770a22e3678b5ad47b010a12f5e0359407c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da266f32776f154aaf4ccd52ff8c9a3ee9521f2d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fafa7ca3770248e631e34115beb07000f6194ac Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5534a9b9ed105fc4bec4618e8f8cb7bf589adbd6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..007d2b9e9a32bf05cfbb1ce66cdc69be38442288 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21d928623e5fcdf6df6a35418986aba2e3e5be1a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a85aa0e90237598e542870b059ac1c4b0fd7f19 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68907fed1c3d78a81aac97408b22f078ad99d54d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5e42c109d16dacaee8f3144c541392ee591b888 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py new file mode 100644 index 0000000000000000000000000000000000000000..62582b212eb387c94b095ca54029f971dc54e777 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py @@ -0,0 +1,50 @@ +from collections.abc import Generator +from contextlib import contextmanager +import pathlib +import tempfile + +import pytest + +from pandas.io.pytables import HDFStore + +tables = pytest.importorskip("tables") +# set these parameters so we don't have file sharing +tables.parameters.MAX_NUMEXPR_THREADS = 1 +tables.parameters.MAX_BLOSC_THREADS = 1 +tables.parameters.MAX_THREADS = 1 + + +def safe_close(store): + try: + if store is not None: + store.close() + except OSError: + pass + + +# contextmanager to ensure the file cleanup +@contextmanager +def ensure_clean_store( + path, mode="a", complevel=None, complib=None, fletcher32=False +) -> Generator[HDFStore, None, None]: + with tempfile.TemporaryDirectory() as tmpdirname: + tmp_path = pathlib.Path(tmpdirname, path) + with HDFStore( + tmp_path, + mode=mode, + complevel=complevel, + complib=complib, + fletcher32=fletcher32, + ) as store: + yield store + + +def _maybe_remove(store, key): + """ + For tests using tables, try removing the table to be sure there is + no content from previous tests using the same table name. 
+ """ + try: + store.remove(key) + except (ValueError, KeyError): + pass diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..466e4ae8bb99c6c4e0f24045f71b4c3aa27b7851 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py @@ -0,0 +1,9 @@ +import uuid + +import pytest + + +@pytest.fixture +def setup_path(): + """Fixture for setup path""" + return f"tmp.__{uuid.uuid4()}__.h5" diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py new file mode 100644 index 0000000000000000000000000000000000000000..00a81a4f1f385d044a21b987cfa9c5d2c65d1f0d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py @@ -0,0 +1,986 @@ +import datetime +from datetime import timedelta +import re + +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + _testing as tm, + concat, + date_range, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + +pytestmark = pytest.mark.single_cpu + +tables = pytest.importorskip("tables") + + +@pytest.mark.filterwarnings("ignore::tables.NaturalNameWarning") +def test_append(setup_path): + with ensure_clean_store(setup_path) as store: + # this is allowed by almost always don't want to do it + # tables.NaturalNameWarning): + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=20, freq="B"), + ) + _maybe_remove(store, "df1") + store.append("df1", df[:10]) + store.append("df1", df[10:]) + tm.assert_frame_equal(store["df1"], df) + + _maybe_remove(store, "df2") + store.put("df2", df[:10], format="table") + store.append("df2", df[10:]) + tm.assert_frame_equal(store["df2"], df) + + _maybe_remove(store, "df3") + store.append("/df3", df[:10]) + store.append("/df3", df[10:]) + tm.assert_frame_equal(store["df3"], df) + + # this is allowed by almost always don't want to do it + # tables.NaturalNameWarning + _maybe_remove(store, "/df3 foo") + store.append("/df3 foo", df[:10]) + store.append("/df3 foo", df[10:]) + tm.assert_frame_equal(store["df3 foo"], df) + + # dtype issues - mizxed type in a single object column + df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]]) + df["mixed_column"] = "testing" + df.loc[2, "mixed_column"] = np.nan + _maybe_remove(store, "df") + store.append("df", df) + tm.assert_frame_equal(store["df"], df) + + # uints - test storage of uints + uint_data = DataFrame( + { + "u08": Series( + np.random.default_rng(2).integers(0, high=255, size=5), + dtype=np.uint8, + ), + "u16": Series( + np.random.default_rng(2).integers(0, high=65535, size=5), + dtype=np.uint16, + ), + "u32": Series( + np.random.default_rng(2).integers(0, high=2**30, size=5), + dtype=np.uint32, + ), + "u64": Series( + [2**58, 2**59, 2**60, 2**61, 2**62], + dtype=np.uint64, + ), + }, + index=np.arange(5), + ) + _maybe_remove(store, "uints") + store.append("uints", uint_data) + tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True) + + # uints - test storage of uints in indexable columns + _maybe_remove(store, 
"uints") + # 64-bit indices not yet supported + store.append("uints", uint_data, data_columns=["u08", "u16", "u32"]) + tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True) + + +def test_append_series(setup_path): + with ensure_clean_store(setup_path) as store: + # basic + ss = Series(range(20), dtype=np.float64, index=[f"i_{i}" for i in range(20)]) + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + ns = Series(np.arange(100)) + + store.append("ss", ss) + result = store["ss"] + tm.assert_series_equal(result, ss) + assert result.name is None + + store.append("ts", ts) + result = store["ts"] + tm.assert_series_equal(result, ts) + assert result.name is None + + ns.name = "foo" + store.append("ns", ns) + result = store["ns"] + tm.assert_series_equal(result, ns) + assert result.name == ns.name + + # select on the values + expected = ns[ns > 60] + result = store.select("ns", "foo>60") + tm.assert_series_equal(result, expected) + + # select on the index and values + expected = ns[(ns > 70) & (ns.index < 90)] + result = store.select("ns", "foo>70 and index<90") + tm.assert_series_equal(result, expected, check_index_type=True) + + # multi-index + mi = DataFrame(np.random.default_rng(2).standard_normal((5, 1)), columns=["A"]) + mi["B"] = np.arange(len(mi)) + mi["C"] = "foo" + mi.loc[3:5, "C"] = "bar" + mi.set_index(["C", "B"], inplace=True) + s = mi.stack(future_stack=True) + s.index = s.index.droplevel(2) + store.append("mi", s) + tm.assert_series_equal(store["mi"], s, check_index_type=True) + + +def test_append_some_nans(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + { + "A": Series(np.random.default_rng(2).standard_normal(20)).astype( + "int32" + ), + "A1": np.random.default_rng(2).standard_normal(20), + "A2": np.random.default_rng(2).standard_normal(20), + "B": "foo", + "C": "bar", + "D": Timestamp("2001-01-01").as_unit("ns"), + "E": Timestamp("2001-01-02").as_unit("ns"), + }, + index=np.arange(20), + ) + # some nans + _maybe_remove(store, "df1") + df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan + store.append("df1", df[:10]) + store.append("df1", df[10:]) + tm.assert_frame_equal(store["df1"], df, check_index_type=True) + + # first column + df1 = df.copy() + df1["A1"] = np.nan + _maybe_remove(store, "df1") + store.append("df1", df1[:10]) + store.append("df1", df1[10:]) + tm.assert_frame_equal(store["df1"], df1, check_index_type=True) + + # 2nd column + df2 = df.copy() + df2["A2"] = np.nan + _maybe_remove(store, "df2") + store.append("df2", df2[:10]) + store.append("df2", df2[10:]) + tm.assert_frame_equal(store["df2"], df2, check_index_type=True) + + # datetimes + df3 = df.copy() + df3["E"] = np.nan + _maybe_remove(store, "df3") + store.append("df3", df3[:10]) + store.append("df3", df3[10:]) + tm.assert_frame_equal(store["df3"], df3, check_index_type=True) + + +def test_append_all_nans(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + { + "A1": np.random.default_rng(2).standard_normal(20), + "A2": np.random.default_rng(2).standard_normal(20), + }, + index=np.arange(20), + ) + df.loc[0:15, :] = np.nan + + # nan some entire rows (dropna=True) + _maybe_remove(store, "df") + store.append("df", df[:10], dropna=True) + store.append("df", df[10:], dropna=True) + tm.assert_frame_equal(store["df"], df[-4:], check_index_type=True) + + # nan some entire rows (dropna=False) + _maybe_remove(store, "df2") + store.append("df2", df[:10], dropna=False) + store.append("df2", df[10:], 
dropna=False) + tm.assert_frame_equal(store["df2"], df, check_index_type=True) + + # tests the option io.hdf.dropna_table + with pd.option_context("io.hdf.dropna_table", False): + _maybe_remove(store, "df3") + store.append("df3", df[:10]) + store.append("df3", df[10:]) + tm.assert_frame_equal(store["df3"], df) + + with pd.option_context("io.hdf.dropna_table", True): + _maybe_remove(store, "df4") + store.append("df4", df[:10]) + store.append("df4", df[10:]) + tm.assert_frame_equal(store["df4"], df[-4:]) + + # nan some entire rows (string are still written!) + df = DataFrame( + { + "A1": np.random.default_rng(2).standard_normal(20), + "A2": np.random.default_rng(2).standard_normal(20), + "B": "foo", + "C": "bar", + }, + index=np.arange(20), + ) + + df.loc[0:15, :] = np.nan + + _maybe_remove(store, "df") + store.append("df", df[:10], dropna=True) + store.append("df", df[10:], dropna=True) + tm.assert_frame_equal(store["df"], df, check_index_type=True) + + _maybe_remove(store, "df2") + store.append("df2", df[:10], dropna=False) + store.append("df2", df[10:], dropna=False) + tm.assert_frame_equal(store["df2"], df, check_index_type=True) + + # nan some entire rows (but since we have dates they are still + # written!) + df = DataFrame( + { + "A1": np.random.default_rng(2).standard_normal(20), + "A2": np.random.default_rng(2).standard_normal(20), + "B": "foo", + "C": "bar", + "D": Timestamp("2001-01-01").as_unit("ns"), + "E": Timestamp("2001-01-02").as_unit("ns"), + }, + index=np.arange(20), + ) + + df.loc[0:15, :] = np.nan + + _maybe_remove(store, "df") + store.append("df", df[:10], dropna=True) + store.append("df", df[10:], dropna=True) + tm.assert_frame_equal(store["df"], df, check_index_type=True) + + _maybe_remove(store, "df2") + store.append("df2", df[:10], dropna=False) + store.append("df2", df[10:], dropna=False) + tm.assert_frame_equal(store["df2"], df, check_index_type=True) + + +def test_append_frame_column_oriented(setup_path): + with ensure_clean_store(setup_path) as store: + # column oriented + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.index = df.index._with_freq(None) # freq doesn't round-trip + + _maybe_remove(store, "df1") + store.append("df1", df.iloc[:, :2], axes=["columns"]) + store.append("df1", df.iloc[:, 2:]) + tm.assert_frame_equal(store["df1"], df) + + result = store.select("df1", "columns=A") + expected = df.reindex(columns=["A"]) + tm.assert_frame_equal(expected, result) + + # selection on the non-indexable + result = store.select("df1", ("columns=A", "index=df.index[0:4]")) + expected = df.reindex(columns=["A"], index=df.index[0:4]) + tm.assert_frame_equal(expected, result) + + # this isn't supported + msg = re.escape( + "passing a filterable condition to a non-table indexer " + "[Filter: Not Initialized]" + ) + with pytest.raises(TypeError, match=msg): + store.select("df1", "columns=A and index>df.index[4]") + + +def test_append_with_different_block_ordering(setup_path): + # GH 4096; using same frames, but different block orderings + with ensure_clean_store(setup_path) as store: + for i in range(10): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB") + ) + df["index"] = range(10) + df["index"] += i * 10 + df["int64"] = Series([1] * len(df), dtype="int64") + df["int16"] = Series([1] * len(df), dtype="int16") + + if i % 2 == 0: + del df["int64"] + df["int64"] = Series([1] * len(df), dtype="int64") 
+ if i % 3 == 0: + a = df.pop("A") + df["A"] = a + + df.set_index("index", inplace=True) + + store.append("df", df) + + # test a different ordering but with more fields (like invalid + # combinations) + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + columns=list("AB"), + dtype="float64", + ) + df["int64"] = Series([1] * len(df), dtype="int64") + df["int16"] = Series([1] * len(df), dtype="int16") + store.append("df", df) + + # store additional fields in different blocks + df["int16_2"] = Series([1] * len(df), dtype="int16") + msg = re.escape( + "cannot match existing table structure for [int16] on appending data" + ) + with pytest.raises(ValueError, match=msg): + store.append("df", df) + + # store multiple additional fields in different blocks + df["float_3"] = Series([1.0] * len(df), dtype="float64") + msg = re.escape( + "cannot match existing table structure for [A,B] on appending data" + ) + with pytest.raises(ValueError, match=msg): + store.append("df", df) + + +def test_append_with_strings(setup_path): + with ensure_clean_store(setup_path) as store: + + def check_col(key, name, size): + assert ( + getattr(store.get_storer(key).table.description, name).itemsize == size + ) + + # avoid truncation on elements + df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]]) + store.append("df_big", df) + tm.assert_frame_equal(store.select("df_big"), df) + check_col("df_big", "values_block_1", 15) + + # appending smaller string ok + df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]]) + store.append("df_big", df2) + expected = concat([df, df2]) + tm.assert_frame_equal(store.select("df_big"), expected) + check_col("df_big", "values_block_1", 15) + + # avoid truncation on elements + df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]]) + store.append("df_big2", df, min_itemsize={"values": 50}) + tm.assert_frame_equal(store.select("df_big2"), df) + check_col("df_big2", "values_block_1", 50) + + # bigger string on next append + store.append("df_new", df) + df_new = DataFrame([[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]) + msg = ( + r"Trying to store a string with len \[26\] in " + r"\[values_block_1\] column but\n" + r"this column has a limit of \[15\]!\n" + "Consider using min_itemsize to preset the sizes on these " + "columns" + ) + with pytest.raises(ValueError, match=msg): + store.append("df_new", df_new) + + # min_itemsize on Series index (GH 11412) + df = DataFrame( + { + "A": [0.0, 1.0, 2.0, 3.0, 4.0], + "B": [0.0, 1.0, 0.0, 1.0, 0.0], + "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object), + "D": date_range("20130101", periods=5), + } + ).set_index("C") + store.append("ss", df["B"], min_itemsize={"index": 4}) + tm.assert_series_equal(store.select("ss"), df["B"]) + + # same as above, with data_columns=True + store.append("ss2", df["B"], data_columns=True, min_itemsize={"index": 4}) + tm.assert_series_equal(store.select("ss2"), df["B"]) + + # min_itemsize in index without appending (GH 10381) + store.put("ss3", df, format="table", min_itemsize={"index": 6}) + # just make sure there is a longer string: + df2 = df.copy().reset_index().assign(C="longer").set_index("C") + store.append("ss3", df2) + tm.assert_frame_equal(store.select("ss3"), concat([df, df2])) + + # same as above, with a Series + store.put("ss4", df["B"], format="table", min_itemsize={"index": 6}) + store.append("ss4", df2["B"]) + tm.assert_series_equal(store.select("ss4"), concat([df["B"], df2["B"]])) + + # with 
nans + _maybe_remove(store, "df") + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df.loc[df.index[1:4], "string"] = np.nan + df["string2"] = "bar" + df.loc[df.index[4:8], "string2"] = np.nan + df["string3"] = "bah" + df.loc[df.index[1:], "string3"] = np.nan + store.append("df", df) + result = store.select("df") + tm.assert_frame_equal(result, df) + + with ensure_clean_store(setup_path) as store: + df = DataFrame({"A": "foo", "B": "bar"}, index=range(10)) + + # a min_itemsize that creates a data_column + _maybe_remove(store, "df") + store.append("df", df, min_itemsize={"A": 200}) + check_col("df", "A", 200) + assert store.get_storer("df").data_columns == ["A"] + + # a min_itemsize that creates a data_column2 + _maybe_remove(store, "df") + store.append("df", df, data_columns=["B"], min_itemsize={"A": 200}) + check_col("df", "A", 200) + assert store.get_storer("df").data_columns == ["B", "A"] + + # a min_itemsize that creates a data_column2 + _maybe_remove(store, "df") + store.append("df", df, data_columns=["B"], min_itemsize={"values": 200}) + check_col("df", "B", 200) + check_col("df", "values_block_0", 200) + assert store.get_storer("df").data_columns == ["B"] + + # infer the .typ on subsequent appends + _maybe_remove(store, "df") + store.append("df", df[:5], min_itemsize=200) + store.append("df", df[5:], min_itemsize=200) + tm.assert_frame_equal(store["df"], df) + + # invalid min_itemsize keys + df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"]) + _maybe_remove(store, "df") + msg = re.escape( + "min_itemsize has the key [foo] which is not an axis or data_column" + ) + with pytest.raises(ValueError, match=msg): + store.append("df", df, min_itemsize={"foo": 20, "foobar": 20}) + + +def test_append_with_empty_string(setup_path): + with ensure_clean_store(setup_path) as store: + # with all empty strings (GH 12242) + df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]}) + store.append("df", df[:-1], min_itemsize={"x": 1}) + store.append("df", df[-1:], min_itemsize={"x": 1}) + tm.assert_frame_equal(store.select("df"), df) + + +def test_append_with_data_columns(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.iloc[0, df.columns.get_loc("B")] = 1.0 + _maybe_remove(store, "df") + store.append("df", df[:2], data_columns=["B"]) + store.append("df", df[2:]) + tm.assert_frame_equal(store["df"], df) + + # check that we have indices created + assert store._handle.root.df.table.cols.index.is_indexed is True + assert store._handle.root.df.table.cols.B.is_indexed is True + + # data column searching + result = store.select("df", "B>0") + expected = df[df.B > 0] + tm.assert_frame_equal(result, expected) + + # data column searching (with an indexable and a data_columns) + result = store.select("df", "B>0 and index>df.index[3]") + df_new = df.reindex(index=df.index[4:]) + expected = df_new[df_new.B > 0] + tm.assert_frame_equal(result, expected) + + # data column selection with a string data_column + df_new = df.copy() + df_new["string"] = "foo" + df_new.loc[df_new.index[1:4], "string"] = np.nan + df_new.loc[df_new.index[5:6], "string"] = "bar" + _maybe_remove(store, "df") + store.append("df", df_new, data_columns=["string"]) + result = 
store.select("df", "string='foo'") + expected = df_new[df_new.string == "foo"] + tm.assert_frame_equal(result, expected) + + # using min_itemsize and a data column + def check_col(key, name, size): + assert ( + getattr(store.get_storer(key).table.description, name).itemsize == size + ) + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "df") + store.append("df", df_new, data_columns=["string"], min_itemsize={"string": 30}) + check_col("df", "string", 30) + _maybe_remove(store, "df") + store.append("df", df_new, data_columns=["string"], min_itemsize=30) + check_col("df", "string", 30) + _maybe_remove(store, "df") + store.append("df", df_new, data_columns=["string"], min_itemsize={"values": 30}) + check_col("df", "string", 30) + + with ensure_clean_store(setup_path) as store: + df_new["string2"] = "foobarbah" + df_new["string_block1"] = "foobarbah1" + df_new["string_block2"] = "foobarbah2" + _maybe_remove(store, "df") + store.append( + "df", + df_new, + data_columns=["string", "string2"], + min_itemsize={"string": 30, "string2": 40, "values": 50}, + ) + check_col("df", "string", 30) + check_col("df", "string2", 40) + check_col("df", "values_block_1", 50) + + with ensure_clean_store(setup_path) as store: + # multiple data columns + df_new = df.copy() + df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0 + df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0 + df_new["string"] = "foo" + + sl = df_new.columns.get_loc("string") + df_new.iloc[1:4, sl] = np.nan + df_new.iloc[5:6, sl] = "bar" + + df_new["string2"] = "foo" + sl = df_new.columns.get_loc("string2") + df_new.iloc[2:5, sl] = np.nan + df_new.iloc[7:8, sl] = "bar" + _maybe_remove(store, "df") + store.append("df", df_new, data_columns=["A", "B", "string", "string2"]) + result = store.select("df", "string='foo' and string2='foo' and A>0 and B<0") + expected = df_new[ + (df_new.string == "foo") + & (df_new.string2 == "foo") + & (df_new.A > 0) + & (df_new.B < 0) + ] + tm.assert_frame_equal(result, expected, check_freq=False) + # FIXME: 2020-05-07 freq check randomly fails in the CI + + # yield an empty frame + result = store.select("df", "string='foo' and string2='cool'") + expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")] + tm.assert_frame_equal(result, expected) + + with ensure_clean_store(setup_path) as store: + # doc example + df_dc = df.copy() + df_dc["string"] = "foo" + df_dc.loc[df_dc.index[4:6], "string"] = np.nan + df_dc.loc[df_dc.index[7:9], "string"] = "bar" + df_dc["string2"] = "cool" + df_dc["datetime"] = Timestamp("20010102").as_unit("ns") + df_dc.loc[df_dc.index[3:5], ["A", "B", "datetime"]] = np.nan + + _maybe_remove(store, "df_dc") + store.append( + "df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"] + ) + result = store.select("df_dc", "B>0") + + expected = df_dc[df_dc.B > 0] + tm.assert_frame_equal(result, expected) + + result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"]) + expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")] + tm.assert_frame_equal(result, expected, check_freq=False) + # FIXME: 2020-12-07 intermittent build failures here with freq of + # None instead of BDay(4) + + with ensure_clean_store(setup_path) as store: + # doc example part 2 + + index = date_range("1/1/2000", periods=8) + df_dc = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), + index=index, + columns=["A", "B", "C"], + ) + df_dc["string"] = "foo" + df_dc.loc[df_dc.index[4:6], "string"] = np.nan + df_dc.loc[df_dc.index[7:9], "string"] = 
"bar" + df_dc[["B", "C"]] = df_dc[["B", "C"]].abs() + df_dc["string2"] = "cool" + + # on-disk operations + store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"]) + + result = store.select("df_dc", "B>0") + expected = df_dc[df_dc.B > 0] + tm.assert_frame_equal(result, expected) + + result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"']) + expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")] + tm.assert_frame_equal(result, expected) + + +def test_append_hierarchical(tmp_path, setup_path, multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + df.columns.name = None + + with ensure_clean_store(setup_path) as store: + store.append("mi", df) + result = store.select("mi") + tm.assert_frame_equal(result, df) + + # GH 3748 + result = store.select("mi", columns=["A", "B"]) + expected = df.reindex(columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + path = tmp_path / "test.hdf" + df.to_hdf(path, key="df", format="table") + result = read_hdf(path, "df", columns=["A", "B"]) + expected = df.reindex(columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +def test_append_misc(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + store.append("df", df, chunksize=1) + result = store.select("df") + tm.assert_frame_equal(result, df) + + store.append("df1", df, expectedrows=10) + result = store.select("df1") + tm.assert_frame_equal(result, df) + + +@pytest.mark.parametrize("chunksize", [10, 200, 1000]) +def test_append_misc_chunksize(setup_path, chunksize): + # more chunksize in append tests + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["string"] = "foo" + df["float322"] = 1.0 + df["float322"] = df["float322"].astype("float32") + df["bool"] = df["float322"] > 0 + df["time1"] = Timestamp("20130101").as_unit("ns") + df["time2"] = Timestamp("20130102").as_unit("ns") + with ensure_clean_store(setup_path, mode="w") as store: + store.append("obj", df, chunksize=chunksize) + result = store.select("obj") + tm.assert_frame_equal(result, df) + + +def test_append_misc_empty_frame(setup_path): + # empty frame, GH4273 + with ensure_clean_store(setup_path) as store: + # 0 len + df_empty = DataFrame(columns=list("ABC")) + store.append("df", df_empty) + with pytest.raises(KeyError, match="'No object named df in the file'"): + store.select("df") + + # repeated append of 0/non-zero frames + df = DataFrame(np.random.default_rng(2).random((10, 3)), columns=list("ABC")) + store.append("df", df) + tm.assert_frame_equal(store.select("df"), df) + store.append("df", df_empty) + tm.assert_frame_equal(store.select("df"), df) + + # store + df = DataFrame(columns=list("ABC")) + store.put("df2", df) + tm.assert_frame_equal(store.select("df2"), df) + + +# TODO(ArrayManager) currently we rely on falling back to BlockManager, but +# the conversion from AM->BM converts the invalid object dtype column into +# a datetime64 column no longer raising an error +@td.skip_array_manager_not_yet_implemented +def test_append_raise(setup_path): + with ensure_clean_store(setup_path) as store: + # test append with invalid input to get good error messages + + # list in column + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + 
columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["invalid"] = [["a"]] * len(df) + assert df.dtypes["invalid"] == np.object_ + msg = re.escape( + """Cannot serialize the column [invalid] +because its data contents are not [string] but [mixed] object dtype""" + ) + with pytest.raises(TypeError, match=msg): + store.append("df", df) + + # multiple invalid columns + df["invalid2"] = [["a"]] * len(df) + df["invalid3"] = [["a"]] * len(df) + with pytest.raises(TypeError, match=msg): + store.append("df", df) + + # datetime with embedded nans as object + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + s = Series(datetime.datetime(2001, 1, 2), index=df.index) + s = s.astype(object) + s[0:5] = np.nan + df["invalid"] = s + assert df.dtypes["invalid"] == np.object_ + msg = "too many timezones in this block, create separate data columns" + with pytest.raises(TypeError, match=msg): + store.append("df", df) + + # directly ndarray + msg = "value must be None, Series, or DataFrame" + with pytest.raises(TypeError, match=msg): + store.append("df", np.arange(10)) + + # series directly + msg = re.escape( + "cannot properly create the storer for: " + "[group->df,value->]" + ) + with pytest.raises(TypeError, match=msg): + store.append("df", Series(np.arange(10))) + + # appending an incompatible table + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + store.append("df", df) + + df["foo"] = "foo" + msg = re.escape( + "invalid combination of [non_index_axes] on appending data " + "[(1, ['A', 'B', 'C', 'D', 'foo'])] vs current table " + "[(1, ['A', 'B', 'C', 'D'])]" + ) + with pytest.raises(ValueError, match=msg): + store.append("df", df) + + # incompatible type (GH 41897) + _maybe_remove(store, "df") + df["foo"] = Timestamp("20130101") + store.append("df", df) + df["foo"] = "bar" + msg = re.escape( + "invalid combination of [values_axes] on appending data " + "[name->values_block_1,cname->values_block_1," + "dtype->bytes24,kind->string,shape->(1, 30)] " + "vs current table " + "[name->values_block_1,cname->values_block_1," + "dtype->datetime64[s],kind->datetime64[s],shape->None]" + ) + with pytest.raises(ValueError, match=msg): + store.append("df", df) + + +def test_append_with_timedelta(setup_path): + # GH 3577 + # append timedelta + + ts = Timestamp("20130101").as_unit("ns") + df = DataFrame( + { + "A": ts, + "B": [ts + timedelta(days=i, seconds=10) for i in range(10)], + } + ) + df["C"] = df["A"] - df["B"] + df.loc[3:5, "C"] = np.nan + + with ensure_clean_store(setup_path) as store: + # table + _maybe_remove(store, "df") + store.append("df", df, data_columns=True) + result = store.select("df") + tm.assert_frame_equal(result, df) + + result = store.select("df", where="C<100000") + tm.assert_frame_equal(result, df) + + result = store.select("df", where="C0", "B>0"], selector="df1" + ) + expected = df[(df.A > 0) & (df.B > 0)] + tm.assert_frame_equal(result, expected) + + +def test_append_to_multiple_dropna(setup_path): + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + 
index=date_range("2000-01-01", periods=10, freq="B"), + ).rename(columns="{}_2".format) + df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan + df = concat([df1, df2], axis=1) + + with ensure_clean_store(setup_path) as store: + # dropna=True should guarantee rows are synchronized + store.append_to_multiple( + {"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True + ) + result = store.select_as_multiple(["df1", "df2"]) + expected = df.dropna() + tm.assert_frame_equal(result, expected, check_index_type=True) + tm.assert_index_equal(store.select("df1").index, store.select("df2").index) + + +def test_append_to_multiple_dropna_false(setup_path): + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) + df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan + df = concat([df1, df2], axis=1) + + with ensure_clean_store(setup_path) as store, pd.option_context( + "io.hdf.dropna_table", True + ): + # dropna=False shouldn't synchronize row indexes + store.append_to_multiple( + {"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False + ) + + msg = "all tables must have exactly the same nrows!" + with pytest.raises(ValueError, match=msg): + store.select_as_multiple(["df1a", "df2a"]) + + assert not store.select("df1a").index.equals(store.select("df2a").index) + + +def test_append_to_multiple_min_itemsize(setup_path): + # GH 11238 + df = DataFrame( + { + "IX": np.arange(1, 21), + "Num": np.arange(1, 21), + "BigNum": np.arange(1, 21) * 88, + "Str": ["a" for _ in range(20)], + "LongStr": ["abcde" for _ in range(20)], + } + ) + expected = df.iloc[[0]] + + with ensure_clean_store(setup_path) as store: + store.append_to_multiple( + { + "index": ["IX"], + "nums": ["Num", "BigNum"], + "strs": ["Str", "LongStr"], + }, + df.iloc[[0]], + "index", + min_itemsize={"Str": 10, "LongStr": 100, "Num": 2}, + ) + result = store.select_as_multiple(["index", "nums", "strs"]) + tm.assert_frame_equal(result, expected, check_index_type=True) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..58ebdfe7696b4bc9f37c193a3e2a4f19f55c3fe8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py @@ -0,0 +1,214 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + Series, + _testing as tm, + concat, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + +pytestmark = pytest.mark.single_cpu + + +def test_categorical(setup_path): + with ensure_clean_store(setup_path) as store: + # Basic + _maybe_remove(store, "s") + s = Series( + Categorical( + ["a", "b", "b", "a", "a", "c"], + categories=["a", "b", "c", "d"], + ordered=False, + ) + ) + store.append("s", s, format="table") + result = store.select("s") + tm.assert_series_equal(s, result) + + _maybe_remove(store, "s_ordered") + s = Series( + Categorical( + ["a", "b", "b", "a", "a", "c"], + categories=["a", "b", "c", "d"], + ordered=True, + ) + ) + store.append("s_ordered", s, format="table") + result = store.select("s_ordered") + tm.assert_series_equal(s, result) + + _maybe_remove(store, "df") + df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]}) + 
store.append("df", df, format="table") + result = store.select("df") + tm.assert_frame_equal(result, df) + + # Dtypes + _maybe_remove(store, "si") + s = Series([1, 1, 2, 2, 3, 4, 5]).astype("category") + store.append("si", s) + result = store.select("si") + tm.assert_series_equal(result, s) + + _maybe_remove(store, "si2") + s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype("category") + store.append("si2", s) + result = store.select("si2") + tm.assert_series_equal(result, s) + + # Multiple + _maybe_remove(store, "df2") + df2 = df.copy() + df2["s2"] = Series(list("abcdefg")).astype("category") + store.append("df2", df2) + result = store.select("df2") + tm.assert_frame_equal(result, df2) + + # Make sure the metadata is OK + info = store.info() + assert "/df2 " in info + # df2._mgr.blocks[0] and df2._mgr.blocks[2] are Categorical + assert "/df2/meta/values_block_0/meta" in info + assert "/df2/meta/values_block_2/meta" in info + + # unordered + _maybe_remove(store, "s2") + s = Series( + Categorical( + ["a", "b", "b", "a", "a", "c"], + categories=["a", "b", "c", "d"], + ordered=False, + ) + ) + store.append("s2", s, format="table") + result = store.select("s2") + tm.assert_series_equal(result, s) + + # Query + _maybe_remove(store, "df3") + store.append("df3", df, data_columns=["s"]) + expected = df[df.s.isin(["b", "c"])] + result = store.select("df3", where=['s in ["b","c"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(["b", "c"])] + result = store.select("df3", where=['s = ["b","c"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(["d"])] + result = store.select("df3", where=['s in ["d"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(["f"])] + result = store.select("df3", where=['s in ["f"]']) + tm.assert_frame_equal(result, expected) + + # Appending with same categories is ok + store.append("df3", df) + + df = concat([df, df]) + expected = df[df.s.isin(["b", "c"])] + result = store.select("df3", where=['s in ["b","c"]']) + tm.assert_frame_equal(result, expected) + + # Appending must have the same categories + df3 = df.copy() + df3["s"] = df3["s"].cat.remove_unused_categories() + + msg = "cannot append a categorical with different categories to the existing" + with pytest.raises(ValueError, match=msg): + store.append("df3", df3) + + # Remove, and make sure meta data is removed (its a recursive + # removal so should be). + result = store.select("df3/meta/s/meta") + assert result is not None + store.remove("df3") + + with pytest.raises( + KeyError, match="'No object named df3/meta/s/meta in the file'" + ): + store.select("df3/meta/s/meta") + + +def test_categorical_conversion(tmp_path, setup_path): + # GH13322 + # Check that read_hdf with categorical columns doesn't return rows if + # where criteria isn't met. 
+ obsids = ["ESP_012345_6789", "ESP_987654_3210"] + imgids = ["APF00006np", "APF0001imm"] + data = [4.3, 9.8] + + # Test without categories + df = DataFrame({"obsids": obsids, "imgids": imgids, "data": data}) + + # We are expecting an empty DataFrame matching types of df + expected = df.iloc[[], :] + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", data_columns=True) + result = read_hdf(path, "df", where="obsids=B") + tm.assert_frame_equal(result, expected) + + # Test with categories + df.obsids = df.obsids.astype("category") + df.imgids = df.imgids.astype("category") + + # We are expecting an empty DataFrame matching types of df + expected = df.iloc[[], :] + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", data_columns=True) + result = read_hdf(path, "df", where="obsids=B") + tm.assert_frame_equal(result, expected) + + +def test_categorical_nan_only_columns(tmp_path, setup_path): + # GH18413 + # Check that read_hdf with categorical columns with NaN-only values can + # be read back. + df = DataFrame( + { + "a": ["a", "b", "c", np.nan], + "b": [np.nan, np.nan, np.nan, np.nan], + "c": [1, 2, 3, 4], + "d": Series([None] * 4, dtype=object), + } + ) + df["a"] = df.a.astype("category") + df["b"] = df.b.astype("category") + df["d"] = df.b.astype("category") + expected = df + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", data_columns=True) + result = read_hdf(path, "df") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "where, df, expected", + [ + ('col=="q"', DataFrame({"col": ["a", "b", "s"]}), DataFrame({"col": []})), + ('col=="a"', DataFrame({"col": ["a", "b", "s"]}), DataFrame({"col": ["a"]})), + ], +) +def test_convert_value( + tmp_path, setup_path, where: str, df: DataFrame, expected: DataFrame +): + # GH39420 + # Check that read_hdf with categorical columns can filter by where condition. + df.col = df.col.astype("category") + max_widths = {"col": 1} + categorical_values = sorted(df.col.unique()) + expected.col = expected.col.astype("category") + expected.col = expected.col.cat.set_categories(categorical_values) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", min_itemsize=max_widths) + result = read_hdf(path, where=where) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..b07fb3ddd3ac829f5b90d6fd7226926aeed284e6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py @@ -0,0 +1,75 @@ +import pytest + +import pandas as pd +import pandas._testing as tm + +tables = pytest.importorskip("tables") + + +@pytest.fixture +def pytables_hdf5_file(tmp_path): + """ + Use PyTables to create a simple HDF5 file. 
+ """ + table_schema = { + "c0": tables.Time64Col(pos=0), + "c1": tables.StringCol(5, pos=1), + "c2": tables.Int64Col(pos=2), + } + + t0 = 1_561_105_000.0 + + testsamples = [ + {"c0": t0, "c1": "aaaaa", "c2": 1}, + {"c0": t0 + 1, "c1": "bbbbb", "c2": 2}, + {"c0": t0 + 2, "c1": "ccccc", "c2": 10**5}, + {"c0": t0 + 3, "c1": "ddddd", "c2": 4_294_967_295}, + ] + + objname = "pandas_test_timeseries" + + path = tmp_path / "written_with_pytables.h5" + with tables.open_file(path, mode="w") as f: + t = f.create_table("/", name=objname, description=table_schema) + for sample in testsamples: + for key, value in sample.items(): + t.row[key] = value + t.row.append() + + yield path, objname, pd.DataFrame(testsamples) + + +class TestReadPyTablesHDF5: + """ + A group of tests which covers reading HDF5 files written by plain PyTables + (not written by pandas). + + Was introduced for regression-testing issue 11188. + """ + + def test_read_complete(self, pytables_hdf5_file): + path, objname, df = pytables_hdf5_file + result = pd.read_hdf(path, key=objname) + expected = df + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_read_with_start(self, pytables_hdf5_file): + path, objname, df = pytables_hdf5_file + # This is a regression test for pandas-dev/pandas/issues/11188 + result = pd.read_hdf(path, key=objname, start=1) + expected = df[1:].reset_index(drop=True) + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_read_with_stop(self, pytables_hdf5_file): + path, objname, df = pytables_hdf5_file + # This is a regression test for pandas-dev/pandas/issues/11188 + result = pd.read_hdf(path, key=objname, stop=1) + expected = df[:1].reset_index(drop=True) + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_read_with_startstop(self, pytables_hdf5_file): + path, objname, df = pytables_hdf5_file + # This is a regression test for pandas-dev/pandas/issues/11188 + result = pd.read_hdf(path, key=objname, start=1, stop=2) + expected = df[1:2].reset_index(drop=True) + tm.assert_frame_equal(result, expected, check_index_type=True) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py new file mode 100644 index 0000000000000000000000000000000000000000..c5cac5a5caf090d85d7284103459c6f03d3d41ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py @@ -0,0 +1,195 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.tests.io.pytables.common import ensure_clean_store + +from pandas.io.pytables import read_hdf + + +def test_complex_fixed(tmp_path, setup_path): + df = DataFrame( + np.random.default_rng(2).random((4, 5)).astype(np.complex64), + index=list("abcd"), + columns=list("ABCDE"), + ) + + path = tmp_path / setup_path + df.to_hdf(path, key="df") + reread = read_hdf(path, "df") + tm.assert_frame_equal(df, reread) + + df = DataFrame( + np.random.default_rng(2).random((4, 5)).astype(np.complex128), + index=list("abcd"), + columns=list("ABCDE"), + ) + path = tmp_path / setup_path + df.to_hdf(path, key="df") + reread = read_hdf(path, "df") + tm.assert_frame_equal(df, reread) + + +def test_complex_table(tmp_path, setup_path): + df = DataFrame( + np.random.default_rng(2).random((4, 5)).astype(np.complex64), + index=list("abcd"), + columns=list("ABCDE"), + ) + + path = tmp_path / setup_path 
+ df.to_hdf(path, key="df", format="table") + reread = read_hdf(path, key="df") + tm.assert_frame_equal(df, reread) + + df = DataFrame( + np.random.default_rng(2).random((4, 5)).astype(np.complex128), + index=list("abcd"), + columns=list("ABCDE"), + ) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", mode="w") + reread = read_hdf(path, "df") + tm.assert_frame_equal(df, reread) + + +def test_complex_mixed_fixed(tmp_path, setup_path): + complex64 = np.array( + [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64 + ) + complex128 = np.array( + [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128 + ) + df = DataFrame( + { + "A": [1, 2, 3, 4], + "B": ["a", "b", "c", "d"], + "C": complex64, + "D": complex128, + "E": [1.0, 2.0, 3.0, 4.0], + }, + index=list("abcd"), + ) + path = tmp_path / setup_path + df.to_hdf(path, key="df") + reread = read_hdf(path, "df") + tm.assert_frame_equal(df, reread) + + +def test_complex_mixed_table(tmp_path, setup_path): + complex64 = np.array( + [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64 + ) + complex128 = np.array( + [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128 + ) + df = DataFrame( + { + "A": [1, 2, 3, 4], + "B": ["a", "b", "c", "d"], + "C": complex64, + "D": complex128, + "E": [1.0, 2.0, 3.0, 4.0], + }, + index=list("abcd"), + ) + + with ensure_clean_store(setup_path) as store: + store.append("df", df, data_columns=["A", "B"]) + result = store.select("df", where="A>2") + tm.assert_frame_equal(df.loc[df.A > 2], result) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table") + reread = read_hdf(path, "df") + tm.assert_frame_equal(df, reread) + + +def test_complex_across_dimensions_fixed(tmp_path, setup_path): + complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) + s = Series(complex128, index=list("abcd")) + df = DataFrame({"A": s, "B": s}) + + objs = [s, df] + comps = [tm.assert_series_equal, tm.assert_frame_equal] + for obj, comp in zip(objs, comps): + path = tmp_path / setup_path + obj.to_hdf(path, key="obj", format="fixed") + reread = read_hdf(path, "obj") + comp(obj, reread) + + +def test_complex_across_dimensions(tmp_path, setup_path): + complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) + s = Series(complex128, index=list("abcd")) + df = DataFrame({"A": s, "B": s}) + + path = tmp_path / setup_path + df.to_hdf(path, key="obj", format="table") + reread = read_hdf(path, "obj") + tm.assert_frame_equal(df, reread) + + +def test_complex_indexing_error(setup_path): + complex128 = np.array( + [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128 + ) + df = DataFrame( + {"A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex128}, + index=list("abcd"), + ) + + msg = ( + "Columns containing complex values can be stored " + "but cannot be indexed when using table format. " + "Either use fixed format, set index=False, " + "or do not include the columns containing complex " + "values to data_columns when initializing the table." + ) + + with ensure_clean_store(setup_path) as store: + with pytest.raises(TypeError, match=msg): + store.append("df", df, data_columns=["C"]) + + +def test_complex_series_error(tmp_path, setup_path): + complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) + s = Series(complex128, index=list("abcd")) + + msg = ( + "Columns containing complex values can be stored " + "but cannot be indexed when using table format. 
" + "Either use fixed format, set index=False, " + "or do not include the columns containing complex " + "values to data_columns when initializing the table." + ) + + path = tmp_path / setup_path + with pytest.raises(TypeError, match=msg): + s.to_hdf(path, key="obj", format="t") + + path = tmp_path / setup_path + s.to_hdf(path, key="obj", format="t", index=False) + reread = read_hdf(path, "obj") + tm.assert_series_equal(s, reread) + + +def test_complex_append(setup_path): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(100).astype(np.complex128), + "b": np.random.default_rng(2).standard_normal(100), + } + ) + + with ensure_clean_store(setup_path) as store: + store.append("df", df, data_columns=["b"]) + store.append("df", df) + result = store.select("df") + tm.assert_frame_equal(pd.concat([df, df], axis=0), result) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..20211010988924ed601f52ed9adf03ca081838c8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py @@ -0,0 +1,251 @@ +import datetime +from io import BytesIO +import re + +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + DataFrame, + HDFStore, + Index, + MultiIndex, + _testing as tm, + date_range, + read_hdf, +) +from pandas.tests.io.pytables.common import ensure_clean_store + +from pandas.io.pytables import ( + Term, + _maybe_adjust_name, +) + +pytestmark = pytest.mark.single_cpu + + +def test_pass_spec_to_storer(setup_path): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + with ensure_clean_store(setup_path) as store: + store.put("df", df) + msg = ( + "cannot pass a column specification when reading a Fixed format " + "store. this store must be selected in its entirety" + ) + with pytest.raises(TypeError, match=msg): + store.select("df", columns=["A"]) + msg = ( + "cannot pass a where specification when reading from a Fixed " + "format store. 
this store must be selected in its entirety" + ) + with pytest.raises(TypeError, match=msg): + store.select("df", where=[("columns=A")]) + + +def test_table_index_incompatible_dtypes(setup_path): + df1 = DataFrame({"a": [1, 2, 3]}) + df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3)) + + with ensure_clean_store(setup_path) as store: + store.put("frame", df1, format="table") + msg = re.escape("incompatible kind in col [integer - datetime64[ns]]") + with pytest.raises(TypeError, match=msg): + store.put("frame", df2, format="table", append=True) + + +def test_unimplemented_dtypes_table_columns(setup_path): + with ensure_clean_store(setup_path) as store: + dtypes = [("date", datetime.date(2001, 1, 2))] + + # currently not supported dtypes #### + for n, f in dtypes: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df[n] = f + msg = re.escape(f"[{n}] is not implemented as a table column") + with pytest.raises(TypeError, match=msg): + store.append(f"df1_{n}", df) + + # frame + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["datetime1"] = datetime.date(2001, 1, 2) + df = df._consolidate() + + with ensure_clean_store(setup_path) as store: + # this fails because we have a date in the object block...... + msg = re.escape( + """Cannot serialize the column [datetime1] +because its data contents are not [string] but [date] object dtype""" + ) + with pytest.raises(TypeError, match=msg): + store.append("df_unimplemented", df) + + +def test_invalid_terms(tmp_path, setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df.loc[df.index[0:4], "string"] = "bar" + + store.put("df", df, format="table") + + # some invalid terms + msg = re.escape("__init__() missing 1 required positional argument: 'where'") + with pytest.raises(TypeError, match=msg): + Term() + + # more invalid + msg = re.escape( + "cannot process expression [df.index[3]], " + "[2000-01-06 00:00:00] is not a valid condition" + ) + with pytest.raises(ValueError, match=msg): + store.select("df", "df.index[3]") + + msg = "invalid syntax" + with pytest.raises(SyntaxError, match=msg): + store.select("df", "index>") + + # from the docs + path = tmp_path / setup_path + dfq = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=list("ABCD"), + index=date_range("20130101", periods=10), + ) + dfq.to_hdf(path, key="dfq", format="table", data_columns=True) + + # check ok + read_hdf(path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']") + read_hdf(path, "dfq", where="A>0 or C>0") + + # catch the invalid reference + path = tmp_path / setup_path + dfq = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=list("ABCD"), + index=date_range("20130101", periods=10), + ) + dfq.to_hdf(path, key="dfq", format="table") + + msg = ( + r"The passed where expression: A>0 or C>0\n\s*" + r"contains an invalid variable reference\n\s*" + r"all of the variable references must be a reference to\n\s*" + r"an axis \(e.g. 
'index' or 'columns'\), or a data_column\n\s*" + r"The currently defined references are: index,columns\n" + ) + with pytest.raises(ValueError, match=msg): + read_hdf(path, "dfq", where="A>0 or C>0") + + +def test_append_with_diff_col_name_types_raises_value_error(setup_path): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1))) + df2 = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + df3 = DataFrame({(1, 2): np.random.default_rng(2).standard_normal(10)}) + df4 = DataFrame({("1", 2): np.random.default_rng(2).standard_normal(10)}) + df5 = DataFrame({("1", 2, object): np.random.default_rng(2).standard_normal(10)}) + + with ensure_clean_store(setup_path) as store: + name = "df_diff_valerror" + store.append(name, df) + + for d in (df2, df3, df4, df5): + msg = re.escape( + "cannot match existing table structure for [0] on appending data" + ) + with pytest.raises(ValueError, match=msg): + store.append(name, d) + + +def test_invalid_complib(setup_path): + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + with tm.ensure_clean(setup_path) as path: + msg = r"complib only supports \[.*\] compression." + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df", complib="foolib") + + +@pytest.mark.parametrize( + "idx", + [ + date_range("2019", freq="D", periods=3, tz="UTC"), + CategoricalIndex(list("abc")), + ], +) +def test_to_hdf_multiindex_extension_dtype(idx, tmp_path, setup_path): + # GH 7775 + mi = MultiIndex.from_arrays([idx, idx]) + df = DataFrame(0, index=mi, columns=["a"]) + path = tmp_path / setup_path + with pytest.raises(NotImplementedError, match="Saving a MultiIndex"): + df.to_hdf(path, key="df") + + +def test_unsuppored_hdf_file_error(datapath): + # GH 9539 + data_path = datapath("io", "data", "legacy_hdf/incompatible_dataset.h5") + message = ( + r"Dataset\(s\) incompatible with Pandas data types, " + "not table, or no datasets found in HDF5 file." + ) + + with pytest.raises(ValueError, match=message): + read_hdf(data_path) + + +def test_read_hdf_errors(setup_path, tmp_path): + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + + path = tmp_path / setup_path + msg = r"File [\S]* does not exist" + with pytest.raises(OSError, match=msg): + read_hdf(path, "key") + + df.to_hdf(path, key="df") + store = HDFStore(path, mode="r") + store.close() + + msg = "The HDFStore must be open for reading." + with pytest.raises(OSError, match=msg): + read_hdf(store, "df") + + +def test_read_hdf_generic_buffer_errors(): + msg = "Support for generic buffers has not been implemented." 
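+    # read_hdf only accepts filesystem paths or open HDFStore objects, so an
+    # in-memory buffer such as BytesIO is expected to raise NotImplementedError.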
+ with pytest.raises(NotImplementedError, match=msg): + read_hdf(BytesIO(b""), "df") + + +@pytest.mark.parametrize("bad_version", [(1, 2), (1,), [], "12", "123"]) +def test_maybe_adjust_name_bad_version_raises(bad_version): + msg = "Version is incorrect, expected sequence of 3 integers" + with pytest.raises(ValueError, match=msg): + _maybe_adjust_name("values_block_0", version=bad_version) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..d93de16816725cf6a3a326301eadcd35345a96d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py @@ -0,0 +1,495 @@ +import os + +import numpy as np +import pytest + +from pandas.compat import ( + PY311, + is_ci_environment, + is_platform_linux, + is_platform_little_endian, +) +from pandas.errors import ( + ClosedFileError, + PossibleDataLossError, +) + +from pandas import ( + DataFrame, + HDFStore, + Index, + Series, + _testing as tm, + date_range, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, + tables, +) + +from pandas.io import pytables +from pandas.io.pytables import Term + +pytestmark = pytest.mark.single_cpu + + +@pytest.mark.parametrize("mode", ["r", "r+", "a", "w"]) +def test_mode(setup_path, tmp_path, mode): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + msg = r"[\S]* does not exist" + path = tmp_path / setup_path + + # constructor + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): + HDFStore(path, mode=mode) + + else: + with HDFStore(path, mode=mode) as store: + assert store._handle.mode == mode + + path = tmp_path / setup_path + + # context + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): + with HDFStore(path, mode=mode) as store: + pass + else: + with HDFStore(path, mode=mode) as store: + assert store._handle.mode == mode + + path = tmp_path / setup_path + + # conv write + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): + df.to_hdf(path, key="df", mode=mode) + df.to_hdf(path, key="df", mode="w") + else: + df.to_hdf(path, key="df", mode=mode) + + # conv read + if mode in ["w"]: + msg = ( + "mode w is not allowed while performing a read. " + r"Allowed modes are r, r\+ and a." + ) + with pytest.raises(ValueError, match=msg): + read_hdf(path, "df", mode=mode) + else: + result = read_hdf(path, "df", mode=mode) + tm.assert_frame_equal(result, df) + + +def test_default_mode(tmp_path, setup_path): + # read_hdf uses default mode + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="w") + result = read_hdf(path, "df") + tm.assert_frame_equal(result, df) + + +def test_reopen_handle(tmp_path, setup_path): + path = tmp_path / setup_path + + store = HDFStore(path, mode="a") + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + + msg = ( + r"Re-opening the file \[[\S]*\] with mode \[a\] will delete the " + "current file!" 
+ ) + # invalid mode change + with pytest.raises(PossibleDataLossError, match=msg): + store.open("w") + + store.close() + assert not store.is_open + + # truncation ok here + store.open("w") + assert store.is_open + assert len(store) == 0 + store.close() + assert not store.is_open + + store = HDFStore(path, mode="a") + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + + # reopen as read + store.open("r") + assert store.is_open + assert len(store) == 1 + assert store._mode == "r" + store.close() + assert not store.is_open + + # reopen as append + store.open("a") + assert store.is_open + assert len(store) == 1 + assert store._mode == "a" + store.close() + assert not store.is_open + + # reopen as append (again) + store.open("a") + assert store.is_open + assert len(store) == 1 + assert store._mode == "a" + store.close() + assert not store.is_open + + +def test_open_args(setup_path): + with tm.ensure_clean(setup_path) as path: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # create an in memory store + store = HDFStore( + path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0 + ) + store["df"] = df + store.append("df2", df) + + tm.assert_frame_equal(store["df"], df) + tm.assert_frame_equal(store["df2"], df) + + store.close() + + # the file should not have actually been written + assert not os.path.exists(path) + + +def test_flush(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series(range(5)) + store.flush() + store.flush(fsync=True) + + +def test_complibs_default_settings(tmp_path, setup_path): + # GH15943 + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # Set complevel and check if complib is automatically set to + # default value + tmpfile = tmp_path / setup_path + df.to_hdf(tmpfile, key="df", complevel=9) + result = read_hdf(tmpfile, "df") + tm.assert_frame_equal(result, df) + + with tables.open_file(tmpfile, mode="r") as h5file: + for node in h5file.walk_nodes(where="/df", classname="Leaf"): + assert node.filters.complevel == 9 + assert node.filters.complib == "zlib" + + # Set complib and check to see if compression is disabled + tmpfile = tmp_path / setup_path + df.to_hdf(tmpfile, key="df", complib="zlib") + result = read_hdf(tmpfile, "df") + tm.assert_frame_equal(result, df) + + with tables.open_file(tmpfile, mode="r") as h5file: + for node in h5file.walk_nodes(where="/df", classname="Leaf"): + assert node.filters.complevel == 0 + assert node.filters.complib is None + + # Check if not setting complib or complevel results in no compression + tmpfile = tmp_path / setup_path + df.to_hdf(tmpfile, key="df") + result = read_hdf(tmpfile, "df") + tm.assert_frame_equal(result, df) + + with tables.open_file(tmpfile, mode="r") as h5file: + for node in h5file.walk_nodes(where="/df", classname="Leaf"): + assert node.filters.complevel == 0 + assert node.filters.complib is None + + +def test_complibs_default_settings_override(tmp_path, setup_path): + # Check if file-defaults can be overridden on a per table basis + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + tmpfile = tmp_path / setup_path + store = HDFStore(tmpfile) + store.append("dfc", df, 
complevel=9, complib="blosc") + store.append("df", df) + store.close() + + with tables.open_file(tmpfile, mode="r") as h5file: + for node in h5file.walk_nodes(where="/df", classname="Leaf"): + assert node.filters.complevel == 0 + assert node.filters.complib is None + for node in h5file.walk_nodes(where="/dfc", classname="Leaf"): + assert node.filters.complevel == 9 + assert node.filters.complib == "blosc" + + +@pytest.mark.parametrize("lvl", range(10)) +@pytest.mark.parametrize("lib", tables.filters.all_complibs) +@pytest.mark.filterwarnings("ignore:object name is not a valid") +@pytest.mark.skipif( + not PY311 and is_ci_environment() and is_platform_linux(), + reason="Segfaulting in a CI environment" + # with xfail, would sometimes raise UnicodeDecodeError + # invalid state byte +) +def test_complibs(tmp_path, lvl, lib, request): + # GH14478 + if PY311 and is_platform_linux() and lib == "blosc2" and lvl != 0: + request.applymarker( + pytest.mark.xfail(reason=f"Fails for {lib} on Linux and PY > 3.11") + ) + df = DataFrame( + np.ones((30, 4)), columns=list("ABCD"), index=np.arange(30).astype(np.str_) + ) + + # Remove lzo if its not available on this platform + if not tables.which_lib_version("lzo"): + pytest.skip("lzo not available") + # Remove bzip2 if its not available on this platform + if not tables.which_lib_version("bzip2"): + pytest.skip("bzip2 not available") + + tmpfile = tmp_path / f"{lvl}_{lib}.h5" + gname = f"{lvl}_{lib}" + + # Write and read file to see if data is consistent + df.to_hdf(tmpfile, key=gname, complib=lib, complevel=lvl) + result = read_hdf(tmpfile, gname) + tm.assert_frame_equal(result, df) + + # Open file and check metadata for correct amount of compression + with tables.open_file(tmpfile, mode="r") as h5table: + for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"): + assert node.filters.complevel == lvl + if lvl == 0: + assert node.filters.complib is None + else: + assert node.filters.complib == lib + + +@pytest.mark.skipif( + not is_platform_little_endian(), reason="reason platform is not little endian" +) +def test_encoding(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame({"A": "foo", "B": "bar"}, index=range(5)) + df.loc[2, "A"] = np.nan + df.loc[3, "B"] = np.nan + _maybe_remove(store, "df") + store.append("df", df, encoding="ascii") + tm.assert_frame_equal(store["df"], df) + + expected = df.reindex(columns=["A"]) + result = store.select("df", Term("columns=A", encoding="ascii")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "val", + [ + [b"E\xc9, 17", b"", b"a", b"b", b"c"], + [b"E\xc9, 17", b"a", b"b", b"c"], + [b"EE, 17", b"", b"a", b"b", b"c"], + [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"], + [b"", b"a", b"b", b"c"], + [b"\xf8\xfc", b"a", b"b", b"c"], + [b"A\xf8\xfc", b"", b"a", b"b", b"c"], + [np.nan, b"", b"b", b"c"], + [b"A\xf8\xfc", np.nan, b"", b"b", b"c"], + ], +) +@pytest.mark.parametrize("dtype", ["category", object]) +def test_latin_encoding(tmp_path, setup_path, dtype, val): + enc = "latin-1" + nan_rep = "" + key = "data" + + val = [x.decode(enc) if isinstance(x, bytes) else x for x in val] + ser = Series(val, dtype=dtype) + + store = tmp_path / setup_path + ser.to_hdf(store, key=key, format="table", encoding=enc, nan_rep=nan_rep) + retr = read_hdf(store, key) + + # TODO:(3.0): once Categorical replace deprecation is enforced, + # we may be able to re-simplify the construction of s_nan + if dtype == "category": + if nan_rep in ser.cat.categories: + s_nan = 
ser.cat.remove_categories([nan_rep]) + else: + s_nan = ser + else: + s_nan = ser.replace(nan_rep, np.nan) + + tm.assert_series_equal(s_nan, retr) + + +def test_multiple_open_close(tmp_path, setup_path): + # gh-4409: open & close multiple times + + path = tmp_path / setup_path + + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_hdf(path, key="df", mode="w", format="table") + + # single + store = HDFStore(path) + assert "CLOSED" not in store.info() + assert store.is_open + + store.close() + assert "CLOSED" in store.info() + assert not store.is_open + + path = tmp_path / setup_path + + if pytables._table_file_open_policy_is_strict: + # multiples + store1 = HDFStore(path) + msg = ( + r"The file [\S]* is already opened\. Please close it before " + r"reopening in write mode\." + ) + with pytest.raises(ValueError, match=msg): + HDFStore(path) + + store1.close() + else: + # multiples + store1 = HDFStore(path) + store2 = HDFStore(path) + + assert "CLOSED" not in store1.info() + assert "CLOSED" not in store2.info() + assert store1.is_open + assert store2.is_open + + store1.close() + assert "CLOSED" in store1.info() + assert not store1.is_open + assert "CLOSED" not in store2.info() + assert store2.is_open + + store2.close() + assert "CLOSED" in store1.info() + assert "CLOSED" in store2.info() + assert not store1.is_open + assert not store2.is_open + + # nested close + store = HDFStore(path, mode="w") + store.append("df", df) + + store2 = HDFStore(path) + store2.append("df2", df) + store2.close() + assert "CLOSED" in store2.info() + assert not store2.is_open + + store.close() + assert "CLOSED" in store.info() + assert not store.is_open + + # double closing + store = HDFStore(path, mode="w") + store.append("df", df) + + store2 = HDFStore(path) + store.close() + assert "CLOSED" in store.info() + assert not store.is_open + + store2.close() + assert "CLOSED" in store2.info() + assert not store2.is_open + + # ops on a closed store + path = tmp_path / setup_path + + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_hdf(path, key="df", mode="w", format="table") + + store = HDFStore(path) + store.close() + + msg = r"[\S]* file is not open!" 
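+    # every subsequent operation on the closed store, whether reading,
+    # writing, or introspecting, is expected to raise ClosedFileError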
+ with pytest.raises(ClosedFileError, match=msg): + store.keys() + + with pytest.raises(ClosedFileError, match=msg): + "df" in store + + with pytest.raises(ClosedFileError, match=msg): + len(store) + + with pytest.raises(ClosedFileError, match=msg): + store["df"] + + with pytest.raises(ClosedFileError, match=msg): + store.select("df") + + with pytest.raises(ClosedFileError, match=msg): + store.get("df") + + with pytest.raises(ClosedFileError, match=msg): + store.append("df2", df) + + with pytest.raises(ClosedFileError, match=msg): + store.put("df3", df) + + with pytest.raises(ClosedFileError, match=msg): + store.get_storer("df2") + + with pytest.raises(ClosedFileError, match=msg): + store.remove("df2") + + with pytest.raises(ClosedFileError, match=msg): + store.select("df") + + msg = "'HDFStore' object has no attribute 'df'" + with pytest.raises(AttributeError, match=msg): + store.df + + +def test_fspath(): + with tm.ensure_clean("foo.h5") as path: + with HDFStore(path) as store: + assert os.fspath(store) == str(path) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py new file mode 100644 index 0000000000000000000000000000000000000000..55bd3f0d5a03a1636ae07ea9e1e3776743fd6464 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py @@ -0,0 +1,87 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + HDFStore, + Index, + Series, + date_range, +) +from pandas.tests.io.pytables.common import ( + ensure_clean_store, + tables, +) + +pytestmark = pytest.mark.single_cpu + + +def test_keys(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["b"] = Series( + range(10), dtype="float64", index=[f"i_{i}" for i in range(10)] + ) + store["c"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + assert len(store) == 3 + expected = {"/a", "/b", "/c"} + assert set(store.keys()) == expected + assert set(store) == expected + + +def test_non_pandas_keys(tmp_path, setup_path): + class Table1(tables.IsDescription): + value1 = tables.Float32Col() + + class Table2(tables.IsDescription): + value2 = tables.Float32Col() + + class Table3(tables.IsDescription): + value3 = tables.Float32Col() + + path = tmp_path / setup_path + with tables.open_file(path, mode="w") as h5file: + group = h5file.create_group("/", "group") + h5file.create_table(group, "table1", Table1, "Table 1") + h5file.create_table(group, "table2", Table2, "Table 2") + h5file.create_table(group, "table3", Table3, "Table 3") + with HDFStore(path) as store: + assert len(store.keys(include="native")) == 3 + expected = {"/group/table1", "/group/table2", "/group/table3"} + assert set(store.keys(include="native")) == expected + assert set(store.keys(include="pandas")) == set() + for name in expected: + df = store.get(name) + assert len(df.columns) == 1 + + +def test_keys_illegal_include_keyword_value(setup_path): + with ensure_clean_store(setup_path) as store: + with pytest.raises( + ValueError, + match="`include` should be either 'pandas' or 'native' but is 'illegal'", + ): + store.keys(include="illegal") + + +def test_keys_ignore_hdf_softlink(setup_path): + # GH 20523 + # Puts a softlink into HDF file and rereads + + with 
ensure_clean_store(setup_path) as store: + df = DataFrame({"A": range(5), "B": range(5)}) + store.put("df", df) + + assert store.keys() == ["/df"] + + store._handle.create_soft_link(store._handle.root, "symlink", "df") + + # Should ignore the softlink + assert store.keys() == ["/df"] diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py new file mode 100644 index 0000000000000000000000000000000000000000..bc5f046b7fa3308e30959953f8751056de941a32 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py @@ -0,0 +1,374 @@ +import re + +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp + +import pandas as pd +from pandas import ( + DataFrame, + HDFStore, + Index, + MultiIndex, + Series, + _testing as tm, + concat, + date_range, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) +from pandas.util import _test_decorators as td + +pytestmark = pytest.mark.single_cpu + + +def test_format_type(tmp_path, setup_path): + df = DataFrame({"A": [1, 2]}) + with HDFStore(tmp_path / setup_path) as store: + store.put("a", df, format="fixed") + store.put("b", df, format="table") + + assert store.get_storer("a").format_type == "fixed" + assert store.get_storer("b").format_type == "table" + + +def test_format_kwarg_in_constructor(tmp_path, setup_path): + # GH 13291 + + msg = "format is not a defined argument for HDFStore" + + with pytest.raises(ValueError, match=msg): + HDFStore(tmp_path / setup_path, format="table") + + +def test_api_default_format(tmp_path, setup_path): + # default_format option + with ensure_clean_store(setup_path) as store: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + with pd.option_context("io.hdf.default_format", "fixed"): + _maybe_remove(store, "df") + store.put("df", df) + assert not store.get_storer("df").is_table + + msg = "Can only append to Tables" + with pytest.raises(ValueError, match=msg): + store.append("df2", df) + + with pd.option_context("io.hdf.default_format", "table"): + _maybe_remove(store, "df") + store.put("df", df) + assert store.get_storer("df").is_table + + _maybe_remove(store, "df2") + store.append("df2", df) + assert store.get_storer("df").is_table + + path = tmp_path / setup_path + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + with pd.option_context("io.hdf.default_format", "fixed"): + df.to_hdf(path, key="df") + with HDFStore(path) as store: + assert not store.get_storer("df").is_table + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df2", append=True) + + with pd.option_context("io.hdf.default_format", "table"): + df.to_hdf(path, key="df3") + with HDFStore(path) as store: + assert store.get_storer("df3").is_table + df.to_hdf(path, key="df4", append=True) + with HDFStore(path) as store: + assert store.get_storer("df4").is_table + + +def test_put(setup_path): + with ensure_clean_store(setup_path) as store: + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=20, freq="B"), + ) + 
store["a"] = ts + store["b"] = df[:10] + store["foo/bar/bah"] = df[:10] + store["foo"] = df[:10] + store["/foo"] = df[:10] + store.put("c", df[:10], format="table") + + # not OK, not a table + msg = "Can only append to Tables" + with pytest.raises(ValueError, match=msg): + store.put("b", df[10:], append=True) + + # node does not currently exist, test _is_table_type returns False + # in this case + _maybe_remove(store, "f") + with pytest.raises(ValueError, match=msg): + store.put("f", df[10:], append=True) + + # can't put to a table (use append instead) + with pytest.raises(ValueError, match=msg): + store.put("c", df[10:], append=True) + + # overwrite table + store.put("c", df[:10], format="table", append=False) + tm.assert_frame_equal(df[:10], store["c"]) + + +def test_put_string_index(setup_path): + with ensure_clean_store(setup_path) as store: + index = Index([f"I am a very long string index: {i}" for i in range(20)]) + s = Series(np.arange(20), index=index) + df = DataFrame({"A": s, "B": s}) + + store["a"] = s + tm.assert_series_equal(store["a"], s) + + store["b"] = df + tm.assert_frame_equal(store["b"], df) + + # mixed length + index = Index( + ["abcdefghijklmnopqrstuvwxyz1234567890"] + + [f"I am a very long string index: {i}" for i in range(20)] + ) + s = Series(np.arange(21), index=index) + df = DataFrame({"A": s, "B": s}) + store["a"] = s + tm.assert_series_equal(store["a"], s) + + store["b"] = df + tm.assert_frame_equal(store["b"], df) + + +def test_put_compression(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + store.put("c", df, format="table", complib="zlib") + tm.assert_frame_equal(store["c"], df) + + # can't compress if format='fixed' + msg = "Compression not supported on Fixed format stores" + with pytest.raises(ValueError, match=msg): + store.put("b", df, format="fixed", complib="zlib") + + +@td.skip_if_windows +def test_put_compression_blosc(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with ensure_clean_store(setup_path) as store: + # can't compress if format='fixed' + msg = "Compression not supported on Fixed format stores" + with pytest.raises(ValueError, match=msg): + store.put("b", df, format="fixed", complib="blosc") + + store.put("c", df, format="table", complib="blosc") + tm.assert_frame_equal(store["c"], df) + + +def test_put_mixed_type(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["bool1"] = df["A"] > 0 + df["bool2"] = df["B"] > 0 + df["bool3"] = True + df["int1"] = 1 + df["int2"] = 2 + df["timestamp1"] = Timestamp("20010102").as_unit("ns") + df["timestamp2"] = Timestamp("20010103").as_unit("ns") + df["datetime1"] = Timestamp("20010102").as_unit("ns") + df["datetime2"] = Timestamp("20010103").as_unit("ns") + df.loc[df.index[3:6], ["obj1"]] = np.nan + df = df._consolidate() + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "df") + + with tm.assert_produces_warning(pd.errors.PerformanceWarning): + store.put("df", df) + + expected = store.get("df") + tm.assert_frame_equal(expected, df) + + +@pytest.mark.parametrize("format", 
["table", "fixed"]) +@pytest.mark.parametrize( + "index", + [ + Index([str(i) for i in range(10)]), + Index(np.arange(10, dtype=float)), + Index(np.arange(10)), + date_range("2020-01-01", periods=10), + pd.period_range("2020-01-01", periods=10), + ], +) +def test_store_index_types(setup_path, format, index): + # GH5386 + # test storing various index types + + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + columns=list("AB"), + index=index, + ) + _maybe_remove(store, "df") + store.put("df", df, format=format) + tm.assert_frame_equal(df, store["df"]) + + +def test_column_multiindex(setup_path): + # GH 4710 + # recreate multi-indexes properly + + index = MultiIndex.from_tuples( + [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"] + ) + df = DataFrame(np.arange(12).reshape(3, 4), columns=index) + expected = df.set_axis(df.index.to_numpy()) + + with ensure_clean_store(setup_path) as store: + store.put("df", df) + tm.assert_frame_equal( + store["df"], expected, check_index_type=True, check_column_type=True + ) + + store.put("df1", df, format="table") + tm.assert_frame_equal( + store["df1"], expected, check_index_type=True, check_column_type=True + ) + + msg = re.escape("cannot use a multi-index on axis [1] with data_columns ['A']") + with pytest.raises(ValueError, match=msg): + store.put("df2", df, format="table", data_columns=["A"]) + msg = re.escape("cannot use a multi-index on axis [1] with data_columns True") + with pytest.raises(ValueError, match=msg): + store.put("df3", df, format="table", data_columns=True) + + # appending multi-column on existing table (see GH 6167) + with ensure_clean_store(setup_path) as store: + store.append("df2", df) + store.append("df2", df) + + tm.assert_frame_equal(store["df2"], concat((df, df))) + + # non_index_axes name + df = DataFrame(np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")) + expected = df.set_axis(df.index.to_numpy()) + + with ensure_clean_store(setup_path) as store: + store.put("df1", df, format="table") + tm.assert_frame_equal( + store["df1"], expected, check_index_type=True, check_column_type=True + ) + + +def test_store_multiindex(setup_path): + # validate multi-index names + # GH 5527 + with ensure_clean_store(setup_path) as store: + + def make_index(names=None): + dti = date_range("2013-12-01", "2013-12-02") + mi = MultiIndex.from_product([dti, range(2), range(3)], names=names) + return mi + + # no names + _maybe_remove(store, "df") + df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index()) + store.append("df", df) + tm.assert_frame_equal(store.select("df"), df) + + # partial names + _maybe_remove(store, "df") + df = DataFrame( + np.zeros((12, 2)), + columns=["a", "b"], + index=make_index(["date", None, None]), + ) + store.append("df", df) + tm.assert_frame_equal(store.select("df"), df) + + # series + _maybe_remove(store, "ser") + ser = Series(np.zeros(12), index=make_index(["date", None, None])) + store.append("ser", ser) + xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"])) + tm.assert_series_equal(store.select("ser"), xp) + + # dup with column + _maybe_remove(store, "df") + df = DataFrame( + np.zeros((12, 2)), + columns=["a", "b"], + index=make_index(["date", "a", "t"]), + ) + msg = "duplicate names/columns in the multi-index when storing as a table" + with pytest.raises(ValueError, match=msg): + store.append("df", df) + + # dup within level + _maybe_remove(store, "df") + df = 
DataFrame( + np.zeros((12, 2)), + columns=["a", "b"], + index=make_index(["date", "date", "date"]), + ) + with pytest.raises(ValueError, match=msg): + store.append("df", df) + + # fully names + _maybe_remove(store, "df") + df = DataFrame( + np.zeros((12, 2)), + columns=["a", "b"], + index=make_index(["date", "s", "t"]), + ) + store.append("df", df) + tm.assert_frame_equal(store.select("df"), df) + + +@pytest.mark.parametrize("format", ["fixed", "table"]) +def test_store_periodindex(tmp_path, setup_path, format): + # GH 7796 + # test of PeriodIndex in HDFStore + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 1)), + index=pd.period_range("20220101", freq="M", periods=5), + ) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="w", format=format) + expected = pd.read_hdf(path, "df") + tm.assert_frame_equal(df, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py new file mode 100644 index 0000000000000000000000000000000000000000..8d9d3afc4ad6f04b070a96922bdba3b5208ba6a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py @@ -0,0 +1,14 @@ +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + + +@td.skip_if_installed("tables") +def test_pytables_raises(): + df = pd.DataFrame({"A": [1, 2]}) + with pytest.raises(ImportError, match="tables"): + with tm.ensure_clean("foo.h5") as path: + df.to_hdf(path, key="df") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_read.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_read.py new file mode 100644 index 0000000000000000000000000000000000000000..e4a3ea1fc9db871275f07d1d39f75cc1f91216d2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_read.py @@ -0,0 +1,412 @@ +from contextlib import closing +from pathlib import Path +import re + +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp +from pandas.compat import is_platform_windows + +import pandas as pd +from pandas import ( + DataFrame, + HDFStore, + Index, + Series, + _testing as tm, + date_range, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) +from pandas.util import _test_decorators as td + +from pandas.io.pytables import TableIterator + +pytestmark = pytest.mark.single_cpu + + +def test_read_missing_key_close_store(tmp_path, setup_path): + # GH 25766 + path = tmp_path / setup_path + df = DataFrame({"a": range(2), "b": range(2)}) + df.to_hdf(path, key="k1") + + with pytest.raises(KeyError, match="'No object named k2 in the file'"): + read_hdf(path, "k2") + + # smoke test to test that file is properly closed after + # read with KeyError before another write + df.to_hdf(path, key="k2") + + +def test_read_index_error_close_store(tmp_path, setup_path): + # GH 25766 + path = tmp_path / setup_path + df = DataFrame({"A": [], "B": []}, index=[]) + df.to_hdf(path, key="k1") + + with pytest.raises(IndexError, match=r"list index out of range"): + read_hdf(path, "k1", stop=0) + + # smoke test to test that file is properly closed after + # read with IndexError before another write + df.to_hdf(path, key="k1") + + +def test_read_missing_key_opened_store(tmp_path, setup_path): + # GH 28699 + path = tmp_path / setup_path + df = 
DataFrame({"a": range(2), "b": range(2)}) + df.to_hdf(path, key="k1") + + with HDFStore(path, "r") as store: + with pytest.raises(KeyError, match="'No object named k2 in the file'"): + read_hdf(store, "k2") + + # Test that the file is still open after a KeyError and that we can + # still read from it. + read_hdf(store, "k1") + + +def test_read_column(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "df") + + # GH 17912 + # HDFStore.select_column should raise a KeyError + # exception if the key is not a valid store + with pytest.raises(KeyError, match="No object named df in the file"): + store.select_column("df", "index") + + store.append("df", df) + # error + with pytest.raises( + KeyError, match=re.escape("'column [foo] not found in the table'") + ): + store.select_column("df", "foo") + + msg = re.escape("select_column() got an unexpected keyword argument 'where'") + with pytest.raises(TypeError, match=msg): + store.select_column("df", "index", where=["index>5"]) + + # valid + result = store.select_column("df", "index") + tm.assert_almost_equal(result.values, Series(df.index).values) + assert isinstance(result, Series) + + # not a data indexable column + msg = re.escape( + "column [values_block_0] can not be extracted individually; " + "it is not data indexable" + ) + with pytest.raises(ValueError, match=msg): + store.select_column("df", "values_block_0") + + # a data column + df2 = df.copy() + df2["string"] = "foo" + store.append("df2", df2, data_columns=["string"]) + result = store.select_column("df2", "string") + tm.assert_almost_equal(result.values, df2["string"].values) + + # a data column with NaNs, result excludes the NaNs + df3 = df.copy() + df3["string"] = "foo" + df3.loc[df3.index[4:6], "string"] = np.nan + store.append("df3", df3, data_columns=["string"]) + result = store.select_column("df3", "string") + tm.assert_almost_equal(result.values, df3["string"].values) + + # start/stop + result = store.select_column("df3", "string", start=2) + tm.assert_almost_equal(result.values, df3["string"].values[2:]) + + result = store.select_column("df3", "string", start=-2) + tm.assert_almost_equal(result.values, df3["string"].values[-2:]) + + result = store.select_column("df3", "string", stop=2) + tm.assert_almost_equal(result.values, df3["string"].values[:2]) + + result = store.select_column("df3", "string", stop=-2) + tm.assert_almost_equal(result.values, df3["string"].values[:-2]) + + result = store.select_column("df3", "string", start=2, stop=-2) + tm.assert_almost_equal(result.values, df3["string"].values[2:-2]) + + result = store.select_column("df3", "string", start=-2, stop=2) + tm.assert_almost_equal(result.values, df3["string"].values[-2:2]) + + # GH 10392 - make sure column name is preserved + df4 = DataFrame({"A": np.random.default_rng(2).standard_normal(10), "B": "foo"}) + store.append("df4", df4, data_columns=True) + expected = df4["B"] + result = store.select_column("df4", "B") + tm.assert_series_equal(result, expected) + + +def test_pytables_native_read(datapath): + with ensure_clean_store( + datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r" + ) as store: + d2 = store["detector/readout"] + assert isinstance(d2, DataFrame) + + +@pytest.mark.skipif(is_platform_windows(), reason="native2 read fails oddly on windows") +def 
test_pytables_native2_read(datapath): + with ensure_clean_store( + datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r" + ) as store: + str(store) + d1 = store["detector"] + assert isinstance(d1, DataFrame) + + +def test_legacy_table_fixed_format_read_py2(datapath): + # GH 24510 + # legacy table with fixed format written in Python 2 + with ensure_clean_store( + datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r" + ) as store: + result = store.select("df") + expected = DataFrame( + [[1, 2, 3, "D"]], + columns=["A", "B", "C", "D"], + index=Index(["ABC"], name="INDEX_NAME"), + ) + tm.assert_frame_equal(expected, result) + + +def test_legacy_table_fixed_format_read_datetime_py2(datapath): + # GH 31750 + # legacy table with fixed format and datetime64 column written in Python 2 + expected = DataFrame( + [[Timestamp("2020-02-06T18:00")]], + columns=["A"], + index=Index(["date"]), + dtype="M8[ns]", + ) + with ensure_clean_store( + datapath("io", "data", "legacy_hdf", "legacy_table_fixed_datetime_py2.h5"), + mode="r", + ) as store: + result = store.select("df") + tm.assert_frame_equal(expected, result) + + +def test_legacy_table_read_py2(datapath): + # issue: 24925 + # legacy table written in Python 2 + with ensure_clean_store( + datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r" + ) as store: + result = store.select("table") + + expected = DataFrame({"a": ["a", "b"], "b": [2, 3]}) + tm.assert_frame_equal(expected, result) + + +def test_read_hdf_open_store(tmp_path, setup_path): + # GH10330 + # No check for non-string path_or-buf, and no test of open store + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + df.index.name = "letters" + df = df.set_index(keys="E", append=True) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="w") + direct = read_hdf(path, "df") + with HDFStore(path, mode="r") as store: + indirect = read_hdf(store, "df") + tm.assert_frame_equal(direct, indirect) + assert store.is_open + + +def test_read_hdf_index_not_view(tmp_path, setup_path): + # GH 37441 + # Ensure that the index of the DataFrame is not a view + # into the original recarray that pytables reads in + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=[0, 1, 2, 3], + columns=list("ABCDE"), + ) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="w", format="table") + + df2 = read_hdf(path, "df") + assert df2.index._data.base is None + tm.assert_frame_equal(df, df2) + + +def test_read_hdf_iterator(tmp_path, setup_path): + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + df.index.name = "letters" + df = df.set_index(keys="E", append=True) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="w", format="t") + direct = read_hdf(path, "df") + iterator = read_hdf(path, "df", iterator=True) + with closing(iterator.store): + assert isinstance(iterator, TableIterator) + indirect = next(iterator.__iter__()) + tm.assert_frame_equal(direct, indirect) + + +def test_read_nokey(tmp_path, setup_path): + # GH10443 + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + + # Categorical dtype not supported for "fixed" format. So no need + # to test with that dtype in the dataframe here. 
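+    # With a single dataset in the file, read_hdf can infer the key; once a
+    # second dataset is written, omitting the key is expected to raise ValueError.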
+ path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="a") + reread = read_hdf(path) + tm.assert_frame_equal(df, reread) + df.to_hdf(path, key="df2", mode="a") + + msg = "key must be provided when HDF5 file contains multiple datasets." + with pytest.raises(ValueError, match=msg): + read_hdf(path) + + +def test_read_nokey_table(tmp_path, setup_path): + # GH13231 + df = DataFrame({"i": range(5), "c": Series(list("abacd"), dtype="category")}) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="a", format="table") + reread = read_hdf(path) + tm.assert_frame_equal(df, reread) + df.to_hdf(path, key="df2", mode="a", format="table") + + msg = "key must be provided when HDF5 file contains multiple datasets." + with pytest.raises(ValueError, match=msg): + read_hdf(path) + + +def test_read_nokey_empty(tmp_path, setup_path): + path = tmp_path / setup_path + store = HDFStore(path) + store.close() + msg = re.escape( + "Dataset(s) incompatible with Pandas data types, not table, or no " + "datasets found in HDF5 file." + ) + with pytest.raises(ValueError, match=msg): + read_hdf(path) + + +def test_read_from_pathlib_path(tmp_path, setup_path): + # GH11773 + expected = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + filename = tmp_path / setup_path + path_obj = Path(filename) + + expected.to_hdf(path_obj, key="df", mode="a") + actual = read_hdf(path_obj, key="df") + + tm.assert_frame_equal(expected, actual) + + +@td.skip_if_no("py.path") +def test_read_from_py_localpath(tmp_path, setup_path): + # GH11773 + from py.path import local as LocalPath + + expected = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + filename = tmp_path / setup_path + path_obj = LocalPath(filename) + + expected.to_hdf(path_obj, key="df", mode="a") + actual = read_hdf(path_obj, key="df") + + tm.assert_frame_equal(expected, actual) + + +@pytest.mark.parametrize("format", ["fixed", "table"]) +def test_read_hdf_series_mode_r(tmp_path, format, setup_path): + # GH 16583 + # Tests that reading a Series saved to an HDF file + # still works if a mode='r' argument is supplied + series = Series(range(10), dtype=np.float64) + path = tmp_path / setup_path + series.to_hdf(path, key="data", format=format) + result = read_hdf(path, key="data", mode="r") + tm.assert_series_equal(result, series) + + +@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning") +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_read_py2_hdf_file_in_py3(datapath): + # GH 16781 + + # tests reading a PeriodIndex DataFrame written in Python2 in Python3 + + # the file was generated in Python 2.7 like so: + # + # df = DataFrame([1.,2,3], index=pd.PeriodIndex( + # ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B')) + # df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p') + + expected = DataFrame( + [1.0, 2, 3], + index=pd.PeriodIndex(["2015-01-01", "2015-01-02", "2015-01-05"], freq="B"), + ) + + with ensure_clean_store( + datapath( + "io", "data", "legacy_hdf", "periodindex_0.20.1_x86_64_darwin_2.7.13.h5" + ), + mode="r", + ) as store: + result = store["p"] + tm.assert_frame_equal(result, expected) + + +def test_read_infer_string(tmp_path, setup_path): + # GH#54431 + pytest.importorskip("pyarrow") + df = DataFrame({"a": ["a", "b", None]}) + path = tmp_path / setup_path + df.to_hdf(path, key="data", format="table") + with 
pd.option_context("future.infer_string", True): + result = read_hdf(path, key="data", mode="r") + expected = DataFrame( + {"a": ["a", "b", None]}, + dtype="string[pyarrow_numpy]", + columns=Index(["a"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_retain_attributes.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_retain_attributes.py new file mode 100644 index 0000000000000000000000000000000000000000..6284b826c3cf01fed6ce50e5519f6c2f543b8c64 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_retain_attributes.py @@ -0,0 +1,92 @@ +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + _testing as tm, + date_range, + errors, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + +pytestmark = pytest.mark.single_cpu + + +def test_retain_index_attributes(setup_path, unit): + # GH 3499, losing frequency info on index recreation + dti = date_range("2000-1-1", periods=3, freq="h", unit=unit) + df = DataFrame({"A": Series(range(3), index=dti)}) + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "data") + store.put("data", df, format="table") + + result = store.get("data") + tm.assert_frame_equal(df, result) + + for attr in ["freq", "tz", "name"]: + for idx in ["index", "columns"]: + assert getattr(getattr(df, idx), attr, None) == getattr( + getattr(result, idx), attr, None + ) + + dti2 = date_range("2002-1-1", periods=3, freq="D", unit=unit) + # try to append a table with a different frequency + with tm.assert_produces_warning(errors.AttributeConflictWarning): + df2 = DataFrame({"A": Series(range(3), index=dti2)}) + store.append("data", df2) + + assert store.get_storer("data").info["index"]["freq"] is None + + # this is ok + _maybe_remove(store, "df2") + dti3 = DatetimeIndex( + ["2001-01-01", "2001-01-02", "2002-01-01"], dtype=f"M8[{unit}]" + ) + df2 = DataFrame( + { + "A": Series( + range(3), + index=dti3, + ) + } + ) + store.append("df2", df2) + dti4 = date_range("2002-1-1", periods=3, freq="D", unit=unit) + df3 = DataFrame({"A": Series(range(3), index=dti4)}) + store.append("df2", df3) + + +def test_retain_index_attributes2(tmp_path, setup_path): + path = tmp_path / setup_path + + with tm.assert_produces_warning(errors.AttributeConflictWarning): + df = DataFrame( + {"A": Series(range(3), index=date_range("2000-1-1", periods=3, freq="h"))} + ) + df.to_hdf(path, key="data", mode="w", append=True) + df2 = DataFrame( + {"A": Series(range(3), index=date_range("2002-1-1", periods=3, freq="D"))} + ) + + df2.to_hdf(path, key="data", append=True) + + idx = date_range("2000-1-1", periods=3, freq="h") + idx.name = "foo" + df = DataFrame({"A": Series(range(3), index=idx)}) + df.to_hdf(path, key="data", mode="w", append=True) + + assert read_hdf(path, key="data").index.name == "foo" + + with tm.assert_produces_warning(errors.AttributeConflictWarning): + idx2 = date_range("2001-1-1", periods=3, freq="h") + idx2.name = "bar" + df2 = DataFrame({"A": Series(range(3), index=idx2)}) + df2.to_hdf(path, key="data", append=True) + + assert read_hdf(path, "data").index.name is None diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py new file mode 100644 index 
0000000000000000000000000000000000000000..4ba9787a5a6b9ec3dcfa60b64b0e43a8af1d1afc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py @@ -0,0 +1,578 @@ +import datetime +import re + +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp +from pandas.compat import is_platform_windows + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Series, + _testing as tm, + bdate_range, + date_range, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) +from pandas.util import _test_decorators as td + +pytestmark = pytest.mark.single_cpu + + +def test_conv_read_write(): + with tm.ensure_clean() as path: + + def roundtrip(key, obj, **kwargs): + obj.to_hdf(path, key=key, **kwargs) + return read_hdf(path, key) + + o = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + tm.assert_series_equal(o, roundtrip("series", o)) + + o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]) + tm.assert_series_equal(o, roundtrip("string_series", o)) + + o = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + tm.assert_frame_equal(o, roundtrip("frame", o)) + + # table + df = DataFrame({"A": range(5), "B": range(5)}) + df.to_hdf(path, key="table", append=True) + result = read_hdf(path, "table", where=["index>2"]) + tm.assert_frame_equal(df[df.index > 2], result) + + +def test_long_strings(setup_path): + # GH6166 + data = ["a" * 50] * 10 + df = DataFrame({"a": data}, index=data) + + with ensure_clean_store(setup_path) as store: + store.append("df", df, data_columns=["a"]) + + result = store.select("df") + tm.assert_frame_equal(df, result) + + +def test_api(tmp_path, setup_path): + # GH4584 + # API issue when to_hdf doesn't accept append AND format args + path = tmp_path / setup_path + + df = DataFrame(range(20)) + df.iloc[:10].to_hdf(path, key="df", append=True, format="table") + df.iloc[10:].to_hdf(path, key="df", append=True, format="table") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + # append to False + df.iloc[:10].to_hdf(path, key="df", append=False, format="table") + df.iloc[10:].to_hdf(path, key="df", append=True, format="table") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + +def test_api_append(tmp_path, setup_path): + path = tmp_path / setup_path + + df = DataFrame(range(20)) + df.iloc[:10].to_hdf(path, key="df", append=True) + df.iloc[10:].to_hdf(path, key="df", append=True, format="table") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + # append to False + df.iloc[:10].to_hdf(path, key="df", append=False, format="table") + df.iloc[10:].to_hdf(path, key="df", append=True) + tm.assert_frame_equal(read_hdf(path, "df"), df) + + +def test_api_2(tmp_path, setup_path): + path = tmp_path / setup_path + + df = DataFrame(range(20)) + df.to_hdf(path, key="df", append=False, format="fixed") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + df.to_hdf(path, key="df", append=False, format="f") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + df.to_hdf(path, key="df", append=False) + tm.assert_frame_equal(read_hdf(path, "df"), df) + + df.to_hdf(path, key="df") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + with ensure_clean_store(setup_path) as store: + df = DataFrame(range(20)) + + _maybe_remove(store, "df") + store.append("df", df.iloc[:10], append=True, 
format="table") + store.append("df", df.iloc[10:], append=True, format="table") + tm.assert_frame_equal(store.select("df"), df) + + # append to False + _maybe_remove(store, "df") + store.append("df", df.iloc[:10], append=False, format="table") + store.append("df", df.iloc[10:], append=True, format="table") + tm.assert_frame_equal(store.select("df"), df) + + # formats + _maybe_remove(store, "df") + store.append("df", df.iloc[:10], append=False, format="table") + store.append("df", df.iloc[10:], append=True, format="table") + tm.assert_frame_equal(store.select("df"), df) + + _maybe_remove(store, "df") + store.append("df", df.iloc[:10], append=False, format="table") + store.append("df", df.iloc[10:], append=True, format=None) + tm.assert_frame_equal(store.select("df"), df) + + +def test_api_invalid(tmp_path, setup_path): + path = tmp_path / setup_path + # Invalid. + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + msg = "Can only append to Tables" + + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df", append=True, format="f") + + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df", append=True, format="fixed") + + msg = r"invalid HDFStore format specified \[foo\]" + + with pytest.raises(TypeError, match=msg): + df.to_hdf(path, key="df", append=True, format="foo") + + with pytest.raises(TypeError, match=msg): + df.to_hdf(path, key="df", append=False, format="foo") + + # File path doesn't exist + path = "" + msg = f"File {path} does not exist" + + with pytest.raises(FileNotFoundError, match=msg): + read_hdf(path, "df") + + +def test_get(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + left = store.get("a") + right = store["a"] + tm.assert_series_equal(left, right) + + left = store.get("/a") + right = store["/a"] + tm.assert_series_equal(left, right) + + with pytest.raises(KeyError, match="'No object named b in the file'"): + store.get("b") + + +def test_put_integer(setup_path): + # non-date, non-string index + df = DataFrame(np.random.default_rng(2).standard_normal((50, 100))) + _check_roundtrip(df, tm.assert_frame_equal, setup_path) + + +def test_table_values_dtypes_roundtrip(setup_path): + with ensure_clean_store(setup_path) as store: + df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8") + store.append("df_f8", df1) + tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes) + + df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8") + store.append("df_i8", df2) + tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes) + + # incompatible dtype + msg = re.escape( + "invalid combination of [values_axes] on appending data " + "[name->values_block_0,cname->values_block_0," + "dtype->float64,kind->float,shape->(1, 3)] vs " + "current table [name->values_block_0," + "cname->values_block_0,dtype->int64,kind->integer," + "shape->None]" + ) + with pytest.raises(ValueError, match=msg): + store.append("df_i8", df1) + + # check creation/storage/retrieval of float32 (a bit hacky to + # actually create them thought) + df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"]) + store.append("df_f4", df1) + tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes) + assert df1.dtypes.iloc[0] == "float32" + + # check with mixed dtypes + df1 = DataFrame( + { + c: Series(np.random.default_rng(2).integers(5), dtype=c) + for c in ["float32", 
"float64", "int32", "int64", "int16", "int8"] + } + ) + df1["string"] = "foo" + df1["float322"] = 1.0 + df1["float322"] = df1["float322"].astype("float32") + df1["bool"] = df1["float32"] > 0 + df1["time1"] = Timestamp("20130101") + df1["time2"] = Timestamp("20130102") + + store.append("df_mixed_dtypes1", df1) + result = store.select("df_mixed_dtypes1").dtypes.value_counts() + result.index = [str(i) for i in result.index] + expected = Series( + { + "float32": 2, + "float64": 1, + "int32": 1, + "bool": 1, + "int16": 1, + "int8": 1, + "int64": 1, + "object": 1, + "datetime64[ns]": 2, + }, + name="count", + ) + result = result.sort_index() + expected = expected.sort_index() + tm.assert_series_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") +def test_series(setup_path): + s = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]) + _check_roundtrip(s, tm.assert_series_equal, path=setup_path) + + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + _check_roundtrip(ts, tm.assert_series_equal, path=setup_path) + + ts2 = Series(ts.index, Index(ts.index, dtype=object)) + _check_roundtrip(ts2, tm.assert_series_equal, path=setup_path) + + ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object)) + _check_roundtrip( + ts3, tm.assert_series_equal, path=setup_path, check_index_type=False + ) + + +def test_float_index(setup_path): + # GH #454 + index = np.random.default_rng(2).standard_normal(10) + s = Series(np.random.default_rng(2).standard_normal(10), index=index) + _check_roundtrip(s, tm.assert_series_equal, path=setup_path) + + +def test_tuple_index(setup_path): + # GH #492 + col = np.arange(10) + idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)] + data = np.random.default_rng(2).standard_normal(30).reshape((3, 10)) + DF = DataFrame(data, index=idx, columns=col) + + with tm.assert_produces_warning(pd.errors.PerformanceWarning): + _check_roundtrip(DF, tm.assert_frame_equal, path=setup_path) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") +def test_index_types(setup_path): + values = np.random.default_rng(2).standard_normal(2) + + func = lambda lhs, rhs: tm.assert_series_equal(lhs, rhs, check_index_type=True) + + ser = Series(values, [0, "y"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [datetime.datetime.today(), 0]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, ["y", 0]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [datetime.date.today(), "a"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [0, "y"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [datetime.datetime.today(), 0]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, ["y", 0]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [datetime.date.today(), "a"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [1.23, "b"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [1, 1.53]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [1, 5]) + _check_roundtrip(ser, func, path=setup_path) + + dti = DatetimeIndex(["2012-01-01", "2012-01-02"], dtype="M8[ns]") + ser = Series(values, index=dti) + _check_roundtrip(ser, func, path=setup_path) + + ser.index = ser.index.as_unit("s") + _check_roundtrip(ser, func, path=setup_path) + + +def 
test_timeseries_preepoch(setup_path, request): + dr = bdate_range("1/1/1940", "1/1/1960") + ts = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) + try: + _check_roundtrip(ts, tm.assert_series_equal, path=setup_path) + except OverflowError: + if is_platform_windows(): + request.applymarker( + pytest.mark.xfail("known failure on some windows platforms") + ) + raise + + +@pytest.mark.parametrize( + "compression", [False, pytest.param(True, marks=td.skip_if_windows)] +) +def test_frame(compression, setup_path): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # put in some random NAs + df.iloc[0, 0] = np.nan + df.iloc[5, 3] = np.nan + + _check_roundtrip_table( + df, tm.assert_frame_equal, path=setup_path, compression=compression + ) + _check_roundtrip( + df, tm.assert_frame_equal, path=setup_path, compression=compression + ) + + tdf = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _check_roundtrip( + tdf, tm.assert_frame_equal, path=setup_path, compression=compression + ) + + with ensure_clean_store(setup_path) as store: + # not consolidated + df["foo"] = np.random.default_rng(2).standard_normal(len(df)) + store["df"] = df + recons = store["df"] + assert recons._mgr.is_consolidated() + + # empty + _check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path) + + +def test_empty_series_frame(setup_path): + s0 = Series(dtype=object) + s1 = Series(name="myseries", dtype=object) + df0 = DataFrame() + df1 = DataFrame(index=["a", "b", "c"]) + df2 = DataFrame(columns=["d", "e", "f"]) + + _check_roundtrip(s0, tm.assert_series_equal, path=setup_path) + _check_roundtrip(s1, tm.assert_series_equal, path=setup_path) + _check_roundtrip(df0, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) + + +@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"]) +def test_empty_series(dtype, setup_path): + s = Series(dtype=dtype) + _check_roundtrip(s, tm.assert_series_equal, path=setup_path) + + +def test_can_serialize_dates(setup_path): + rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")] + frame = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + + _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) + + +def test_store_hierarchical(setup_path, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path) + + # check that the names are stored + with ensure_clean_store(setup_path) as store: + store["frame"] = frame + recons = store["frame"] + tm.assert_frame_equal(recons, frame) + + +@pytest.mark.parametrize( + "compression", [False, pytest.param(True, marks=td.skip_if_windows)] +) +def test_store_mixed(compression, setup_path): + def _make_one(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["bool1"] = df["A"] > 0 + df["bool2"] = df["B"] > 0 + df["int1"] = 1 + 
df["int2"] = 2 + return df._consolidate() + + df1 = _make_one() + df2 = _make_one() + + _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) + + with ensure_clean_store(setup_path) as store: + store["obj"] = df1 + tm.assert_frame_equal(store["obj"], df1) + store["obj"] = df2 + tm.assert_frame_equal(store["obj"], df2) + + # check that can store Series of all of these types + _check_roundtrip( + df1["obj1"], + tm.assert_series_equal, + path=setup_path, + compression=compression, + ) + _check_roundtrip( + df1["bool1"], + tm.assert_series_equal, + path=setup_path, + compression=compression, + ) + _check_roundtrip( + df1["int1"], + tm.assert_series_equal, + path=setup_path, + compression=compression, + ) + + +def _check_roundtrip(obj, comparator, path, compression=False, **kwargs): + options = {} + if compression: + options["complib"] = "blosc" + + with ensure_clean_store(path, "w", **options) as store: + store["obj"] = obj + retrieved = store["obj"] + comparator(retrieved, obj, **kwargs) + + +def _check_roundtrip_table(obj, comparator, path, compression=False): + options = {} + if compression: + options["complib"] = "blosc" + + with ensure_clean_store(path, "w", **options) as store: + store.put("obj", obj, format="table") + retrieved = store["obj"] + + comparator(retrieved, obj) + + +def test_unicode_index(setup_path): + unicode_values = ["\u03c3", "\u03c3\u03c3"] + + s = Series( + np.random.default_rng(2).standard_normal(len(unicode_values)), + unicode_values, + ) + _check_roundtrip(s, tm.assert_series_equal, path=setup_path) + + +def test_unicode_longer_encoded(setup_path): + # GH 11234 + char = "\u0394" + df = DataFrame({"A": [char]}) + with ensure_clean_store(setup_path) as store: + store.put("df", df, format="table", encoding="utf-8") + result = store.get("df") + tm.assert_frame_equal(result, df) + + df = DataFrame({"A": ["a", char], "B": ["b", "b"]}) + with ensure_clean_store(setup_path) as store: + store.put("df", df, format="table", encoding="utf-8") + result = store.get("df") + tm.assert_frame_equal(result, df) + + +def test_store_datetime_mixed(setup_path): + df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]}) + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + df["d"] = ts.index[:3] + _check_roundtrip(df, tm.assert_frame_equal, path=setup_path) + + +def test_round_trip_equals(tmp_path, setup_path): + # GH 9330 + df = DataFrame({"B": [1, 2], "A": ["x", "y"]}) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table") + other = read_hdf(path, "df") + tm.assert_frame_equal(df, other) + assert df.equals(other) + assert other.equals(df) + + +def test_infer_string_columns(tmp_path, setup_path): + # GH# + pytest.importorskip("pyarrow") + path = tmp_path / setup_path + with pd.option_context("future.infer_string", True): + df = DataFrame(1, columns=list("ABCD"), index=list(range(10))).set_index( + ["A", "B"] + ) + expected = df.copy() + df.to_hdf(path, key="df", format="table") + + result = read_hdf(path, "df") + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py new file mode 100644 index 0000000000000000000000000000000000000000..0e303d1c890c5b6ea4fcfb8d526297981a14069b --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py @@ -0,0 +1,1047 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp + +import pandas as pd +from pandas import ( + DataFrame, + HDFStore, + Index, + MultiIndex, + Series, + _testing as tm, + bdate_range, + concat, + date_range, + isna, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + +from pandas.io.pytables import Term + +pytestmark = pytest.mark.single_cpu + + +def test_select_columns_in_where(setup_path): + # GH 6169 + # recreate multi-indexes when columns is passed + # in the `where` argument + index = MultiIndex( + levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["foo_name", "bar_name"], + ) + + # With a DataFrame + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + index=index, + columns=["A", "B", "C"], + ) + + with ensure_clean_store(setup_path) as store: + store.put("df", df, format="table") + expected = df[["A"]] + + tm.assert_frame_equal(store.select("df", columns=["A"]), expected) + + tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected) + + # With a Series + s = Series(np.random.default_rng(2).standard_normal(10), index=index, name="A") + with ensure_clean_store(setup_path) as store: + store.put("s", s, format="table") + tm.assert_series_equal(store.select("s", where="columns=['A']"), s) + + +def test_select_with_dups(setup_path): + # single dtypes + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "A", "B", "B"] + ) + df.index = date_range("20130101 9:30", periods=10, freq="min") + + with ensure_clean_store(setup_path) as store: + store.append("df", df) + + result = store.select("df") + expected = df + tm.assert_frame_equal(result, expected, by_blocks=True) + + result = store.select("df", columns=df.columns) + expected = df + tm.assert_frame_equal(result, expected, by_blocks=True) + + result = store.select("df", columns=["A"]) + expected = df.loc[:, ["A"]] + tm.assert_frame_equal(result, expected) + + # dups across dtypes + df = concat( + [ + DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "A", "B", "B"], + ), + DataFrame( + np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2), + columns=["A", "C"], + ), + ], + axis=1, + ) + df.index = date_range("20130101 9:30", periods=10, freq="min") + + with ensure_clean_store(setup_path) as store: + store.append("df", df) + + result = store.select("df") + expected = df + tm.assert_frame_equal(result, expected, by_blocks=True) + + result = store.select("df", columns=df.columns) + expected = df + tm.assert_frame_equal(result, expected, by_blocks=True) + + expected = df.loc[:, ["A"]] + result = store.select("df", columns=["A"]) + tm.assert_frame_equal(result, expected, by_blocks=True) + + expected = df.loc[:, ["B", "A"]] + result = store.select("df", columns=["B", "A"]) + tm.assert_frame_equal(result, expected, by_blocks=True) + + # duplicates on both index and columns + with ensure_clean_store(setup_path) as store: + store.append("df", df) + store.append("df", df) + + expected = df.loc[:, ["B", "A"]] + expected = concat([expected, expected]) + result = store.select("df", columns=["B", "A"]) + tm.assert_frame_equal(result, expected, by_blocks=True) + + +def test_select(setup_path): + with ensure_clean_store(setup_path) as store: + # select with columns= + df = 
DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _maybe_remove(store, "df") + store.append("df", df) + result = store.select("df", columns=["A", "B"]) + expected = df.reindex(columns=["A", "B"]) + tm.assert_frame_equal(expected, result) + + # equivalently + result = store.select("df", [("columns=['A', 'B']")]) + expected = df.reindex(columns=["A", "B"]) + tm.assert_frame_equal(expected, result) + + # with a data column + _maybe_remove(store, "df") + store.append("df", df, data_columns=["A"]) + result = store.select("df", ["A > 0"], columns=["A", "B"]) + expected = df[df.A > 0].reindex(columns=["A", "B"]) + tm.assert_frame_equal(expected, result) + + # all a data columns + _maybe_remove(store, "df") + store.append("df", df, data_columns=True) + result = store.select("df", ["A > 0"], columns=["A", "B"]) + expected = df[df.A > 0].reindex(columns=["A", "B"]) + tm.assert_frame_equal(expected, result) + + # with a data column, but different columns + _maybe_remove(store, "df") + store.append("df", df, data_columns=["A"]) + result = store.select("df", ["A > 0"], columns=["C", "D"]) + expected = df[df.A > 0].reindex(columns=["C", "D"]) + tm.assert_frame_equal(expected, result) + + +def test_select_dtypes(setup_path): + with ensure_clean_store(setup_path) as store: + # with a Timestamp data column (GH #2637) + df = DataFrame( + { + "ts": bdate_range("2012-01-01", periods=300), + "A": np.random.default_rng(2).standard_normal(300), + } + ) + _maybe_remove(store, "df") + store.append("df", df, data_columns=["ts", "A"]) + + result = store.select("df", "ts>=Timestamp('2012-02-01')") + expected = df[df.ts >= Timestamp("2012-02-01")] + tm.assert_frame_equal(expected, result) + + # bool columns (GH #2849) + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=["A", "B"] + ) + df["object"] = "foo" + df.loc[4:5, "object"] = "bar" + df["boolv"] = df["A"] > 0 + _maybe_remove(store, "df") + store.append("df", df, data_columns=True) + + expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa: E712 + for v in [True, "true", 1]: + result = store.select("df", f"boolv == {v}", columns=["A", "boolv"]) + tm.assert_frame_equal(expected, result) + + expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa: E712 + for v in [False, "false", 0]: + result = store.select("df", f"boolv == {v}", columns=["A", "boolv"]) + tm.assert_frame_equal(expected, result) + + # integer index + df = DataFrame( + { + "A": np.random.default_rng(2).random(20), + "B": np.random.default_rng(2).random(20), + } + ) + _maybe_remove(store, "df_int") + store.append("df_int", df) + result = store.select("df_int", "index<10 and columns=['A']") + expected = df.reindex(index=list(df.index)[0:10], columns=["A"]) + tm.assert_frame_equal(expected, result) + + # float index + df = DataFrame( + { + "A": np.random.default_rng(2).random(20), + "B": np.random.default_rng(2).random(20), + "index": np.arange(20, dtype="f8"), + } + ) + _maybe_remove(store, "df_float") + store.append("df_float", df) + result = store.select("df_float", "index<10.0 and columns=['A']") + expected = df.reindex(index=list(df.index)[0:10], columns=["A"]) + tm.assert_frame_equal(expected, result) + + with ensure_clean_store(setup_path) as store: + # floats w/o NaN + df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64") + df["cols"] = (df["cols"] + 10).apply(str) + + store.append("df1", df, 
data_columns=True) + result = store.select("df1", where="values>2.0") + expected = df[df["values"] > 2.0] + tm.assert_frame_equal(expected, result) + + # floats with NaN + df.iloc[0] = np.nan + expected = df[df["values"] > 2.0] + + store.append("df2", df, data_columns=True, index=False) + result = store.select("df2", where="values>2.0") + tm.assert_frame_equal(expected, result) + + # https://github.com/PyTables/PyTables/issues/282 + # bug in selection when 0th row has a np.nan and an index + # store.append('df3',df,data_columns=True) + # result = store.select( + # 'df3', where='values>2.0') + # tm.assert_frame_equal(expected, result) + + # not in first position float with NaN ok too + df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64") + df["cols"] = (df["cols"] + 10).apply(str) + + df.iloc[1] = np.nan + expected = df[df["values"] > 2.0] + + store.append("df4", df, data_columns=True) + result = store.select("df4", where="values>2.0") + tm.assert_frame_equal(expected, result) + + # test selection with comparison against numpy scalar + # GH 11283 + with ensure_clean_store(setup_path) as store: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + expected = df[df["A"] > 0] + + store.append("df", df, data_columns=True) + np_zero = np.float64(0) # noqa: F841 + result = store.select("df", where=["A>np_zero"]) + tm.assert_frame_equal(expected, result) + + +def test_select_with_many_inputs(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + { + "ts": bdate_range("2012-01-01", periods=300), + "A": np.random.default_rng(2).standard_normal(300), + "B": range(300), + "users": ["a"] * 50 + + ["b"] * 50 + + ["c"] * 100 + + [f"a{i:03d}" for i in range(100)], + } + ) + _maybe_remove(store, "df") + store.append("df", df, data_columns=["ts", "A", "B", "users"]) + + # regular select + result = store.select("df", "ts>=Timestamp('2012-02-01')") + expected = df[df.ts >= Timestamp("2012-02-01")] + tm.assert_frame_equal(expected, result) + + # small selector + result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']") + expected = df[ + (df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"]) + ] + tm.assert_frame_equal(expected, result) + + # big selector along the columns + selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)] + result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector") + expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)] + tm.assert_frame_equal(expected, result) + + selector = range(100, 200) + result = store.select("df", "B=selector") + expected = df[df.B.isin(selector)] + tm.assert_frame_equal(expected, result) + assert len(result) == 100 + + # big selector along the index + selector = Index(df.ts[0:100].values) + result = store.select("df", "ts=selector") + expected = df[df.ts.isin(selector.values)] + tm.assert_frame_equal(expected, result) + assert len(result) == 100 + + +def test_select_iterator(tmp_path, setup_path): + # single table + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _maybe_remove(store, "df") + store.append("df", df) + + expected = store.select("df") + + results = list(store.select("df", iterator=True)) + result = concat(results) + 
tm.assert_frame_equal(expected, result) + + results = list(store.select("df", chunksize=2)) + assert len(results) == 5 + result = concat(results) + tm.assert_frame_equal(expected, result) + + results = list(store.select("df", chunksize=2)) + result = concat(results) + tm.assert_frame_equal(result, expected) + + path = tmp_path / setup_path + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.to_hdf(path, key="df_non_table") + + msg = "can only use an iterator or chunksize on a table" + with pytest.raises(TypeError, match=msg): + read_hdf(path, "df_non_table", chunksize=2) + + with pytest.raises(TypeError, match=msg): + read_hdf(path, "df_non_table", iterator=True) + + path = tmp_path / setup_path + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.to_hdf(path, key="df", format="table") + + results = list(read_hdf(path, "df", chunksize=2)) + result = concat(results) + + assert len(results) == 5 + tm.assert_frame_equal(result, df) + tm.assert_frame_equal(result, read_hdf(path, "df")) + + # multiple + + with ensure_clean_store(setup_path) as store: + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + store.append("df1", df1, data_columns=True) + df2 = df1.copy().rename(columns="{}_2".format) + df2["foo"] = "bar" + store.append("df2", df2) + + df = concat([df1, df2], axis=1) + + # full selection + expected = store.select_as_multiple(["df1", "df2"], selector="df1") + results = list( + store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=2) + ) + result = concat(results) + tm.assert_frame_equal(expected, result) + + +def test_select_iterator_complete_8014(setup_path): + # GH 8014 + # using iterator and where clause + chunksize = 1e4 + + # no iterator + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + beg_dt = expected.index[0] + end_dt = expected.index[-1] + + # select w/o iteration and no where clause works + result = store.select("df") + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, single term, begin + # of range, works + where = f"index >= '{beg_dt}'" + result = store.select("df", where=where) + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, single term, end + # of range, works + where = f"index <= '{end_dt}'" + result = store.select("df", where=where) + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, inclusive range, + # works + where = f"index >= '{beg_dt}' & index <= '{end_dt}'" + result = store.select("df", where=where) + tm.assert_frame_equal(expected, result) + + # with iterator, full range + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + beg_dt = expected.index[0] + end_dt = 
expected.index[-1] + + # select w/iterator and no where clause works + results = list(store.select("df", chunksize=chunksize)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, single term, begin of range + where = f"index >= '{beg_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, single term, end of range + where = f"index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, inclusive range + where = f"index >= '{beg_dt}' & index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + +def test_select_iterator_non_complete_8014(setup_path): + # GH 8014 + # using iterator and where clause + chunksize = 1e4 + + # with iterator, non complete range + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + beg_dt = expected.index[1] + end_dt = expected.index[-2] + + # select w/iterator and where clause, single term, begin of range + where = f"index >= '{beg_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + rexpected = expected[expected.index >= beg_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, single term, end of range + where = f"index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + rexpected = expected[expected.index <= end_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, inclusive range + where = f"index >= '{beg_dt}' & index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)] + tm.assert_frame_equal(rexpected, result) + + # with iterator, empty where + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + end_dt = expected.index[-1] + + # select w/iterator and where clause, single term, begin of range + where = f"index > '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + assert 0 == len(results) + + +def test_select_iterator_many_empty_frames(setup_path): + # GH 8014 + # using iterator and where clause can return many empty + # frames. 
+ chunksize = 10_000 + + # with iterator, range limited to the first chunk + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + beg_dt = expected.index[0] + end_dt = expected.index[chunksize - 1] + + # select w/iterator and where clause, single term, begin of range + where = f"index >= '{beg_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + rexpected = expected[expected.index >= beg_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, single term, end of range + where = f"index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + + assert len(results) == 1 + result = concat(results) + rexpected = expected[expected.index <= end_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, inclusive range + where = f"index >= '{beg_dt}' & index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + + # should be 1, is 10 + assert len(results) == 1 + result = concat(results) + rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause which selects + # *nothing*. + # + # To be consistent with Python idiom I suggest this should + # return [] e.g. `for e in []: print True` never prints + # True. + + where = f"index <= '{beg_dt}' & index >= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + + # should be [] + assert len(results) == 0 + + +def test_frame_select(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with ensure_clean_store(setup_path) as store: + store.put("frame", df, format="table") + date = df.index[len(df) // 2] + + crit1 = Term("index>=date") + assert crit1.env.scope["date"] == date + + crit2 = "columns=['A', 'D']" + crit3 = "columns=A" + + result = store.select("frame", [crit1, crit2]) + expected = df.loc[date:, ["A", "D"]] + tm.assert_frame_equal(result, expected) + + result = store.select("frame", [crit3]) + expected = df.loc[:, ["A"]] + tm.assert_frame_equal(result, expected) + + # invalid terms + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + store.append("df_time", df) + msg = "day is out of range for month: 0" + with pytest.raises(ValueError, match=msg): + store.select("df_time", "index>0") + + # can't select if not written as table + # store['frame'] = df + # with pytest.raises(ValueError): + # store.select('frame', [crit1, crit2]) + + +def test_frame_select_complex(setup_path): + # select via complex criteria + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df.loc[df.index[0:4], "string"] = "bar" + + with ensure_clean_store(setup_path) as store: + store.put("df", df, format="table", data_columns=["string"]) + + # empty + result = store.select("df", 'index>df.index[3] & string="bar"') + 
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")] + tm.assert_frame_equal(result, expected) + + result = store.select("df", 'index>df.index[3] & string="foo"') + expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")] + tm.assert_frame_equal(result, expected) + + # or + result = store.select("df", 'index>df.index[3] | string="bar"') + expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")] + tm.assert_frame_equal(result, expected) + + result = store.select( + "df", '(index>df.index[3] & index<=df.index[6]) | string="bar"' + ) + expected = df.loc[ + ((df.index > df.index[3]) & (df.index <= df.index[6])) + | (df.string == "bar") + ] + tm.assert_frame_equal(result, expected) + + # invert + result = store.select("df", 'string!="bar"') + expected = df.loc[df.string != "bar"] + tm.assert_frame_equal(result, expected) + + # invert not implemented in numexpr :( + msg = "cannot use an invert condition when passing to numexpr" + with pytest.raises(NotImplementedError, match=msg): + store.select("df", '~(string="bar")') + + # invert ok for filters + result = store.select("df", "~(columns=['A','B'])") + expected = df.loc[:, df.columns.difference(["A", "B"])] + tm.assert_frame_equal(result, expected) + + # in + result = store.select("df", "index>df.index[3] & columns in ['A','B']") + expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +def test_frame_select_complex2(tmp_path): + pp = tmp_path / "params.hdf" + hh = tmp_path / "hist.hdf" + + # use non-trivial selection criteria + params = DataFrame({"A": [1, 1, 2, 2, 3]}) + params.to_hdf(pp, key="df", mode="w", format="table", data_columns=["A"]) + + selection = read_hdf(pp, "df", where="A=[2,3]") + hist = DataFrame( + np.random.default_rng(2).standard_normal((25, 1)), + columns=["data"], + index=MultiIndex.from_tuples( + [(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"] + ), + ) + + hist.to_hdf(hh, key="df", mode="w", format="table") + + expected = read_hdf(hh, "df", where="l1=[2, 3, 4]") + + # scope with list like + l0 = selection.index.tolist() # noqa: F841 + with HDFStore(hh) as store: + result = store.select("df", where="l1=l0") + tm.assert_frame_equal(result, expected) + + result = read_hdf(hh, "df", where="l1=l0") + tm.assert_frame_equal(result, expected) + + # index + index = selection.index # noqa: F841 + result = read_hdf(hh, "df", where="l1=index") + tm.assert_frame_equal(result, expected) + + result = read_hdf(hh, "df", where="l1=selection.index") + tm.assert_frame_equal(result, expected) + + result = read_hdf(hh, "df", where="l1=selection.index.tolist()") + tm.assert_frame_equal(result, expected) + + result = read_hdf(hh, "df", where="l1=list(selection.index)") + tm.assert_frame_equal(result, expected) + + # scope with index + with HDFStore(hh) as store: + result = store.select("df", where="l1=index") + tm.assert_frame_equal(result, expected) + + result = store.select("df", where="l1=selection.index") + tm.assert_frame_equal(result, expected) + + result = store.select("df", where="l1=selection.index.tolist()") + tm.assert_frame_equal(result, expected) + + result = store.select("df", where="l1=list(selection.index)") + tm.assert_frame_equal(result, expected) + + +def test_invalid_filtering(setup_path): + # can't use more than one filter (atm) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with 
ensure_clean_store(setup_path) as store: + store.put("df", df, format="table") + + msg = "unable to collapse Joint Filters" + # not implemented + with pytest.raises(NotImplementedError, match=msg): + store.select("df", "columns=['A'] | columns=['B']") + + # in theory we could deal with this + with pytest.raises(NotImplementedError, match=msg): + store.select("df", "columns=['A','B'] & columns=['C']") + + +def test_string_select(setup_path): + # GH 2973 + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + # test string ==/!= + df["x"] = "none" + df.loc[df.index[2:7], "x"] = "" + + store.append("df", df, data_columns=["x"]) + + result = store.select("df", "x=none") + expected = df[df.x == "none"] + tm.assert_frame_equal(result, expected) + + result = store.select("df", "x!=none") + expected = df[df.x != "none"] + tm.assert_frame_equal(result, expected) + + df2 = df.copy() + df2.loc[df2.x == "", "x"] = np.nan + + store.append("df2", df2, data_columns=["x"]) + result = store.select("df2", "x!=none") + expected = df2[isna(df2.x)] + tm.assert_frame_equal(result, expected) + + # int ==/!= + df["int"] = 1 + df.loc[df.index[2:7], "int"] = 2 + + store.append("df3", df, data_columns=["int"]) + + result = store.select("df3", "int=2") + expected = df[df.int == 2] + tm.assert_frame_equal(result, expected) + + result = store.select("df3", "int!=2") + expected = df[df.int != 2] + tm.assert_frame_equal(result, expected) + + +def test_select_as_multiple(setup_path): + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) + df2["foo"] = "bar" + + with ensure_clean_store(setup_path) as store: + msg = "keys must be a list/tuple" + # no tables stored + with pytest.raises(TypeError, match=msg): + store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1") + + store.append("df1", df1, data_columns=["A", "B"]) + store.append("df2", df2) + + # exceptions + with pytest.raises(TypeError, match=msg): + store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1") + + with pytest.raises(TypeError, match=msg): + store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1") + + msg = "'No object named df3 in the file'" + with pytest.raises(KeyError, match=msg): + store.select_as_multiple( + ["df1", "df3"], where=["A>0", "B>0"], selector="df1" + ) + + with pytest.raises(KeyError, match=msg): + store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1") + + with pytest.raises(KeyError, match="'No object named df4 in the file'"): + store.select_as_multiple( + ["df1", "df2"], where=["A>0", "B>0"], selector="df4" + ) + + # default select + result = store.select("df1", ["A>0", "B>0"]) + expected = store.select_as_multiple( + ["df1"], where=["A>0", "B>0"], selector="df1" + ) + tm.assert_frame_equal(result, expected) + expected = store.select_as_multiple("df1", where=["A>0", "B>0"], selector="df1") + tm.assert_frame_equal(result, expected) + + # multiple + result = store.select_as_multiple( + ["df1", "df2"], where=["A>0", "B>0"], selector="df1" + ) + expected = concat([df1, df2], axis=1) + expected = expected[(expected.A > 0) & (expected.B > 0)] + tm.assert_frame_equal(result, expected, check_freq=False) + # FIXME: 2021-01-20 this is failing with freq 
None vs 4B on some builds + + # multiple (diff selector) + result = store.select_as_multiple( + ["df1", "df2"], where="index>df2.index[4]", selector="df2" + ) + expected = concat([df1, df2], axis=1) + expected = expected[5:] + tm.assert_frame_equal(result, expected) + + # test exception for diff rows + df3 = df1.copy().head(2) + store.append("df3", df3) + msg = "all tables must have exactly the same nrows!" + with pytest.raises(ValueError, match=msg): + store.select_as_multiple( + ["df1", "df3"], where=["A>0", "B>0"], selector="df1" + ) + + +def test_nan_selection_bug_4858(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame({"cols": range(6), "values": range(6)}, dtype="float64") + df["cols"] = (df["cols"] + 10).apply(str) + df.iloc[0] = np.nan + + expected = DataFrame( + {"cols": ["13.0", "14.0", "15.0"], "values": [3.0, 4.0, 5.0]}, + index=[3, 4, 5], + ) + + # write w/o the index on that particular column + store.append("df", df, data_columns=True, index=["cols"]) + result = store.select("df", where="values>2.0") + tm.assert_frame_equal(result, expected) + + +def test_query_with_nested_special_character(setup_path): + df = DataFrame( + { + "a": ["a", "a", "c", "b", "test & test", "c", "b", "e"], + "b": [1, 2, 3, 4, 5, 6, 7, 8], + } + ) + expected = df[df.a == "test & test"] + with ensure_clean_store(setup_path) as store: + store.append("test", df, format="table", data_columns=True) + result = store.select("test", 'a = "test & test"') + tm.assert_frame_equal(expected, result) + + +def test_query_long_float_literal(setup_path): + # GH 14241 + df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]}) + + with ensure_clean_store(setup_path) as store: + store.append("test", df, format="table", data_columns=True) + + cutoff = 1000000000.0006 + result = store.select("test", f"A < {cutoff:.4f}") + assert result.empty + + cutoff = 1000000000.0010 + result = store.select("test", f"A > {cutoff:.4f}") + expected = df.loc[[1, 2], :] + tm.assert_frame_equal(expected, result) + + exact = 1000000000.0011 + result = store.select("test", f"A == {exact:.4f}") + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + +def test_query_compare_column_type(setup_path): + # GH 15492 + df = DataFrame( + { + "date": ["2014-01-01", "2014-01-02"], + "real_date": date_range("2014-01-01", periods=2), + "float": [1.1, 1.2], + "int": [1, 2], + }, + columns=["date", "real_date", "float", "int"], + ) + + with ensure_clean_store(setup_path) as store: + store.append("test", df, format="table", data_columns=True) + + ts = Timestamp("2014-01-01") # noqa: F841 + result = store.select("test", where="real_date > ts") + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + for op in ["<", ">", "=="]: + # non strings to string column always fail + for v in [2.1, True, Timestamp("2014-01-01"), pd.Timedelta(1, "s")]: + query = f"date {op} v" + msg = f"Cannot compare {v} of type {type(v)} to string column" + with pytest.raises(TypeError, match=msg): + store.select("test", where=query) + + # strings to other columns must be convertible to type + v = "a" + for col in ["int", "float", "real_date"]: + query = f"{col} {op} v" + if col == "real_date": + msg = 'Given date string "a" not likely a datetime' + else: + msg = "could not convert string to" + with pytest.raises(ValueError, match=msg): + store.select("test", where=query) + + for v, col in zip( + ["1", "1.1", "2014-01-01"], ["int", "float", "real_date"] + ): + query = f"{col} {op} v" + result = 
store.select("test", where=query) + + if op == "==": + expected = df.loc[[0], :] + elif op == ">": + expected = df.loc[[1], :] + else: + expected = df.loc[[], :] + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize("where", ["", (), (None,), [], [None]]) +def test_select_empty_where(tmp_path, where): + # GH26610 + + df = DataFrame([1, 2, 3]) + path = tmp_path / "empty_where.h5" + with HDFStore(path) as store: + store.put("df", df, "t") + result = read_hdf(store, "df", where=where) + tm.assert_frame_equal(result, df) + + +def test_select_large_integer(tmp_path): + path = tmp_path / "large_int.h5" + + df = DataFrame( + zip( + ["a", "b", "c", "d"], + [-9223372036854775801, -9223372036854775802, -9223372036854775803, 123], + ), + columns=["x", "y"], + ) + result = None + with HDFStore(path) as s: + s.append("data", df, data_columns=True, index=False) + result = s.select("data", where="y==-9223372036854775801").get("y").get(0) + expected = df["y"][0] + + assert expected == result diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py new file mode 100644 index 0000000000000000000000000000000000000000..82d3052e7f5d6738801e973a9f34307ea0dff2cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py @@ -0,0 +1,1119 @@ +import contextlib +import datetime as dt +import hashlib +import tempfile +import time + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, + concat, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + +from pandas.io.pytables import ( + HDFStore, + read_hdf, +) + +pytestmark = pytest.mark.single_cpu + +tables = pytest.importorskip("tables") + + +def test_context(setup_path): + with tm.ensure_clean(setup_path) as path: + try: + with HDFStore(path) as tbl: + raise ValueError("blah") + except ValueError: + pass + with tm.ensure_clean(setup_path) as path: + with HDFStore(path) as tbl: + tbl["a"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + assert len(tbl) == 1 + assert type(tbl["a"]) == DataFrame + + +def test_no_track_times(tmp_path, setup_path): + # GH 32682 + # enables to set track_times (see `pytables` `create_table` documentation) + + def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128): + h = hash_factory() + with open(filename, "rb") as f: + for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""): + h.update(chunk) + return h.digest() + + def create_h5_and_return_checksum(tmp_path, track_times): + path = tmp_path / setup_path + df = DataFrame({"a": [1]}) + + with HDFStore(path, mode="w") as hdf: + hdf.put( + "table", + df, + format="table", + data_columns=True, + index=None, + track_times=track_times, + ) + + return checksum(path) + + checksum_0_tt_false = create_h5_and_return_checksum(tmp_path, track_times=False) + checksum_0_tt_true = create_h5_and_return_checksum(tmp_path, track_times=True) + + # sleep is necessary to create h5 with different creation time + time.sleep(1) + + checksum_1_tt_false = create_h5_and_return_checksum(tmp_path, track_times=False) + checksum_1_tt_true = create_h5_and_return_checksum(tmp_path, 
track_times=True) + + # checksums are the same if track_time = False + assert checksum_0_tt_false == checksum_1_tt_false + + # checksums are NOT same if track_time = True + assert checksum_0_tt_true != checksum_1_tt_true + + +def test_iter_empty(setup_path): + with ensure_clean_store(setup_path) as store: + # GH 12221 + assert list(store) == [] + + +def test_repr(setup_path): + with ensure_clean_store(setup_path) as store: + repr(store) + store.info() + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["b"] = Series( + range(10), dtype="float64", index=[f"i_{i}" for i in range(10)] + ) + store["c"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["bool1"] = df["A"] > 0 + df["bool2"] = df["B"] > 0 + df["bool3"] = True + df["int1"] = 1 + df["int2"] = 2 + df["timestamp1"] = Timestamp("20010102") + df["timestamp2"] = Timestamp("20010103") + df["datetime1"] = dt.datetime(2001, 1, 2, 0, 0) + df["datetime2"] = dt.datetime(2001, 1, 3, 0, 0) + df.loc[df.index[3:6], ["obj1"]] = np.nan + df = df._consolidate() + + with tm.assert_produces_warning(pd.errors.PerformanceWarning): + store["df"] = df + + # make a random group in hdf space + store._handle.create_group(store._handle.root, "bah") + + assert store.filename in repr(store) + assert store.filename in str(store) + store.info() + + # storers + with ensure_clean_store(setup_path) as store: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + store.append("df", df) + + s = store.get_storer("df") + repr(s) + str(s) + + +def test_contains(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["b"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + store["foo/bar"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + assert "a" in store + assert "b" in store + assert "c" not in store + assert "foo/bar" in store + assert "/foo/bar" in store + assert "/foo/b" not in store + assert "bar" not in store + + # gh-2694: tables.NaturalNameWarning + with tm.assert_produces_warning( + tables.NaturalNameWarning, check_stacklevel=False + ): + store["node())"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + assert "node())" in store + + +def test_versioning(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["b"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 4)), + columns=Index(list("ABCD"), dtype=object), + 
index=date_range("2000-01-01", periods=20, freq="B"), + ) + _maybe_remove(store, "df1") + store.append("df1", df[:10]) + store.append("df1", df[10:]) + assert store.root.a._v_attrs.pandas_version == "0.15.2" + assert store.root.b._v_attrs.pandas_version == "0.15.2" + assert store.root.df1._v_attrs.pandas_version == "0.15.2" + + # write a file and wipe its versioning + _maybe_remove(store, "df2") + store.append("df2", df) + + # this is an error because its table_type is appendable, but no + # version info + store.get_node("df2")._v_attrs.pandas_version = None + + msg = "'NoneType' object has no attribute 'startswith'" + + with pytest.raises(Exception, match=msg): + store.select("df2") + + +@pytest.mark.parametrize( + "where, expected", + [ + ( + "/", + { + "": ({"first_group", "second_group"}, set()), + "/first_group": (set(), {"df1", "df2"}), + "/second_group": ({"third_group"}, {"df3", "s1"}), + "/second_group/third_group": (set(), {"df4"}), + }, + ), + ( + "/second_group", + { + "/second_group": ({"third_group"}, {"df3", "s1"}), + "/second_group/third_group": (set(), {"df4"}), + }, + ), + ], +) +def test_walk(where, expected): + # GH10143 + objs = { + "df1": DataFrame([1, 2, 3]), + "df2": DataFrame([4, 5, 6]), + "df3": DataFrame([6, 7, 8]), + "df4": DataFrame([9, 10, 11]), + "s1": Series([10, 9, 8]), + # Next 3 items aren't pandas objects and should be ignored + "a1": np.array([[1, 2, 3], [4, 5, 6]]), + "tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"), + "tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"), + } + + with ensure_clean_store("walk_groups.hdf", mode="w") as store: + store.put("/first_group/df1", objs["df1"]) + store.put("/first_group/df2", objs["df2"]) + store.put("/second_group/df3", objs["df3"]) + store.put("/second_group/s1", objs["s1"]) + store.put("/second_group/third_group/df4", objs["df4"]) + # Create non-pandas objects + store._handle.create_array("/first_group", "a1", objs["a1"]) + store._handle.create_table("/first_group", "tb1", obj=objs["tb1"]) + store._handle.create_table("/second_group", "tb2", obj=objs["tb2"]) + + assert len(list(store.walk(where=where))) == len(expected) + for path, groups, leaves in store.walk(where=where): + assert path in expected + expected_groups, expected_frames = expected[path] + assert expected_groups == set(groups) + assert expected_frames == set(leaves) + for leaf in leaves: + frame_path = "/".join([path, leaf]) + obj = store.get(frame_path) + if "df" in leaf: + tm.assert_frame_equal(obj, objs[leaf]) + else: + tm.assert_series_equal(obj, objs[leaf]) + + +def test_getattr(setup_path): + with ensure_clean_store(setup_path) as store: + s = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["a"] = s + + # test attribute access + result = store.a + tm.assert_series_equal(result, s) + result = getattr(store, "a") + tm.assert_series_equal(result, s) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + store["df"] = df + result = store.df + tm.assert_frame_equal(result, df) + + # errors + for x in ["d", "mode", "path", "handle", "complib"]: + msg = f"'HDFStore' object has no attribute '{x}'" + with pytest.raises(AttributeError, match=msg): + getattr(store, x) + + # not stores + for x in ["mode", "path", "handle", "complib"]: + getattr(store, f"_{x}") + + +def test_store_dropna(tmp_path, setup_path): + df_with_missing = DataFrame( + {"col1": [0.0, np.nan, 
2.0], "col2": [1.0, np.nan, np.nan]}, + index=list("abc"), + ) + df_without_missing = DataFrame( + {"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac") + ) + + # # Test to make sure defaults are to not drop. + # # Corresponding to Issue 9382 + path = tmp_path / setup_path + df_with_missing.to_hdf(path, key="df", format="table") + reloaded = read_hdf(path, "df") + tm.assert_frame_equal(df_with_missing, reloaded) + + path = tmp_path / setup_path + df_with_missing.to_hdf(path, key="df", format="table", dropna=False) + reloaded = read_hdf(path, "df") + tm.assert_frame_equal(df_with_missing, reloaded) + + path = tmp_path / setup_path + df_with_missing.to_hdf(path, key="df", format="table", dropna=True) + reloaded = read_hdf(path, "df") + tm.assert_frame_equal(df_without_missing, reloaded) + + +def test_keyword_deprecation(tmp_path, setup_path): + # GH 54229 + path = tmp_path / setup_path + + msg = ( + "Starting with pandas version 3.0 all arguments of to_hdf except for the " + "argument 'path_or_buf' will be keyword-only." + ) + df = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 1, "B": 2, "C": 3}]) + + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_hdf(path, "key") + + +def test_to_hdf_with_min_itemsize(tmp_path, setup_path): + path = tmp_path / setup_path + + # min_itemsize in index with to_hdf (GH 10381) + df = DataFrame( + { + "A": [0.0, 1.0, 2.0, 3.0, 4.0], + "B": [0.0, 1.0, 0.0, 1.0, 0.0], + "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object), + "D": date_range("20130101", periods=5), + } + ).set_index("C") + df.to_hdf(path, key="ss3", format="table", min_itemsize={"index": 6}) + # just make sure there is a longer string: + df2 = df.copy().reset_index().assign(C="longer").set_index("C") + df2.to_hdf(path, key="ss3", append=True, format="table") + tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2])) + + # same as above, with a Series + df["B"].to_hdf(path, key="ss4", format="table", min_itemsize={"index": 6}) + df2["B"].to_hdf(path, key="ss4", append=True, format="table") + tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]])) + + +@pytest.mark.parametrize("format", ["fixed", "table"]) +def test_to_hdf_errors(tmp_path, format, setup_path): + data = ["\ud800foo"] + ser = Series(data, index=Index(data)) + path = tmp_path / setup_path + # GH 20835 + ser.to_hdf(path, key="table", format=format, errors="surrogatepass") + + result = read_hdf(path, "table", errors="surrogatepass") + tm.assert_series_equal(result, ser) + + +def test_create_table_index(setup_path): + with ensure_clean_store(setup_path) as store: + + def col(t, column): + return getattr(store.get_storer(t).table.cols, column) + + # data columns + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df["string2"] = "bar" + store.append("f", df, data_columns=["string", "string2"]) + assert col("f", "index").is_indexed is True + assert col("f", "string").is_indexed is True + assert col("f", "string2").is_indexed is True + + # specify index=columns + store.append("f2", df, index=["string"], data_columns=["string", "string2"]) + assert col("f2", "index").is_indexed is False + assert col("f2", "string").is_indexed is True + assert col("f2", "string2").is_indexed is False + + # try to index a non-table + _maybe_remove(store, "f2") + store.put("f2", df) + msg = "cannot create table index on a Fixed format store" + with 
pytest.raises(TypeError, match=msg): + store.create_table_index("f2") + + +def test_create_table_index_data_columns_argument(setup_path): + # GH 28156 + + with ensure_clean_store(setup_path) as store: + + def col(t, column): + return getattr(store.get_storer(t).table.cols, column) + + # data columns + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df["string2"] = "bar" + store.append("f", df, data_columns=["string"]) + assert col("f", "index").is_indexed is True + assert col("f", "string").is_indexed is True + + msg = "'Cols' object has no attribute 'string2'" + with pytest.raises(AttributeError, match=msg): + col("f", "string2").is_indexed + + # try to index a col which isn't a data_column + msg = ( + "column string2 is not a data_column.\n" + "In order to read column string2 you must reload the dataframe \n" + "into HDFStore and include string2 with the data_columns argument." + ) + with pytest.raises(AttributeError, match=msg): + store.create_table_index("f", columns=["string2"]) + + +def test_mi_data_columns(setup_path): + # GH 14435 + idx = MultiIndex.from_arrays( + [date_range("2000-01-01", periods=5), range(5)], names=["date", "id"] + ) + df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx) + + with ensure_clean_store(setup_path) as store: + store.append("df", df, data_columns=True) + + actual = store.select("df", where="id == 1") + expected = df.iloc[[1], :] + tm.assert_frame_equal(actual, expected) + + +def test_table_mixed_dtypes(setup_path): + # frame + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["bool1"] = df["A"] > 0 + df["bool2"] = df["B"] > 0 + df["bool3"] = True + df["int1"] = 1 + df["int2"] = 2 + df["timestamp1"] = Timestamp("20010102").as_unit("ns") + df["timestamp2"] = Timestamp("20010103").as_unit("ns") + df["datetime1"] = Timestamp("20010102").as_unit("ns") + df["datetime2"] = Timestamp("20010103").as_unit("ns") + df.loc[df.index[3:6], ["obj1"]] = np.nan + df = df._consolidate() + + with ensure_clean_store(setup_path) as store: + store.append("df1_mixed", df) + tm.assert_frame_equal(store.select("df1_mixed"), df) + + +def test_calendar_roundtrip_issue(setup_path): + # 8591 + # doc example from tseries holiday section + weekmask_egypt = "Sun Mon Tue Wed Thu" + holidays = [ + "2012-05-01", + dt.datetime(2013, 5, 1), + np.datetime64("2014-05-01"), + ] + bday_egypt = pd.offsets.CustomBusinessDay( + holidays=holidays, weekmask=weekmask_egypt + ) + mydt = dt.datetime(2013, 4, 30) + dts = date_range(mydt, periods=5, freq=bday_egypt) + + s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split())) + + with ensure_clean_store(setup_path) as store: + store.put("fixed", s) + result = store.select("fixed") + tm.assert_series_equal(result, s) + + store.append("table", s) + result = store.select("table") + tm.assert_series_equal(result, s) + + +def test_remove(setup_path): + with ensure_clean_store(setup_path) as store: + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + store["a"] = ts + store["b"] = df + _maybe_remove(store, "a") 
+ assert len(store) == 1 + tm.assert_frame_equal(df, store["b"]) + + _maybe_remove(store, "b") + assert len(store) == 0 + + # nonexistence + with pytest.raises( + KeyError, match="'No object named a_nonexistent_store in the file'" + ): + store.remove("a_nonexistent_store") + + # pathing + store["a"] = ts + store["b/foo"] = df + _maybe_remove(store, "foo") + _maybe_remove(store, "b/foo") + assert len(store) == 1 + + store["a"] = ts + store["b/foo"] = df + _maybe_remove(store, "b") + assert len(store) == 1 + + # __delitem__ + store["a"] = ts + store["b"] = df + del store["a"] + del store["b"] + assert len(store) == 0 + + +def test_same_name_scoping(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 2)), + index=date_range("20130101", periods=20), + ) + store.put("df", df, format="table") + expected = df[df.index > Timestamp("20130105")] + + result = store.select("df", "index>datetime.datetime(2013,1,5)") + tm.assert_frame_equal(result, expected) + + # changes what 'datetime' points to in the namespace where + # 'select' does the lookup + + # technically an error, but allow it + result = store.select("df", "index>datetime.datetime(2013,1,5)") + tm.assert_frame_equal(result, expected) + + result = store.select("df", "index>datetime(2013,1,5)") + tm.assert_frame_equal(result, expected) + + +def test_store_index_name(setup_path): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "foo" + + with ensure_clean_store(setup_path) as store: + store["frame"] = df + recons = store["frame"] + tm.assert_frame_equal(recons, df) + + +@pytest.mark.parametrize("tz", [None, "US/Pacific"]) +@pytest.mark.parametrize("table_format", ["table", "fixed"]) +def test_store_index_name_numpy_str(tmp_path, table_format, setup_path, unit, tz): + # GH #13492 + idx = DatetimeIndex( + [dt.date(2000, 1, 1), dt.date(2000, 1, 2)], + name="cols\u05d2", + ).tz_localize(tz) + idx1 = ( + DatetimeIndex( + [dt.date(2010, 1, 1), dt.date(2010, 1, 2)], + name="rows\u05d0", + ) + .as_unit(unit) + .tz_localize(tz) + ) + df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1) + + # This used to fail, returning numpy strings instead of python strings. 
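+ # (the regression was that index/column names came back as numpy string scalars
+ # rather than built-in str; the isinstance(..., str) checks after the round trip
+ # below guard against that)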
+ path = tmp_path / setup_path + df.to_hdf(path, key="df", format=table_format) + df2 = read_hdf(path, "df") + + tm.assert_frame_equal(df, df2, check_names=True) + + assert isinstance(df2.index.name, str) + assert isinstance(df2.columns.name, str) + + +def test_store_series_name(setup_path): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + series = df["A"] + + with ensure_clean_store(setup_path) as store: + store["series"] = series + recons = store["series"] + tm.assert_series_equal(recons, series) + + +def test_overwrite_node(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["a"] = ts + + tm.assert_series_equal(store["a"], ts) + + +def test_coordinates(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "df") + store.append("df", df) + + # all + c = store.select_as_coordinates("df") + assert (c.values == np.arange(len(df.index))).all() + + # get coordinates back & test vs frame + _maybe_remove(store, "df") + + df = DataFrame({"A": range(5), "B": range(5)}) + store.append("df", df) + c = store.select_as_coordinates("df", ["index<3"]) + assert (c.values == np.arange(3)).all() + result = store.select("df", where=c) + expected = df.loc[0:2, :] + tm.assert_frame_equal(result, expected) + + c = store.select_as_coordinates("df", ["index>=3", "index<=4"]) + assert (c.values == np.arange(2) + 3).all() + result = store.select("df", where=c) + expected = df.loc[3:4, :] + tm.assert_frame_equal(result, expected) + assert isinstance(c, Index) + + # multiple tables + _maybe_remove(store, "df1") + _maybe_remove(store, "df2") + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) + store.append("df1", df1, data_columns=["A", "B"]) + store.append("df2", df2) + + c = store.select_as_coordinates("df1", ["A>0", "B>0"]) + df1_result = store.select("df1", c) + df2_result = store.select("df2", c) + result = concat([df1_result, df2_result], axis=1) + + expected = concat([df1, df2], axis=1) + expected = expected[(expected.A > 0) & (expected.B > 0)] + tm.assert_frame_equal(result, expected, check_freq=False) + # FIXME: 2021-01-18 on some (mostly windows) builds we get freq=None + # but expect freq="18B" + + # pass array/mask as the coordinates + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((1000, 2)), + index=date_range("20000101", periods=1000), + ) + store.append("df", df) + c = store.select_column("df", "index") + where = c[DatetimeIndex(c).month == 5].index + expected = df.iloc[where] + + # locations + result = store.select("df", where=where) + tm.assert_frame_equal(result, expected) + + # boolean + result = store.select("df", where=where) + tm.assert_frame_equal(result, expected) + + # invalid + msg = ( + "where must be passed as a string, PyTablesExpr, " + "or 
list-like of PyTablesExpr" + ) + with pytest.raises(TypeError, match=msg): + store.select("df", where=np.arange(len(df), dtype="float64")) + + with pytest.raises(TypeError, match=msg): + store.select("df", where=np.arange(len(df) + 1)) + + with pytest.raises(TypeError, match=msg): + store.select("df", where=np.arange(len(df)), start=5) + + with pytest.raises(TypeError, match=msg): + store.select("df", where=np.arange(len(df)), start=5, stop=10) + + # selection with filter + selection = date_range("20000101", periods=500) + result = store.select("df", where="index in selection") + expected = df[df.index.isin(selection)] + tm.assert_frame_equal(result, expected) + + # list + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + store.append("df2", df) + result = store.select("df2", where=[0, 3, 5]) + expected = df.iloc[[0, 3, 5]] + tm.assert_frame_equal(result, expected) + + # boolean + where = [True] * 10 + where[-2] = False + result = store.select("df2", where=where) + expected = df.loc[where] + tm.assert_frame_equal(result, expected) + + # start/stop + result = store.select("df2", start=5, stop=10) + expected = df[5:10] + tm.assert_frame_equal(result, expected) + + +def test_start_stop_table(setup_path): + with ensure_clean_store(setup_path) as store: + # table + df = DataFrame( + { + "A": np.random.default_rng(2).random(20), + "B": np.random.default_rng(2).random(20), + } + ) + store.append("df", df) + + result = store.select("df", "columns=['A']", start=0, stop=5) + expected = df.loc[0:4, ["A"]] + tm.assert_frame_equal(result, expected) + + # out of range + result = store.select("df", "columns=['A']", start=30, stop=40) + assert len(result) == 0 + expected = df.loc[30:40, ["A"]] + tm.assert_frame_equal(result, expected) + + +def test_start_stop_multiple(setup_path): + # GH 16209 + with ensure_clean_store(setup_path) as store: + df = DataFrame({"foo": [1, 2], "bar": [1, 2]}) + + store.append_to_multiple( + {"selector": ["foo"], "data": None}, df, selector="selector" + ) + result = store.select_as_multiple( + ["selector", "data"], selector="selector", start=0, stop=1 + ) + expected = df.loc[[0], ["foo", "bar"]] + tm.assert_frame_equal(result, expected) + + +def test_start_stop_fixed(setup_path): + with ensure_clean_store(setup_path) as store: + # fixed, GH 8287 + df = DataFrame( + { + "A": np.random.default_rng(2).random(20), + "B": np.random.default_rng(2).random(20), + }, + index=date_range("20130101", periods=20), + ) + store.put("df", df) + + result = store.select("df", start=0, stop=5) + expected = df.iloc[0:5, :] + tm.assert_frame_equal(result, expected) + + result = store.select("df", start=5, stop=10) + expected = df.iloc[5:10, :] + tm.assert_frame_equal(result, expected) + + # out of range + result = store.select("df", start=30, stop=40) + expected = df.iloc[30:40, :] + tm.assert_frame_equal(result, expected) + + # series + s = df.A + store.put("s", s) + result = store.select("s", start=0, stop=5) + expected = s.iloc[0:5] + tm.assert_series_equal(result, expected) + + result = store.select("s", start=5, stop=10) + expected = s.iloc[5:10] + tm.assert_series_equal(result, expected) + + # sparse; not implemented + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.iloc[3:5, 1:3] = np.nan + df.iloc[8:10, -2] = np.nan + + +def test_select_filter_corner(setup_path): + df = DataFrame(np.random.default_rng(2).standard_normal((50, 100))) + df.index = 
[f"{c:3d}" for c in df.index] + df.columns = [f"{c:3d}" for c in df.columns] + + with ensure_clean_store(setup_path) as store: + store.put("frame", df, format="table") + + crit = "columns=df.columns[:75]" + result = store.select("frame", [crit]) + tm.assert_frame_equal(result, df.loc[:, df.columns[:75]]) + + crit = "columns=df.columns[:75:2]" + result = store.select("frame", [crit]) + tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]]) + + +def test_path_pathlib(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + result = tm.round_trip_pathlib( + lambda p: df.to_hdf(p, key="df"), lambda p: read_hdf(p, "df") + ) + tm.assert_frame_equal(df, result) + + +@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)]) +def test_contiguous_mixed_data_table(start, stop, setup_path): + # GH 17021 + df = DataFrame( + { + "a": Series([20111010, 20111011, 20111012]), + "b": Series(["ab", "cd", "ab"]), + } + ) + + with ensure_clean_store(setup_path) as store: + store.append("test_dataset", df) + + result = store.select("test_dataset", start=start, stop=stop) + tm.assert_frame_equal(df[start:stop], result) + + +def test_path_pathlib_hdfstore(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + def writer(path): + with HDFStore(path) as store: + df.to_hdf(store, key="df") + + def reader(path): + with HDFStore(path) as store: + return read_hdf(store, "df") + + result = tm.round_trip_pathlib(writer, reader) + tm.assert_frame_equal(df, result) + + +def test_pickle_path_localpath(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + result = tm.round_trip_pathlib( + lambda p: df.to_hdf(p, key="df"), lambda p: read_hdf(p, "df") + ) + tm.assert_frame_equal(df, result) + + +def test_path_localpath_hdfstore(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + def writer(path): + with HDFStore(path) as store: + df.to_hdf(store, key="df") + + def reader(path): + with HDFStore(path) as store: + return read_hdf(store, "df") + + result = tm.round_trip_localpath(writer, reader) + tm.assert_frame_equal(df, result) + + +@pytest.mark.parametrize("propindexes", [True, False]) +def test_copy(propindexes): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + with tm.ensure_clean() as path: + with HDFStore(path) as st: + st.append("df", df, data_columns=["A"]) + with tempfile.NamedTemporaryFile() as new_f: + with HDFStore(path) as store: + with contextlib.closing( + store.copy(new_f.name, keys=None, propindexes=propindexes) + ) as tstore: + # check keys + keys = store.keys() + assert set(keys) == set(tstore.keys()) + # check indices & nrows + for k in tstore.keys(): + if tstore.get_storer(k).is_table: + new_t = tstore.get_storer(k) + orig_t = store.get_storer(k) + + assert orig_t.nrows == new_t.nrows + + # check propindixes + if propindexes: + for a in orig_t.axes: + if a.is_indexed: + assert new_t[a.name].is_indexed + + +def test_duplicate_column_name(tmp_path, setup_path): + df = DataFrame(columns=["a", "a"], 
data=[[0, 0]]) + + path = tmp_path / setup_path + msg = "Columns index has to be unique for fixed format" + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df", format="fixed") + + df.to_hdf(path, key="df", format="table") + other = read_hdf(path, "df") + + tm.assert_frame_equal(df, other) + assert df.equals(other) + assert other.equals(df) + + +def test_preserve_timedeltaindex_type(setup_path): + # GH9635 + df = DataFrame(np.random.default_rng(2).normal(size=(10, 5))) + df.index = timedelta_range(start="0s", periods=10, freq="1s", name="example") + + with ensure_clean_store(setup_path) as store: + store["df"] = df + tm.assert_frame_equal(store["df"], df) + + +def test_columns_multiindex_modified(tmp_path, setup_path): + # BUG: 7212 + + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + df.index.name = "letters" + df = df.set_index(keys="E", append=True) + + data_columns = df.index.names + df.columns.tolist() + path = tmp_path / setup_path + df.to_hdf( + path, + key="df", + mode="a", + append=True, + data_columns=data_columns, + index=False, + ) + cols2load = list("BCD") + cols2load_original = list(cols2load) + # GH#10055 make sure read_hdf call does not alter cols2load inplace + read_hdf(path, "df", columns=cols2load) + assert cols2load_original == cols2load + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +@pytest.mark.parametrize( + "columns", + [ + Index([0, 1], dtype=np.int64), + Index([0.0, 1.0], dtype=np.float64), + date_range("2020-01-01", periods=2), + timedelta_range("1 day", periods=2), + period_range("2020-01-01", periods=2, freq="D"), + ], +) +def test_to_hdf_with_object_column_names_should_fail(tmp_path, setup_path, columns): + # GH9057 + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), columns=columns) + path = tmp_path / setup_path + msg = "cannot have non-object label DataIndexableCol" + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df", format="table", data_columns=True) + + +@pytest.mark.parametrize("dtype", [None, "category"]) +def test_to_hdf_with_object_column_names_should_run(tmp_path, setup_path, dtype): + # GH9057 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + columns=Index(["a", "b"], dtype=dtype), + ) + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", data_columns=True) + result = read_hdf(path, "df", where=f"index = [{df.index[0]}]") + assert len(result) + + +def test_hdfstore_strides(setup_path): + # GH22073 + df = DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]}) + with ensure_clean_store(setup_path) as store: + store.put("df", df) + assert df["a"].values.strides == store["df"]["a"].values.strides + + +def test_store_bool_index(tmp_path, setup_path): + # GH#48667 + df = DataFrame([[1]], columns=[True], index=Index([False], dtype="bool")) + expected = df.copy() + + # # Test to make sure defaults are to not drop. 
+ # # Corresponding to Issue 9382 + path = tmp_path / setup_path + df.to_hdf(path, key="a") + result = read_hdf(path, "a") + tm.assert_frame_equal(expected, result) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_subclass.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_subclass.py new file mode 100644 index 0000000000000000000000000000000000000000..03622faa2b5a8f65d709c23cab23fd3680084cb4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_subclass.py @@ -0,0 +1,52 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + +from pandas.io.pytables import ( + HDFStore, + read_hdf, +) + +pytest.importorskip("tables") + + +class TestHDFStoreSubclass: + # GH 33748 + def test_supported_for_subclass_dataframe(self, tmp_path): + data = {"a": [1, 2], "b": [3, 4]} + sdf = tm.SubclassedDataFrame(data, dtype=np.intp) + + expected = DataFrame(data, dtype=np.intp) + + path = tmp_path / "temp.h5" + sdf.to_hdf(path, key="df") + result = read_hdf(path, "df") + tm.assert_frame_equal(result, expected) + + path = tmp_path / "temp.h5" + with HDFStore(path) as store: + store.put("df", sdf) + result = read_hdf(path, "df") + tm.assert_frame_equal(result, expected) + + def test_supported_for_subclass_series(self, tmp_path): + data = [1, 2, 3] + sser = tm.SubclassedSeries(data, dtype=np.intp) + + expected = Series(data, dtype=np.intp) + + path = tmp_path / "temp.h5" + sser.to_hdf(path, key="ser") + result = read_hdf(path, "ser") + tm.assert_series_equal(result, expected) + + path = tmp_path / "temp.h5" + with HDFStore(path) as store: + store.put("ser", sser) + result = read_hdf(path, "ser") + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py new file mode 100644 index 0000000000000000000000000000000000000000..726dd0d42034756b205c427c60546afb2901be49 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py @@ -0,0 +1,72 @@ +import datetime + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + _testing as tm, + date_range, + period_range, +) +from pandas.tests.io.pytables.common import ensure_clean_store + +pytestmark = pytest.mark.single_cpu + + +@pytest.mark.parametrize("unit", ["us", "ns"]) +def test_store_datetime_fractional_secs(setup_path, unit): + dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456) + dti = DatetimeIndex([dt], dtype=f"M8[{unit}]") + series = Series([0], index=dti) + with ensure_clean_store(setup_path) as store: + store["a"] = series + assert store["a"].index[0] == dt + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_tseries_indices_series(setup_path): + with ensure_clean_store(setup_path) as store: + idx = date_range("2020-01-01", periods=10) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + store["a"] = ser + result = store["a"] + + tm.assert_series_equal(result, ser) + assert result.index.freq == ser.index.freq + tm.assert_class_equal(result.index, ser.index, obj="series index") + + idx = period_range("2020-01-01", periods=10, freq="D") + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + store["a"] = ser + result = store["a"] + + tm.assert_series_equal(result, ser) + 
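+ # as with the DatetimeIndex case above, the freq and the exact index class must
+ # also survive the HDFStore round trip for the PeriodIndex-backed series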
assert result.index.freq == ser.index.freq + tm.assert_class_equal(result.index, ser.index, obj="series index") + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_tseries_indices_frame(setup_path): + with ensure_clean_store(setup_path) as store: + idx = date_range("2020-01-01", periods=10) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + store["a"] = df + result = store["a"] + + tm.assert_frame_equal(result, df) + assert result.index.freq == df.index.freq + tm.assert_class_equal(result.index, df.index, obj="dataframe index") + + idx = period_range("2020-01-01", periods=10, freq="D") + df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 3)), idx) + store["a"] = df + result = store["a"] + + tm.assert_frame_equal(result, df) + assert result.index.freq == df.index.freq + tm.assert_class_equal(result.index, df.index, obj="dataframe index") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py new file mode 100644 index 0000000000000000000000000000000000000000..c5613daf62207319d9dc1e10beea669cd7248f38 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py @@ -0,0 +1,378 @@ +from datetime import ( + date, + timedelta, +) + +import numpy as np +import pytest + +from pandas._libs.tslibs.timezones import maybe_get_tz +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + + +def _compare_with_tz(a, b): + tm.assert_frame_equal(a, b) + + # compare the zones on each element + for c in a.columns: + for i in a.index: + a_e = a.loc[i, c] + b_e = b.loc[i, c] + if not (a_e == b_e and a_e.tz == b_e.tz): + raise AssertionError(f"invalid tz comparison [{a_e}] [{b_e}]") + + +# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows +# filename issues. 
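+# both callables turn a zone name into something tz-aware Timestamps accept:
+# gettz_dateutil("US/Eastern") resolves to maybe_get_tz("dateutil/US/Eastern"),
+# while gettz_pytz returns the bare string and lets pandas resolve it (via pytz)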
+gettz_dateutil = lambda x: maybe_get_tz("dateutil/" + x) +gettz_pytz = lambda x: x + + +@pytest.mark.parametrize("gettz", [gettz_dateutil, gettz_pytz]) +def test_append_with_timezones(setup_path, gettz): + # as columns + + # Single-tzinfo, no DST transition + df_est = DataFrame( + { + "A": [ + Timestamp("20130102 2:00:00", tz=gettz("US/Eastern")).as_unit("ns") + + timedelta(hours=1) * i + for i in range(5) + ] + } + ) + + # frame with all columns having same tzinfo, but different sides + # of DST transition + df_crosses_dst = DataFrame( + { + "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"), + "B": Timestamp("20130603", tz=gettz("US/Eastern")).as_unit("ns"), + }, + index=range(5), + ) + + df_mixed_tz = DataFrame( + { + "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"), + "B": Timestamp("20130102", tz=gettz("EET")).as_unit("ns"), + }, + index=range(5), + ) + + df_different_tz = DataFrame( + { + "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"), + "B": Timestamp("20130102", tz=gettz("CET")).as_unit("ns"), + }, + index=range(5), + ) + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "df_tz") + store.append("df_tz", df_est, data_columns=["A"]) + result = store["df_tz"] + _compare_with_tz(result, df_est) + tm.assert_frame_equal(result, df_est) + + # select with tz aware + expected = df_est[df_est.A >= df_est.A[3]] + result = store.select("df_tz", where="A>=df_est.A[3]") + _compare_with_tz(result, expected) + + # ensure we include dates in DST and STD time here. + _maybe_remove(store, "df_tz") + store.append("df_tz", df_crosses_dst) + result = store["df_tz"] + _compare_with_tz(result, df_crosses_dst) + tm.assert_frame_equal(result, df_crosses_dst) + + msg = ( + r"invalid info for \[values_block_1\] for \[tz\], " + r"existing_value \[(dateutil/.*)?(US/Eastern|America/New_York)\] " + r"conflicts with new value \[(dateutil/.*)?EET\]" + ) + with pytest.raises(ValueError, match=msg): + store.append("df_tz", df_mixed_tz) + + # this is ok + _maybe_remove(store, "df_tz") + store.append("df_tz", df_mixed_tz, data_columns=["A", "B"]) + result = store["df_tz"] + _compare_with_tz(result, df_mixed_tz) + tm.assert_frame_equal(result, df_mixed_tz) + + # can't append with diff timezone + msg = ( + r"invalid info for \[B\] for \[tz\], " + r"existing_value \[(dateutil/.*)?EET\] " + r"conflicts with new value \[(dateutil/.*)?CET\]" + ) + with pytest.raises(ValueError, match=msg): + store.append("df_tz", df_different_tz) + + +@pytest.mark.parametrize("gettz", [gettz_dateutil, gettz_pytz]) +def test_append_with_timezones_as_index(setup_path, gettz): + # GH#4098 example + + dti = date_range("2000-1-1", periods=3, freq="h", tz=gettz("US/Eastern")) + dti = dti._with_freq(None) # freq doesn't round-trip + + df = DataFrame({"A": Series(range(3), index=dti)}) + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "df") + store.put("df", df) + result = store.select("df") + tm.assert_frame_equal(result, df) + + _maybe_remove(store, "df") + store.append("df", df) + result = store.select("df") + tm.assert_frame_equal(result, df) + + +def test_roundtrip_tz_aware_index(setup_path, unit): + # GH 17618 + ts = Timestamp("2000-01-01 01:00:00", tz="US/Eastern") + dti = DatetimeIndex([ts]).as_unit(unit) + df = DataFrame(data=[0], index=dti) + + with ensure_clean_store(setup_path) as store: + store.put("frame", df, format="fixed") + recons = store["frame"] + tm.assert_frame_equal(recons, df) + + value = recons.index[0]._value + denom = {"ns": 1, 
"us": 1000, "ms": 10**6, "s": 10**9}[unit] + assert value == 946706400000000000 // denom + + +def test_store_index_name_with_tz(setup_path): + # GH 13884 + df = DataFrame({"A": [1, 2]}) + df.index = DatetimeIndex([1234567890123456787, 1234567890123456788]) + df.index = df.index.tz_localize("UTC") + df.index.name = "foo" + + with ensure_clean_store(setup_path) as store: + store.put("frame", df, format="table") + recons = store["frame"] + tm.assert_frame_equal(recons, df) + + +def test_tseries_select_index_column(setup_path): + # GH7777 + # selecting a UTC datetimeindex column did + # not preserve UTC tzinfo set before storing + + # check that no tz still works + rng = date_range("1/1/2000", "1/30/2000") + frame = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + + with ensure_clean_store(setup_path) as store: + store.append("frame", frame) + result = store.select_column("frame", "index") + assert rng.tz == DatetimeIndex(result.values).tz + + # check utc + rng = date_range("1/1/2000", "1/30/2000", tz="UTC") + frame = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + + with ensure_clean_store(setup_path) as store: + store.append("frame", frame) + result = store.select_column("frame", "index") + assert rng.tz == result.dt.tz + + # double check non-utc + rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern") + frame = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + + with ensure_clean_store(setup_path) as store: + store.append("frame", frame) + result = store.select_column("frame", "index") + assert rng.tz == result.dt.tz + + +def test_timezones_fixed_format_frame_non_empty(setup_path): + with ensure_clean_store(setup_path) as store: + # index + rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern") + rng = rng._with_freq(None) # freq doesn't round-trip + df = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + store["df"] = df + result = store["df"] + tm.assert_frame_equal(result, df) + + # as data + # GH11411 + _maybe_remove(store, "df") + df = DataFrame( + { + "A": rng, + "B": rng.tz_convert("UTC").tz_localize(None), + "C": rng.tz_convert("CET"), + "D": range(len(rng)), + }, + index=rng, + ) + store["df"] = df + result = store["df"] + tm.assert_frame_equal(result, df) + + +def test_timezones_fixed_format_empty(setup_path, tz_aware_fixture, frame_or_series): + # GH 20594 + + dtype = pd.DatetimeTZDtype(tz=tz_aware_fixture) + + obj = Series(dtype=dtype, name="A") + if frame_or_series is DataFrame: + obj = obj.to_frame() + + with ensure_clean_store(setup_path) as store: + store["obj"] = obj + result = store["obj"] + tm.assert_equal(result, obj) + + +def test_timezones_fixed_format_series_nonempty(setup_path, tz_aware_fixture): + # GH 20594 + + dtype = pd.DatetimeTZDtype(tz=tz_aware_fixture) + + with ensure_clean_store(setup_path) as store: + s = Series([0], dtype=dtype) + store["s"] = s + result = store["s"] + tm.assert_series_equal(result, s) + + +def test_fixed_offset_tz(setup_path): + rng = date_range("1/1/2000 00:00:00-07:00", "1/30/2000 00:00:00-07:00") + frame = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + + with ensure_clean_store(setup_path) as store: + store["frame"] = frame + recons = store["frame"] + tm.assert_index_equal(recons.index, rng) + assert rng.tz == recons.index.tz + + +@td.skip_if_windows +def test_store_timezone(setup_path): + # GH2852 + # issue storing datetime.date with a timezone 
as it resets when read + # back in a new timezone + + # original method + with ensure_clean_store(setup_path) as store: + today = date(2013, 9, 10) + df = DataFrame([1, 2, 3], index=[today, today, today]) + store["obj1"] = df + result = store["obj1"] + tm.assert_frame_equal(result, df) + + # with tz setting + with ensure_clean_store(setup_path) as store: + with tm.set_timezone("EST5EDT"): + today = date(2013, 9, 10) + df = DataFrame([1, 2, 3], index=[today, today, today]) + store["obj1"] = df + + with tm.set_timezone("CST6CDT"): + result = store["obj1"] + + tm.assert_frame_equal(result, df) + + +def test_legacy_datetimetz_object(datapath): + # legacy from < 0.17.0 + # 8260 + expected = DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern").as_unit("ns"), + "B": Timestamp("20130603", tz="CET").as_unit("ns"), + }, + index=range(5), + ) + with ensure_clean_store( + datapath("io", "data", "legacy_hdf", "datetimetz_object.h5"), mode="r" + ) as store: + result = store["df"] + tm.assert_frame_equal(result, expected) + + +def test_dst_transitions(setup_path): + # make sure we are not failing on transitions + with ensure_clean_store(setup_path) as store: + times = date_range( + "2013-10-26 23:00", + "2013-10-27 01:00", + tz="Europe/London", + freq="h", + ambiguous="infer", + ) + times = times._with_freq(None) # freq doesn't round-trip + + for i in [times, times + pd.Timedelta("10min")]: + _maybe_remove(store, "df") + df = DataFrame({"A": range(len(i)), "B": i}, index=i) + store.append("df", df) + result = store.select("df") + tm.assert_frame_equal(result, df) + + +def test_read_with_where_tz_aware_index(tmp_path, setup_path): + # GH 11926 + periods = 10 + dts = date_range("20151201", periods=periods, freq="D", tz="UTC") + mi = pd.MultiIndex.from_arrays([dts, range(periods)], names=["DATE", "NO"]) + expected = DataFrame({"MYCOL": 0}, index=mi) + + key = "mykey" + path = tmp_path / setup_path + with pd.HDFStore(path) as store: + store.append(key, expected, format="table", append=True) + result = pd.read_hdf(path, key, where="DATE > 20151130") + tm.assert_frame_equal(result, expected) + + +def test_py2_created_with_datetimez(datapath): + # The test HDF5 file was created in Python 2, but could not be read in + # Python 3. 
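+ # (read back here, it should equal a frame indexed by the tz-aware
+ # "M8[ns, America/New_York]" DatetimeIndex constructed below)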
+ # + # GH26443 + index = DatetimeIndex(["2019-01-01T18:00"], dtype="M8[ns, America/New_York]") + expected = DataFrame({"data": 123}, index=index) + with ensure_clean_store( + datapath("io", "data", "legacy_hdf", "gh26443.h5"), mode="r" + ) as store: + result = store["key"] + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bafd11d21768e30b6b50e81208eb772409ef87a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b769cd45e325fbaaccfd1580be4f80fee0c528a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8791800d150b0d153b6da1cc518e2ebde91305f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a3e80b9f6b590068b7fed6f04c90d40571fbbfa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dbcf23bcc49b0f6e390b30d6cddcd16d8530e98 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/conftest.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..aafda0ff62bbdf94331fb7cb8fe5d51b6eb1d63a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/conftest.py @@ -0,0 +1,38 @@ +from pathlib import Path + +import pytest + + +@pytest.fixture +def xml_data_path(): + return Path(__file__).parent.parent / "data" / "xml" + + +@pytest.fixture +def xml_books(xml_data_path, datapath): + return datapath(xml_data_path / "books.xml") + + +@pytest.fixture +def 
xml_doc_ch_utf(xml_data_path, datapath): + return datapath(xml_data_path / "doc_ch_utf.xml") + + +@pytest.fixture +def xml_baby_names(xml_data_path, datapath): + return datapath(xml_data_path / "baby_names.xml") + + +@pytest.fixture +def kml_cta_rail_lines(xml_data_path, datapath): + return datapath(xml_data_path / "cta_rail_lines.kml") + + +@pytest.fixture +def xsl_flatten_doc(xml_data_path, datapath): + return datapath(xml_data_path / "flatten_doc.xsl") + + +@pytest.fixture +def xsl_row_field_output(xml_data_path, datapath): + return datapath(xml_data_path / "row_field_output.xsl") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py new file mode 100644 index 0000000000000000000000000000000000000000..37251a58b0c119ef1da15c259e9e77a456b86ac9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py @@ -0,0 +1,1375 @@ +from __future__ import annotations + +from io import ( + BytesIO, + StringIO, +) +import os + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + NA, + DataFrame, + Index, +) +import pandas._testing as tm + +from pandas.io.common import get_handle +from pandas.io.xml import read_xml + +# CHECKLIST + +# [x] - ValueError: "Values for parser can only be lxml or etree." + +# etree +# [x] - ImportError: "lxml not found, please install or use the etree parser." +# [X] - TypeError: "...is not a valid type for attr_cols" +# [X] - TypeError: "...is not a valid type for elem_cols" +# [X] - LookupError: "unknown encoding" +# [X] - KeyError: "...is not included in namespaces" +# [X] - KeyError: "no valid column" +# [X] - ValueError: "To use stylesheet, you need lxml installed..." +# [] - OSError: (NEED PERMISSOIN ISSUE, DISK FULL, ETC.) +# [X] - FileNotFoundError: "No such file or directory" +# [X] - PermissionError: "Forbidden" + +# lxml +# [X] - TypeError: "...is not a valid type for attr_cols" +# [X] - TypeError: "...is not a valid type for elem_cols" +# [X] - LookupError: "unknown encoding" +# [] - OSError: (NEED PERMISSOIN ISSUE, DISK FULL, ETC.) +# [X] - FileNotFoundError: "No such file or directory" +# [X] - KeyError: "...is not included in namespaces" +# [X] - KeyError: "no valid column" +# [X] - ValueError: "stylesheet is not a url, file, or xml string." 
+# [] - LookupError: (NEED WRONG ENCODING FOR FILE OUTPUT) +# [] - URLError: (USUALLY DUE TO NETWORKING) +# [] - HTTPError: (NEED AN ONLINE STYLESHEET) +# [X] - OSError: "failed to load external entity" +# [X] - XMLSyntaxError: "Opening and ending tag mismatch" +# [X] - XSLTApplyError: "Cannot resolve URI" +# [X] - XSLTParseError: "failed to compile" +# [X] - PermissionError: "Forbidden" + + +@pytest.fixture +def geom_df(): + return DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4, np.nan, 3], + } + ) + + +@pytest.fixture +def planet_df(): + return DataFrame( + { + "planet": [ + "Mercury", + "Venus", + "Earth", + "Mars", + "Jupiter", + "Saturn", + "Uranus", + "Neptune", + ], + "type": [ + "terrestrial", + "terrestrial", + "terrestrial", + "terrestrial", + "gas giant", + "gas giant", + "ice giant", + "ice giant", + ], + "location": [ + "inner", + "inner", + "inner", + "inner", + "outer", + "outer", + "outer", + "outer", + ], + "mass": [ + 0.330114, + 4.86747, + 5.97237, + 0.641712, + 1898.187, + 568.3174, + 86.8127, + 102.4126, + ], + } + ) + + +@pytest.fixture +def from_file_expected(): + return """\ + + + + 0 + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + 1 + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + 2 + web + Learning XML + Erik T. Ray + 2003 + 39.95 + +""" + + +def equalize_decl(doc): + # etree and lxml differ on quotes and case in xml declaration + if doc is not None: + doc = doc.replace( + ' + + + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + web + Learning XML + Erik T. Ray + 2003 + 39.95 + +""" + + df_file = read_xml(xml_books, parser=parser) + + with tm.ensure_clean("test.xml") as path: + df_file.to_xml(path, index=False, parser=parser) + with open(path, "rb") as f: + output = f.read().decode("utf-8").strip() + + output = equalize_decl(output) + + assert output == expected + + +def test_index_false_rename_row_root(xml_books, parser): + expected = """\ + + + + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + web + Learning XML + Erik T. Ray + 2003 + 39.95 + +""" + + df_file = read_xml(xml_books, parser=parser) + + with tm.ensure_clean("test.xml") as path: + df_file.to_xml( + path, index=False, root_name="books", row_name="book", parser=parser + ) + with open(path, "rb") as f: + output = f.read().decode("utf-8").strip() + + output = equalize_decl(output) + + assert output == expected + + +@pytest.mark.parametrize( + "offset_index", [list(range(10, 13)), [str(i) for i in range(10, 13)]] +) +def test_index_false_with_offset_input_index(parser, offset_index, geom_df): + """ + Tests that the output does not contain the `` field when the index of the + input Dataframe has an offset. + + This is a regression test for issue #42458. 
+ """ + + expected = """\ + + + + square + 360 + 4.0 + + + circle + 360 + + + + triangle + 180 + 3.0 + +""" + + offset_geom_df = geom_df.copy() + offset_geom_df.index = Index(offset_index) + output = offset_geom_df.to_xml(index=False, parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# NA_REP + +na_expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +def test_na_elem_output(parser, geom_df): + output = geom_df.to_xml(parser=parser) + output = equalize_decl(output) + + assert output == na_expected + + +def test_na_empty_str_elem_option(parser, geom_df): + output = geom_df.to_xml(na_rep="", parser=parser) + output = equalize_decl(output) + + assert output == na_expected + + +def test_na_empty_elem_option(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + 0.0 + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(na_rep="0.0", parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# ATTR_COLS + + +def test_attrs_cols_nan_output(parser, geom_df): + expected = """\ + + + + + +""" + + output = geom_df.to_xml(attr_cols=["shape", "degrees", "sides"], parser=parser) + output = equalize_decl(output) + + assert output == expected + + +def test_attrs_cols_prefix(parser, geom_df): + expected = """\ + + + + + +""" + + output = geom_df.to_xml( + attr_cols=["index", "shape", "degrees", "sides"], + namespaces={"doc": "http://example.xom"}, + prefix="doc", + parser=parser, + ) + output = equalize_decl(output) + + assert output == expected + + +def test_attrs_unknown_column(parser, geom_df): + with pytest.raises(KeyError, match=("no valid column")): + geom_df.to_xml(attr_cols=["shape", "degree", "sides"], parser=parser) + + +def test_attrs_wrong_type(parser, geom_df): + with pytest.raises(TypeError, match=("is not a valid type for attr_cols")): + geom_df.to_xml(attr_cols='"shape", "degree", "sides"', parser=parser) + + +# ELEM_COLS + + +def test_elems_cols_nan_output(parser, geom_df): + elems_cols_expected = """\ + + + + 360 + 4.0 + square + + + 360 + + circle + + + 180 + 3.0 + triangle + +""" + + output = geom_df.to_xml( + index=False, elem_cols=["degrees", "sides", "shape"], parser=parser + ) + output = equalize_decl(output) + + assert output == elems_cols_expected + + +def test_elems_unknown_column(parser, geom_df): + with pytest.raises(KeyError, match=("no valid column")): + geom_df.to_xml(elem_cols=["shape", "degree", "sides"], parser=parser) + + +def test_elems_wrong_type(parser, geom_df): + with pytest.raises(TypeError, match=("is not a valid type for elem_cols")): + geom_df.to_xml(elem_cols='"shape", "degree", "sides"', parser=parser) + + +def test_elems_and_attrs_cols(parser, geom_df): + elems_cols_expected = """\ + + + + 360 + 4.0 + + + 360 + + + + 180 + 3.0 + +""" + + output = geom_df.to_xml( + index=False, + elem_cols=["degrees", "sides"], + attr_cols=["shape"], + parser=parser, + ) + output = equalize_decl(output) + + assert output == elems_cols_expected + + +# HIERARCHICAL COLUMNS + + +def test_hierarchical_columns(parser, planet_df): + expected = """\ + + + + inner + terrestrial + 4 + 11.81 + 2.95 + + + outer + gas giant + 2 + 2466.5 + 1233.25 + + + outer + ice giant + 2 + 189.23 + 94.61 + + + All + + 8 + 2667.54 + 333.44 + +""" + + pvt = planet_df.pivot_table( + index=["location", "type"], + values="mass", + aggfunc=["count", "sum", "mean"], + margins=True, + ).round(2) + + output = pvt.to_xml(parser=parser) + output = 
equalize_decl(output) + + assert output == expected + + +def test_hierarchical_attrs_columns(parser, planet_df): + expected = """\ + + + + + + +""" + + pvt = planet_df.pivot_table( + index=["location", "type"], + values="mass", + aggfunc=["count", "sum", "mean"], + margins=True, + ).round(2) + + output = pvt.to_xml(attr_cols=list(pvt.reset_index().columns.values), parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# MULTIINDEX + + +def test_multi_index(parser, planet_df): + expected = """\ + + + + inner + terrestrial + 4 + 11.81 + 2.95 + + + outer + gas giant + 2 + 2466.5 + 1233.25 + + + outer + ice giant + 2 + 189.23 + 94.61 + +""" + + agg = ( + planet_df.groupby(["location", "type"])["mass"] + .agg(["count", "sum", "mean"]) + .round(2) + ) + + output = agg.to_xml(parser=parser) + output = equalize_decl(output) + + assert output == expected + + +def test_multi_index_attrs_cols(parser, planet_df): + expected = """\ + + + + + +""" + + agg = ( + planet_df.groupby(["location", "type"])["mass"] + .agg(["count", "sum", "mean"]) + .round(2) + ) + output = agg.to_xml(attr_cols=list(agg.reset_index().columns.values), parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# NAMESPACE + + +def test_default_namespace(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(namespaces={"": "http://example.com"}, parser=parser) + output = equalize_decl(output) + + assert output == expected + + +def test_unused_namespaces(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml( + namespaces={"oth": "http://other.org", "ex": "http://example.com"}, + parser=parser, + ) + output = equalize_decl(output) + + assert output == expected + + +# PREFIX + + +def test_namespace_prefix(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml( + namespaces={"doc": "http://example.com"}, prefix="doc", parser=parser + ) + output = equalize_decl(output) + + assert output == expected + + +def test_missing_prefix_in_nmsp(parser, geom_df): + with pytest.raises(KeyError, match=("doc is not included in namespaces")): + geom_df.to_xml( + namespaces={"": "http://example.com"}, prefix="doc", parser=parser + ) + + +def test_namespace_prefix_and_default(parser, geom_df): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml( + namespaces={"": "http://example.com", "doc": "http://other.org"}, + prefix="doc", + parser=parser, + ) + output = equalize_decl(output) + + assert output == expected + + +# ENCODING + +encoding_expected = """\ + + + + 0 + 1 + José + Sofía + + + 1 + 2 + Luis + Valentina + + + 2 + 3 + Carlos + Isabella + + + 3 + 4 + Juan + Camila + + + 4 + 5 + Jorge + Valeria + +""" + + +def test_encoding_option_str(xml_baby_names, parser): + df_file = read_xml(xml_baby_names, parser=parser, encoding="ISO-8859-1").head(5) + + output = df_file.to_xml(encoding="ISO-8859-1", parser=parser) + + if output is not None: + # etree and lxml differ on quotes and case in xml declaration + output = output.replace( + ' + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(xml_declaration=False) + + assert output == expected + + +def 
test_no_pretty_print_with_decl(parser, geom_df): + expected = ( + "\n" + "0square" + "3604.0" + "1circle360" + "2" + "triangle1803.0" + "" + ) + + output = geom_df.to_xml(pretty_print=False, parser=parser) + output = equalize_decl(output) + + # etree adds space for closed tags + if output is not None: + output = output.replace(" />", "/>") + + assert output == expected + + +def test_no_pretty_print_no_decl(parser, geom_df): + expected = ( + "0square" + "3604.0" + "1circle360" + "2" + "triangle1803.0" + "" + ) + + output = geom_df.to_xml(xml_declaration=False, pretty_print=False, parser=parser) + + # etree adds space for closed tags + if output is not None: + output = output.replace(" />", "/>") + + assert output == expected + + +# PARSER + + +@td.skip_if_installed("lxml") +def test_default_parser_no_lxml(geom_df): + with pytest.raises( + ImportError, match=("lxml not found, please install or use the etree parser.") + ): + geom_df.to_xml() + + +def test_unknown_parser(geom_df): + with pytest.raises( + ValueError, match=("Values for parser can only be lxml or etree.") + ): + geom_df.to_xml(parser="bs4") + + +# STYLESHEET + +xsl_expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +def test_stylesheet_file_like(xsl_row_field_output, mode, geom_df): + pytest.importorskip("lxml") + with open( + xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None + ) as f: + assert geom_df.to_xml(stylesheet=f) == xsl_expected + + +def test_stylesheet_io(xsl_row_field_output, mode, geom_df): + # note: By default the bodies of untyped functions are not checked, + # consider using --check-untyped-defs + pytest.importorskip("lxml") + xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked] + + with open( + xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None + ) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + else: + xsl_obj = StringIO(f.read()) + + output = geom_df.to_xml(stylesheet=xsl_obj) + + assert output == xsl_expected + + +def test_stylesheet_buffered_reader(xsl_row_field_output, mode, geom_df): + pytest.importorskip("lxml") + with open( + xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None + ) as f: + xsl_obj = f.read() + + output = geom_df.to_xml(stylesheet=xsl_obj) + + assert output == xsl_expected + + +def test_stylesheet_wrong_path(geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = os.path.join("data", "xml", "row_field_output.xslt") + + with pytest.raises( + lxml_etree.XMLSyntaxError, + match=("Start tag expected, '<' not found"), + ): + geom_df.to_xml(stylesheet=xsl) + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_stylesheet(val, geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + msg = "|".join( + [ + "Document is empty", + "Start tag expected, '<' not found", + # Seen on Mac with lxml 4.9.1 + r"None \(line 0\)", + ] + ) + + with pytest.raises(lxml_etree.XMLSyntaxError, match=msg): + geom_df.to_xml(stylesheet=val) + + +def test_incorrect_xsl_syntax(geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + + + + + + + + + + +""" + + with pytest.raises( + lxml_etree.XMLSyntaxError, match=("Opening and ending tag mismatch") + ): + geom_df.to_xml(stylesheet=xsl) + + +def test_incorrect_xsl_eval(geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + + + + + + + + + + +""" + + with pytest.raises(lxml_etree.XSLTParseError, match=("failed to compile")): + 
geom_df.to_xml(stylesheet=xsl) + + +def test_incorrect_xsl_apply(geom_df): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + +""" + + with pytest.raises(lxml_etree.XSLTApplyError, match=("Cannot resolve URI")): + with tm.ensure_clean("test.xml") as path: + geom_df.to_xml(path, stylesheet=xsl) + + +def test_stylesheet_with_etree(geom_df): + xsl = """\ + + + + + + + + + """ + + with pytest.raises( + ValueError, match=("To use stylesheet, you need lxml installed") + ): + geom_df.to_xml(parser="etree", stylesheet=xsl) + + +def test_style_to_csv(geom_df): + pytest.importorskip("lxml") + xsl = """\ + + + + + , + + ,shape,degrees,sides + + + + + + + +""" + + out_csv = geom_df.to_csv(lineterminator="\n") + + if out_csv is not None: + out_csv = out_csv.strip() + out_xml = geom_df.to_xml(stylesheet=xsl) + + assert out_csv == out_xml + + +def test_style_to_string(geom_df): + pytest.importorskip("lxml") + xsl = """\ + + + + + + + shape degrees sides + + + + + + + +""" + + out_str = geom_df.to_string() + out_xml = geom_df.to_xml(na_rep="NaN", stylesheet=xsl) + + assert out_xml == out_str + + +def test_style_to_json(geom_df): + pytest.importorskip("lxml") + xsl = """\ + + + + + " + + + {"shape":{ + + },"degrees":{ + + },"sides":{ + + }} + + + + + + + + + + + + + + + + + , + + +""" + + out_json = geom_df.to_json() + out_xml = geom_df.to_xml(stylesheet=xsl) + + assert out_json == out_xml + + +# COMPRESSION + + +geom_xml = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +def test_compression_output(parser, compression_only, geom_df): + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=compression_only) + + with get_handle( + path, + "r", + compression=compression_only, + ) as handle_obj: + output = handle_obj.handle.read() + + output = equalize_decl(output) + + assert geom_xml == output.strip() + + +def test_filename_and_suffix_comp( + parser, compression_only, geom_df, compression_to_extension +): + compfile = "xml." 
+ compression_to_extension[compression_only] + with tm.ensure_clean(filename=compfile) as path: + geom_df.to_xml(path, parser=parser, compression=compression_only) + + with get_handle( + path, + "r", + compression=compression_only, + ) as handle_obj: + output = handle_obj.handle.read() + + output = equalize_decl(output) + + assert geom_xml == output.strip() + + +def test_ea_dtypes(any_numeric_ea_dtype, parser): + # GH#43903 + expected = """ + + + 0 + + +""" + df = DataFrame({"a": [NA]}).astype(any_numeric_ea_dtype) + result = df.to_xml(parser=parser) + assert equalize_decl(result).strip() == expected + + +def test_unsuported_compression(parser, geom_df): + with pytest.raises(ValueError, match="Unrecognized compression type"): + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression="7z") + + +# STORAGE OPTIONS + + +@pytest.mark.single_cpu +def test_s3_permission_output(parser, s3_public_bucket, geom_df): + s3fs = pytest.importorskip("s3fs") + pytest.importorskip("lxml") + + with tm.external_error_raised((PermissionError, FileNotFoundError)): + fs = s3fs.S3FileSystem(anon=True) + fs.ls(s3_public_bucket.name) + + geom_df.to_xml( + f"s3://{s3_public_bucket.name}/geom.xml", compression="zip", parser=parser + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml.py new file mode 100644 index 0000000000000000000000000000000000000000..6f429c1ecbf8aad459e78aba0b480299406483d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml.py @@ -0,0 +1,2097 @@ +from __future__ import annotations + +from io import ( + BytesIO, + StringIO, +) +from lzma import LZMAError +import os +from tarfile import ReadError +from urllib.error import HTTPError +from xml.etree.ElementTree import ParseError +from zipfile import BadZipFile + +import numpy as np +import pytest + +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + EmptyDataError, + ParserError, +) +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + NA, + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) +from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics + +from pandas.io.common import get_handle +from pandas.io.xml import read_xml + +# CHECK LIST + +# [x] - ValueError: "Values for parser can only be lxml or etree." + +# etree +# [X] - ImportError: "lxml not found, please install or use the etree parser." +# [X] - TypeError: "expected str, bytes or os.PathLike object, not NoneType" +# [X] - ValueError: "Either element or attributes can be parsed not both." +# [X] - ValueError: "xpath does not return any nodes..." +# [X] - SyntaxError: "You have used an incorrect or unsupported XPath" +# [X] - ValueError: "names does not match length of child elements in xpath." +# [X] - TypeError: "...is not a valid type for names" +# [X] - ValueError: "To use stylesheet, you need lxml installed..." +# [] - URLError: (GENERAL ERROR WITH HTTPError AS SUBCLASS) +# [X] - HTTPError: "HTTP Error 404: Not Found" +# [] - OSError: (GENERAL ERROR WITH FileNotFoundError AS SUBCLASS) +# [X] - FileNotFoundError: "No such file or directory" +# [] - ParseError (FAILSAFE CATCH ALL FOR VERY COMPLEX XML) +# [X] - UnicodeDecodeError: "'utf-8' codec can't decode byte 0xe9..." 
+# [X] - UnicodeError: "UTF-16 stream does not start with BOM" +# [X] - BadZipFile: "File is not a zip file" +# [X] - OSError: "Invalid data stream" +# [X] - LZMAError: "Input format not supported by decoder" +# [X] - ValueError: "Unrecognized compression type" +# [X] - PermissionError: "Forbidden" + +# lxml +# [X] - ValueError: "Either element or attributes can be parsed not both." +# [X] - AttributeError: "__enter__" +# [X] - XSLTApplyError: "Cannot resolve URI" +# [X] - XSLTParseError: "document is not a stylesheet" +# [X] - ValueError: "xpath does not return any nodes." +# [X] - XPathEvalError: "Invalid expression" +# [] - XPathSyntaxError: (OLD VERSION IN lxml FOR XPATH ERRORS) +# [X] - TypeError: "empty namespace prefix is not supported in XPath" +# [X] - ValueError: "names does not match length of child elements in xpath." +# [X] - TypeError: "...is not a valid type for names" +# [X] - LookupError: "unknown encoding" +# [] - URLError: (USUALLY DUE TO NETWORKING) +# [X - HTTPError: "HTTP Error 404: Not Found" +# [X] - OSError: "failed to load external entity" +# [X] - XMLSyntaxError: "Start tag expected, '<' not found" +# [] - ParserError: (FAILSAFE CATCH ALL FOR VERY COMPLEX XML +# [X] - ValueError: "Values for parser can only be lxml or etree." +# [X] - UnicodeDecodeError: "'utf-8' codec can't decode byte 0xe9..." +# [X] - UnicodeError: "UTF-16 stream does not start with BOM" +# [X] - BadZipFile: "File is not a zip file" +# [X] - OSError: "Invalid data stream" +# [X] - LZMAError: "Input format not supported by decoder" +# [X] - ValueError: "Unrecognized compression type" +# [X] - PermissionError: "Forbidden" + +geom_df = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4, np.nan, 3], + } +) + +xml_default_nmsp = """\ + + + + square + 360 + 4 + + + circle + 360 + + + + triangle + 180 + 3 + +""" + +xml_prefix_nmsp = """\ + + + + square + 360 + 4.0 + + + circle + 360 + + + + triangle + 180 + 3.0 + +""" + + +df_kml = DataFrame( + { + "id": { + 0: "ID_00001", + 1: "ID_00002", + 2: "ID_00003", + 3: "ID_00004", + 4: "ID_00005", + }, + "name": { + 0: "Blue Line (Forest Park)", + 1: "Red, Purple Line", + 2: "Red, Purple Line", + 3: "Red, Purple Line", + 4: "Red, Purple Line", + }, + "styleUrl": { + 0: "#LineStyle01", + 1: "#LineStyle01", + 2: "#LineStyle01", + 3: "#LineStyle01", + 4: "#LineStyle01", + }, + "extrude": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, + "altitudeMode": { + 0: "clampedToGround", + 1: "clampedToGround", + 2: "clampedToGround", + 3: "clampedToGround", + 4: "clampedToGround", + }, + "coordinates": { + 0: ( + "-87.77678526964958,41.8708863930319,0 " + "-87.77826234150609,41.87097820122218,0 " + "-87.78251583439344,41.87130129991005,0 " + "-87.78418294588424,41.87145055520308,0 " + "-87.7872369165933,41.8717239119163,0 " + "-87.79160214925886,41.87210797280065,0" + ), + 1: ( + "-87.65758750947528,41.96427269188822,0 " + "-87.65802133507393,41.96581929055245,0 " + "-87.65819033925305,41.96621846093642,0 " + "-87.6583189819129,41.96650362897086,0 " + "-87.65835858701473,41.96669002089185,0 " + "-87.65838428411853,41.96688150295095,0 " + "-87.65842208882658,41.96745896091846,0 " + "-87.65846556843937,41.9683761425439,0 " + "-87.65849296214573,41.96913893870342,0" + ), + 2: ( + "-87.65492939166126,41.95377494531437,0 " + "-87.65557043199591,41.95376544118533,0 " + "-87.65606302030132,41.95376391658746,0 " + "-87.65623502146268,41.95377379126367,0 " + "-87.65634748981634,41.95380103566435,0 " + "-87.65646537904269,41.95387703994676,0 " 
+ "-87.65656532461145,41.95396622645799,0 " + "-87.65664760856414,41.95404201996044,0 " + "-87.65671750555913,41.95416647054043,0 " + "-87.65673983607117,41.95429949810849,0 " + "-87.65673866475777,41.95441024240925,0 " + "-87.6567690255541,41.95490657227902,0 " + "-87.65683672482363,41.95692259283837,0 " + "-87.6568900886376,41.95861070983142,0 " + "-87.65699865558875,41.96181418669004,0 " + "-87.65756347177603,41.96397045777844,0 " + "-87.65758750947528,41.96427269188822,0" + ), + 3: ( + "-87.65362593118043,41.94742799535678,0 " + "-87.65363554415794,41.94819886386848,0 " + "-87.6536456393239,41.95059994675451,0 " + "-87.65365831235026,41.95108288489359,0 " + "-87.6536604873874,41.9519954657554,0 " + "-87.65362592053201,41.95245597302328,0 " + "-87.65367158496069,41.95311153649393,0 " + "-87.65368468595476,41.9533202828916,0 " + "-87.65369271253692,41.95343095587119,0 " + "-87.65373335834569,41.95351536301472,0 " + "-87.65378605844126,41.95358212680591,0 " + "-87.65385067928185,41.95364452823767,0 " + "-87.6539390793817,41.95370263886964,0 " + "-87.6540786298351,41.95373403675265,0 " + "-87.65430648647626,41.9537535411832,0 " + "-87.65492939166126,41.95377494531437,0" + ), + 4: ( + "-87.65345391792157,41.94217681262115,0 " + "-87.65342448305786,41.94237224420864,0 " + "-87.65339745703922,41.94268217746244,0 " + "-87.65337753982941,41.94288140770284,0 " + "-87.65336256753105,41.94317369618263,0 " + "-87.65338799707138,41.94357253961736,0 " + "-87.65340240886648,41.94389158188269,0 " + "-87.65341837392448,41.94406444407721,0 " + "-87.65342275247338,41.94421065714904,0 " + "-87.65347469646018,41.94434829382345,0 " + "-87.65351486483024,41.94447699917548,0 " + "-87.65353483605053,41.9453896864472,0 " + "-87.65361975532807,41.94689193720703,0 " + "-87.65362593118043,41.94742799535678,0" + ), + }, + } +) + + +def test_literal_xml_deprecation(): + # GH 53809 + pytest.importorskip("lxml") + msg = ( + "Passing literal xml to 'read_xml' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." 
+ ) + + with tm.assert_produces_warning(FutureWarning, match=msg): + read_xml(xml_default_nmsp) + + +@pytest.fixture(params=["rb", "r"]) +def mode(request): + return request.param + + +@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"]) +def parser(request): + return request.param + + +def read_xml_iterparse(data, **kwargs): + with tm.ensure_clean() as path: + with open(path, "w", encoding="utf-8") as f: + f.write(data) + return read_xml(path, **kwargs) + + +def read_xml_iterparse_comp(comp_path, compression_only, **kwargs): + with get_handle(comp_path, "r", compression=compression_only) as handles: + with tm.ensure_clean() as path: + with open(path, "w", encoding="utf-8") as f: + f.write(handles.handle.read()) + return read_xml(path, **kwargs) + + +# FILE / URL + + +def test_parser_consistency_file(xml_books): + pytest.importorskip("lxml") + df_file_lxml = read_xml(xml_books, parser="lxml") + df_file_etree = read_xml(xml_books, parser="etree") + + df_iter_lxml = read_xml( + xml_books, + parser="lxml", + iterparse={"book": ["category", "title", "year", "author", "price"]}, + ) + df_iter_etree = read_xml( + xml_books, + parser="etree", + iterparse={"book": ["category", "title", "year", "author", "price"]}, + ) + + tm.assert_frame_equal(df_file_lxml, df_file_etree) + tm.assert_frame_equal(df_file_lxml, df_iter_lxml) + tm.assert_frame_equal(df_iter_lxml, df_iter_etree) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_parser_consistency_url(parser, httpserver): + httpserver.serve_content(content=xml_default_nmsp) + + df_xpath = read_xml(StringIO(xml_default_nmsp), parser=parser) + df_iter = read_xml( + BytesIO(xml_default_nmsp.encode()), + parser=parser, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + tm.assert_frame_equal(df_xpath, df_iter) + + +def test_file_like(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + df_file = read_xml(f, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_file_io(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + xml_obj = f.read() + + df_io = read_xml( + (BytesIO(xml_obj) if isinstance(xml_obj, bytes) else StringIO(xml_obj)), + parser=parser, + ) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_io, df_expected) + + +def test_file_buffered_reader_string(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + xml_obj = f.read() + + if mode == "rb": + xml_obj = StringIO(xml_obj.decode()) + elif mode == "r": + xml_obj = StringIO(xml_obj) + + df_str = read_xml(xml_obj, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_str, df_expected) + + +def test_file_buffered_reader_no_xml_declaration(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + next(f) + xml_obj = f.read() + + if mode == "rb": + xml_obj = StringIO(xml_obj.decode()) + elif mode == "r": + xml_obj = StringIO(xml_obj) + + df_str = read_xml(xml_obj, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_str, df_expected) + + +def test_string_charset(parser): + txt = "<中文標籤>12" + + df_str = read_xml(StringIO(txt), parser=parser) + + df_expected = DataFrame({"c1": 1, "c2": 2}, index=[0]) + + tm.assert_frame_equal(df_str, df_expected) + + +def test_file_charset(xml_doc_ch_utf, parser): + df_file = read_xml(xml_doc_ch_utf, parser=parser) + + df_expected = DataFrame( + { + "問": [ + "問 若箇是邪而言破邪 何者是正而道(Sorry, this is Big5 only)申正", + "問 既破有得申無得 亦應但破性執申假名以不", + "問 既破性申假 亦應但破有申無 若有無兩洗 亦應性假雙破耶", + ], + "答": [ + "".join( + [ + "答 邪既無量 正亦多途 大略為言不出二種 謂", + "有得與無得 有得是邪須破 無得是正須申\n\t\t故", + ] + ), + None, + "答 不例 有無皆是性 所以須雙破 既分性假異 故有破不破", + ], + "a": [ + None, + "答 性執是有得 假名是無得 今破有得申無得 即是破性執申假名也", + None, + ], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_file_handle_close(xml_books, parser): + with open(xml_books, "rb") as f: + read_xml(BytesIO(f.read()), parser=parser) + + assert not f.closed + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_lxml(val): + lxml_etree = pytest.importorskip("lxml.etree") + + msg = "|".join( + [ + "Document is empty", + # Seen on Mac with lxml 4.91 + r"None \(line 0\)", + ] + ) + with pytest.raises(lxml_etree.XMLSyntaxError, match=msg): + if isinstance(val, str): + read_xml(StringIO(val), parser="lxml") + else: + read_xml(BytesIO(val), parser="lxml") + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_etree(val): + with pytest.raises(ParseError, match="no element found"): + if isinstance(val, str): + read_xml(StringIO(val), parser="etree") + else: + read_xml(BytesIO(val), parser="etree") + + +def test_wrong_file_path(parser): + msg = ( + "Passing literal xml to 'read_xml' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." + ) + filename = os.path.join("data", "html", "books.xml") + + with pytest.raises( + FutureWarning, + match=msg, + ): + read_xml(filename, parser=parser) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_url(httpserver, xml_file): + pytest.importorskip("lxml") + with open(xml_file, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + df_url = read_xml(httpserver.url, xpath=".//book[count(*)=4]") + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_url, df_expected) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_wrong_url(parser, httpserver): + httpserver.serve_content("NOT FOUND", code=404) + with pytest.raises(HTTPError, match=("HTTP Error 404: NOT FOUND")): + read_xml(httpserver.url, xpath=".//book[count(*)=4]", parser=parser) + + +# CONTENT + + +def test_whitespace(parser): + xml = """ + + + + square + + 360 + + + + circle + + 360 + + + + triangle + + 180 + + """ + + df_xpath = read_xml(StringIO(xml), parser=parser, dtype="string") + + df_iter = read_xml_iterparse( + xml, + parser=parser, + iterparse={"row": ["sides", "shape", "degrees"]}, + dtype="string", + ) + + df_expected = DataFrame( + { + "sides": [" 4 ", " 0 ", " 3 "], + "shape": [ + "\n square\n ", + "\n circle\n ", + "\n triangle\n ", + ], + "degrees": ["\t360\t", "\t360\t", "\t180\t"], + }, + dtype="string", + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +# XPATH + + +def test_empty_xpath_lxml(xml_books): + pytest.importorskip("lxml") + with pytest.raises(ValueError, match=("xpath does not return any nodes")): + read_xml(xml_books, xpath=".//python", parser="lxml") + + +def test_bad_xpath_etree(xml_books): + with pytest.raises( + SyntaxError, match=("You have used an incorrect or unsupported XPath") + ): + read_xml(xml_books, xpath=".//[book]", parser="etree") + + +def test_bad_xpath_lxml(xml_books): + lxml_etree = pytest.importorskip("lxml.etree") + + with pytest.raises(lxml_etree.XPathEvalError, match=("Invalid expression")): + read_xml(xml_books, xpath=".//[book]", parser="lxml") + + +# NAMESPACE + + +def test_default_namespace(parser): + df_nmsp = read_xml( + StringIO(xml_default_nmsp), + xpath=".//ns:row", + namespaces={"ns": "http://example.com"}, + parser=parser, + ) + + df_iter = read_xml_iterparse( + xml_default_nmsp, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_nmsp, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_prefix_namespace(parser): + df_nmsp = read_xml( + StringIO(xml_prefix_nmsp), + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser=parser, + ) + df_iter = read_xml_iterparse( + xml_prefix_nmsp, parser=parser, iterparse={"row": ["shape", "degrees", "sides"]} + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_nmsp, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_consistency_default_namespace(): + pytest.importorskip("lxml") + df_lxml = read_xml( + StringIO(xml_default_nmsp), + xpath=".//ns:row", + namespaces={"ns": "http://example.com"}, + parser="lxml", + ) + + df_etree = read_xml( + StringIO(xml_default_nmsp), + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser="etree", + ) + + tm.assert_frame_equal(df_lxml, df_etree) + + +def test_consistency_prefix_namespace(): + pytest.importorskip("lxml") + df_lxml = read_xml( + StringIO(xml_prefix_nmsp), + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser="lxml", + ) + + df_etree = read_xml( + StringIO(xml_prefix_nmsp), + xpath=".//doc:row", + namespaces={"doc": 
"http://example.com"}, + parser="etree", + ) + + tm.assert_frame_equal(df_lxml, df_etree) + + +# PREFIX + + +def test_missing_prefix_with_default_namespace(xml_books, parser): + with pytest.raises(ValueError, match=("xpath does not return any nodes")): + read_xml(xml_books, xpath=".//Placemark", parser=parser) + + +def test_missing_prefix_definition_etree(kml_cta_rail_lines): + with pytest.raises(SyntaxError, match=("you used an undeclared namespace prefix")): + read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="etree") + + +def test_missing_prefix_definition_lxml(kml_cta_rail_lines): + lxml_etree = pytest.importorskip("lxml.etree") + + with pytest.raises(lxml_etree.XPathEvalError, match=("Undefined namespace prefix")): + read_xml(kml_cta_rail_lines, xpath=".//kml:Placemark", parser="lxml") + + +@pytest.mark.parametrize("key", ["", None]) +def test_none_namespace_prefix(key): + pytest.importorskip("lxml") + with pytest.raises( + TypeError, match=("empty namespace prefix is not supported in XPath") + ): + read_xml( + StringIO(xml_default_nmsp), + xpath=".//kml:Placemark", + namespaces={key: "http://www.opengis.net/kml/2.2"}, + parser="lxml", + ) + + +# ELEMS AND ATTRS + + +def test_file_elems_and_attrs(xml_books, parser): + df_file = read_xml(xml_books, parser=parser) + df_iter = read_xml( + xml_books, + parser=parser, + iterparse={"book": ["category", "title", "author", "year", "price"]}, + ) + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_file_only_attrs(xml_books, parser): + df_file = read_xml(xml_books, attrs_only=True, parser=parser) + df_iter = read_xml(xml_books, parser=parser, iterparse={"book": ["category"]}) + df_expected = DataFrame({"category": ["cooking", "children", "web"]}) + + tm.assert_frame_equal(df_file, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_file_only_elems(xml_books, parser): + df_file = read_xml(xml_books, elems_only=True, parser=parser) + df_iter = read_xml( + xml_books, + parser=parser, + iterparse={"book": ["title", "author", "year", "price"]}, + ) + df_expected = DataFrame( + { + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_elem_and_attrs_only(kml_cta_rail_lines, parser): + with pytest.raises( + ValueError, + match=("Either element or attributes can be parsed not both"), + ): + read_xml(kml_cta_rail_lines, elems_only=True, attrs_only=True, parser=parser) + + +def test_empty_attrs_only(parser): + xml = """ + + + square + 360 + + + circle + 360 + + + triangle + 180 + + """ + + with pytest.raises( + ValueError, + match=("xpath does not return any nodes or attributes"), + ): + read_xml(StringIO(xml), xpath="./row", attrs_only=True, parser=parser) + + +def test_empty_elems_only(parser): + xml = """ + + + + + """ + + with pytest.raises( + ValueError, + match=("xpath does not return any nodes or attributes"), + ): + read_xml(StringIO(xml), xpath="./row", elems_only=True, parser=parser) + + +def test_attribute_centric_xml(): + pytest.importorskip("lxml") + xml = """\ + + + + + + + + + + + + + + + + + +""" + + df_lxml = read_xml(StringIO(xml), xpath=".//station") + df_etree = read_xml(StringIO(xml), xpath=".//station", parser="etree") + + df_iter_lx = read_xml_iterparse(xml, iterparse={"station": ["Name", "coords"]}) + df_iter_et = read_xml_iterparse( + xml, parser="etree", iterparse={"station": ["Name", "coords"]} + ) + + tm.assert_frame_equal(df_lxml, df_etree) + tm.assert_frame_equal(df_iter_lx, df_iter_et) + + +# NAMES + + +def test_names_option_output(xml_books, parser): + df_file = read_xml( + xml_books, names=["Col1", "Col2", "Col3", "Col4", "Col5"], parser=parser + ) + df_iter = read_xml( + xml_books, + parser=parser, + names=["Col1", "Col2", "Col3", "Col4", "Col5"], + iterparse={"book": ["category", "title", "author", "year", "price"]}, + ) + + df_expected = DataFrame( + { + "Col1": ["cooking", "children", "web"], + "Col2": ["Everyday Italian", "Harry Potter", "Learning XML"], + "Col3": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "Col4": [2005, 2005, 2003], + "Col5": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_repeat_names(parser): + xml = """\ + + + circle + curved + + + sphere + curved + +""" + df_xpath = read_xml( + StringIO(xml), + xpath=".//shape", + parser=parser, + names=["type_dim", "shape", "type_edge"], + ) + + df_iter = read_xml_iterparse( + xml, + parser=parser, + iterparse={"shape": ["type", "name", "type"]}, + names=["type_dim", "shape", "type_edge"], + ) + + df_expected = DataFrame( + { + "type_dim": ["2D", "3D"], + "shape": ["circle", "sphere"], + "type_edge": ["curved", "curved"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_repeat_values_new_names(parser): + xml = """\ + + + rectangle + rectangle + + + square + rectangle + + + ellipse + ellipse + + + circle + ellipse + +""" + df_xpath = read_xml( + StringIO(xml), xpath=".//shape", parser=parser, names=["name", "group"] + ) + + df_iter = read_xml_iterparse( + xml, + parser=parser, + iterparse={"shape": ["name", "family"]}, + names=["name", "group"], + ) + + df_expected = DataFrame( + { + "name": ["rectangle", "square", "ellipse", "circle"], + "group": ["rectangle", "rectangle", "ellipse", "ellipse"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_repeat_elements(parser): + xml = """\ + + + circle + ellipse + 360 + 0 + + + triangle + polygon + 180 + 3 + + + square + polygon + 360 + 4 + +""" + df_xpath = read_xml( + StringIO(xml), + xpath=".//shape", + parser=parser, + names=["name", "family", "degrees", "sides"], + ) + + df_iter = read_xml_iterparse( + xml, + parser=parser, + iterparse={"shape": ["value", "value", "value", "value"]}, + names=["name", "family", "degrees", "sides"], + ) + + df_expected = DataFrame( + { + "name": ["circle", "triangle", "square"], + "family": ["ellipse", "polygon", "polygon"], + "degrees": [360, 180, 360], + "sides": [0, 3, 4], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_names_option_wrong_length(xml_books, parser): + with pytest.raises(ValueError, match=("names does not match length")): + read_xml(xml_books, names=["Col1", "Col2", "Col3"], parser=parser) + + +def test_names_option_wrong_type(xml_books, parser): + with pytest.raises(TypeError, match=("is not a valid type for names")): + read_xml(xml_books, names="Col1, Col2, Col3", parser=parser) + + +# ENCODING + + +def test_wrong_encoding(xml_baby_names, parser): + with pytest.raises(UnicodeDecodeError, match=("'utf-8' codec can't decode")): + read_xml(xml_baby_names, parser=parser) + + +def test_utf16_encoding(xml_baby_names, parser): + with pytest.raises( + UnicodeError, + match=( + "UTF-16 stream does not start with BOM|" + "'utf-16-le' codec can't decode byte" + ), + ): + read_xml(xml_baby_names, encoding="UTF-16", parser=parser) + + +def test_unknown_encoding(xml_baby_names, parser): + with pytest.raises(LookupError, match=("unknown encoding: UFT-8")): + read_xml(xml_baby_names, encoding="UFT-8", parser=parser) + + +def test_ascii_encoding(xml_baby_names, parser): + with pytest.raises(UnicodeDecodeError, match=("'ascii' codec can't decode byte")): + read_xml(xml_baby_names, encoding="ascii", parser=parser) + + +def test_parser_consistency_with_encoding(xml_baby_names): + pytest.importorskip("lxml") + df_xpath_lxml = read_xml(xml_baby_names, 
parser="lxml", encoding="ISO-8859-1") + df_xpath_etree = read_xml(xml_baby_names, parser="etree", encoding="iso-8859-1") + + df_iter_lxml = read_xml( + xml_baby_names, + parser="lxml", + encoding="ISO-8859-1", + iterparse={"row": ["rank", "malename", "femalename"]}, + ) + df_iter_etree = read_xml( + xml_baby_names, + parser="etree", + encoding="ISO-8859-1", + iterparse={"row": ["rank", "malename", "femalename"]}, + ) + + tm.assert_frame_equal(df_xpath_lxml, df_xpath_etree) + tm.assert_frame_equal(df_xpath_etree, df_iter_etree) + tm.assert_frame_equal(df_iter_lxml, df_iter_etree) + + +def test_wrong_encoding_for_lxml(): + pytest.importorskip("lxml") + # GH#45133 + data = """ + + c + + +""" + with pytest.raises(TypeError, match="encoding None"): + read_xml(StringIO(data), parser="lxml", encoding=None) + + +def test_none_encoding_etree(): + # GH#45133 + data = """ + + c + + +""" + result = read_xml(StringIO(data), parser="etree", encoding=None) + expected = DataFrame({"a": ["c"]}) + tm.assert_frame_equal(result, expected) + + +# PARSER + + +@td.skip_if_installed("lxml") +def test_default_parser_no_lxml(xml_books): + with pytest.raises( + ImportError, match=("lxml not found, please install or use the etree parser.") + ): + read_xml(xml_books) + + +def test_wrong_parser(xml_books): + with pytest.raises( + ValueError, match=("Values for parser can only be lxml or etree.") + ): + read_xml(xml_books, parser="bs4") + + +# STYLESHEET + + +def test_stylesheet_file(kml_cta_rail_lines, xsl_flatten_doc): + pytest.importorskip("lxml") + df_style = read_xml( + kml_cta_rail_lines, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl_flatten_doc, + ) + + df_iter = read_xml( + kml_cta_rail_lines, + iterparse={ + "Placemark": [ + "id", + "name", + "styleUrl", + "extrude", + "altitudeMode", + "coordinates", + ] + }, + ) + + tm.assert_frame_equal(df_kml, df_style) + tm.assert_frame_equal(df_kml, df_iter) + + +def test_stylesheet_file_like(kml_cta_rail_lines, xsl_flatten_doc, mode): + pytest.importorskip("lxml") + with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f: + df_style = read_xml( + kml_cta_rail_lines, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=f, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +def test_stylesheet_io(kml_cta_rail_lines, xsl_flatten_doc, mode): + # note: By default the bodies of untyped functions are not checked, + # consider using --check-untyped-defs + pytest.importorskip("lxml") + xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked] + + with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + else: + xsl_obj = StringIO(f.read()) + + df_style = read_xml( + kml_cta_rail_lines, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl_obj, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +def test_stylesheet_buffered_reader(kml_cta_rail_lines, xsl_flatten_doc, mode): + pytest.importorskip("lxml") + with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f: + xsl_obj = f.read() + + df_style = read_xml( + kml_cta_rail_lines, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl_obj, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +def test_style_charset(): + pytest.importorskip("lxml") + xml = "<中文標籤>12" + + xsl = """\ + + + + + + + + + + + + <根> + + + + +""" + + 
df_orig = read_xml(StringIO(xml)) + df_style = read_xml(StringIO(xml), stylesheet=xsl) + + tm.assert_frame_equal(df_orig, df_style) + + +def test_not_stylesheet(kml_cta_rail_lines, xml_books): + lxml_etree = pytest.importorskip("lxml.etree") + + with pytest.raises( + lxml_etree.XSLTParseError, match=("document is not a stylesheet") + ): + read_xml(kml_cta_rail_lines, stylesheet=xml_books) + + +def test_incorrect_xsl_syntax(kml_cta_rail_lines): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + + + + + + + +""" + + with pytest.raises( + lxml_etree.XMLSyntaxError, match=("Extra content at the end of the document") + ): + read_xml(kml_cta_rail_lines, stylesheet=xsl) + + +def test_incorrect_xsl_eval(kml_cta_rail_lines): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + + + + + + + +""" + + with pytest.raises(lxml_etree.XSLTParseError, match=("failed to compile")): + read_xml(kml_cta_rail_lines, stylesheet=xsl) + + +def test_incorrect_xsl_apply(kml_cta_rail_lines): + lxml_etree = pytest.importorskip("lxml.etree") + + xsl = """\ + + + + + + + + + +""" + + with pytest.raises(lxml_etree.XSLTApplyError, match=("Cannot resolve URI")): + read_xml(kml_cta_rail_lines, stylesheet=xsl) + + +def test_wrong_stylesheet(kml_cta_rail_lines, xml_data_path): + xml_etree = pytest.importorskip("lxml.etree") + + xsl = xml_data_path / "flatten.xsl" + + with pytest.raises( + xml_etree.XMLSyntaxError, + match=("Start tag expected, '<' not found"), + ): + read_xml(kml_cta_rail_lines, stylesheet=xsl) + + +def test_stylesheet_file_close(kml_cta_rail_lines, xsl_flatten_doc, mode): + # note: By default the bodies of untyped functions are not checked, + # consider using --check-untyped-defs + pytest.importorskip("lxml") + xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked] + + with open(xsl_flatten_doc, mode, encoding="utf-8" if mode == "r" else None) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + else: + xsl_obj = StringIO(f.read()) + + read_xml(kml_cta_rail_lines, stylesheet=xsl_obj) + + assert not f.closed + + +def test_stylesheet_with_etree(kml_cta_rail_lines, xsl_flatten_doc): + pytest.importorskip("lxml") + with pytest.raises( + ValueError, match=("To use stylesheet, you need lxml installed") + ): + read_xml(kml_cta_rail_lines, parser="etree", stylesheet=xsl_flatten_doc) + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_stylesheet(val): + pytest.importorskip("lxml") + msg = ( + "Passing literal xml to 'read_xml' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." + ) + kml = os.path.join("data", "xml", "cta_rail_lines.kml") + + with pytest.raises(FutureWarning, match=msg): + read_xml(kml, stylesheet=val) + + +# ITERPARSE +def test_file_like_iterparse(xml_books, parser, mode): + with open(xml_books, mode, encoding="utf-8" if mode == "r" else None) as f: + if mode == "r" and parser == "lxml": + with pytest.raises( + TypeError, match=("reading file objects must return bytes objects") + ): + read_xml( + f, + parser=parser, + iterparse={ + "book": ["category", "title", "year", "author", "price"] + }, + ) + return None + else: + df_filelike = read_xml( + f, + parser=parser, + iterparse={"book": ["category", "title", "year", "author", "price"]}, + ) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_filelike, df_expected) + + +def test_file_io_iterparse(xml_books, parser, mode): + funcIO = StringIO if mode == "r" else BytesIO + with open( + xml_books, + mode, + encoding="utf-8" if mode == "r" else None, + ) as f: + with funcIO(f.read()) as b: + if mode == "r" and parser == "lxml": + with pytest.raises( + TypeError, match=("reading file objects must return bytes objects") + ): + read_xml( + b, + parser=parser, + iterparse={ + "book": ["category", "title", "year", "author", "price"] + }, + ) + return None + else: + df_fileio = read_xml( + b, + parser=parser, + iterparse={ + "book": ["category", "title", "year", "author", "price"] + }, + ) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_fileio, df_expected) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_url_path_error(parser, httpserver, xml_file): + with open(xml_file, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + with pytest.raises( + ParserError, match=("iterparse is designed for large XML files") + ): + read_xml( + httpserver.url, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + +def test_compression_error(parser, compression_only): + with tm.ensure_clean(filename="geom_xml.zip") as path: + geom_df.to_xml(path, parser=parser, compression=compression_only) + + with pytest.raises( + ParserError, match=("iterparse is designed for large XML files") + ): + read_xml( + path, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + compression=compression_only, + ) + + +def test_wrong_dict_type(xml_books, parser): + with pytest.raises(TypeError, match="list is not a valid type for iterparse"): + read_xml( + xml_books, + parser=parser, + iterparse=["category", "title", "year", "author", "price"], + ) + + +def test_wrong_dict_value(xml_books, parser): + with pytest.raises( + TypeError, match=" is not a valid type for value in iterparse" + ): + read_xml(xml_books, parser=parser, iterparse={"book": "category"}) + + +def test_bad_xml(parser): + bad_xml = """\ + + + square + 00360 + 4.0 + 2020-01-01 + + + circle + 00360 + + 2021-01-01 + + + triangle + 00180 + 3.0 + 2022-01-01 + +""" + with tm.ensure_clean(filename="bad.xml") as path: + with open(path, "w", encoding="utf-8") as f: + f.write(bad_xml) + + with pytest.raises( + SyntaxError, + match=( + "Extra content at the end of the document|" + "junk after document element" + ), + ): + read_xml( + path, + parser=parser, + parse_dates=["date"], + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + +def test_comment(parser): + xml = """\ + + + + + circle + 2D + + + sphere + 3D + + + + +""" + + df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser) + + df_iter = read_xml_iterparse( + xml, parser=parser, iterparse={"shape": ["name", "type"]} + ) + + df_expected = DataFrame( + { + "name": ["circle", "sphere"], + "type": ["2D", "3D"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtd(parser): + xml = """\ + + + + +]> + + + circle + 2D + + + sphere + 3D + +""" + + df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser) + + df_iter = 
read_xml_iterparse( + xml, parser=parser, iterparse={"shape": ["name", "type"]} + ) + + df_expected = DataFrame( + { + "name": ["circle", "sphere"], + "type": ["2D", "3D"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_processing_instruction(parser): + xml = """\ + + + + + +, , ?> + + + circle + 2D + + + sphere + 3D + +""" + + df_xpath = read_xml(StringIO(xml), xpath=".//shape", parser=parser) + + df_iter = read_xml_iterparse( + xml, parser=parser, iterparse={"shape": ["name", "type"]} + ) + + df_expected = DataFrame( + { + "name": ["circle", "sphere"], + "type": ["2D", "3D"], + } + ) + + tm.assert_frame_equal(df_xpath, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_no_result(xml_books, parser): + with pytest.raises( + ParserError, match="No result from selected items in iterparse." + ): + read_xml( + xml_books, + parser=parser, + iterparse={"node": ["attr1", "elem1", "elem2", "elem3"]}, + ) + + +def test_empty_data(xml_books, parser): + with pytest.raises(EmptyDataError, match="No columns to parse from file"): + read_xml( + xml_books, + parser=parser, + iterparse={"book": ["attr1", "elem1", "elem2", "elem3"]}, + ) + + +def test_online_stylesheet(): + pytest.importorskip("lxml") + xml = """\ + + + + Empire Burlesque + Bob Dylan + USA + Columbia + 10.90 + 1985 + + + Hide your heart + Bonnie Tyler + UK + CBS Records + 9.90 + 1988 + + + Greatest Hits + Dolly Parton + USA + RCA + 9.90 + 1982 + + + Still got the blues + Gary Moore + UK + Virgin records + 10.20 + 1990 + + + Eros + Eros Ramazzotti + EU + BMG + 9.90 + 1997 + + + One night only + Bee Gees + UK + Polydor + 10.90 + 1998 + + + Sylvias Mother + Dr.Hook + UK + CBS + 8.10 + 1973 + + + Maggie May + Rod Stewart + UK + Pickwick + 8.50 + 1990 + + + Romanza + Andrea Bocelli + EU + Polydor + 10.80 + 1996 + + + When a man loves a woman + Percy Sledge + USA + Atlantic + 8.70 + 1987 + + + Black angel + Savage Rose + EU + Mega + 10.90 + 1995 + + + 1999 Grammy Nominees + Many + USA + Grammy + 10.20 + 1999 + + + For the good times + Kenny Rogers + UK + Mucik Master + 8.70 + 1995 + + + Big Willie style + Will Smith + USA + Columbia + 9.90 + 1997 + + + Tupelo Honey + Van Morrison + UK + Polydor + 8.20 + 1971 + + + Soulsville + Jorn Hoel + Norway + WEA + 7.90 + 1996 + + + The very best of + Cat Stevens + UK + Island + 8.90 + 1990 + + + Stop + Sam Brown + UK + A and M + 8.90 + 1988 + + + Bridge of Spies + T`Pau + UK + Siren + 7.90 + 1987 + + + Private Dancer + Tina Turner + UK + Capitol + 8.90 + 1983 + + + Midt om natten + Kim Larsen + EU + Medley + 7.80 + 1983 + + + Pavarotti Gala Concert + Luciano Pavarotti + UK + DECCA + 9.90 + 1991 + + + The dock of the bay + Otis Redding + USA + Stax Records + 7.90 + 1968 + + + Picture book + Simply Red + EU + Elektra + 7.20 + 1985 + + + Red + The Communards + UK + London + 7.80 + 1987 + + + Unchain my heart + Joe Cocker + USA + EMI + 8.20 + 1987 + + +""" + xsl = """\ + + + + + +

+ <xsl:template match="/">
+  <html>
+  <body>
+    <h2>My CD Collection</h2>
+    <table>
+      <tr><th>Title</th><th>Artist</th></tr>
+      <xsl:for-each select="catalog/cd">
+        <tr><td><xsl:value-of select="title"/></td><td><xsl:value-of select="artist"/></td></tr>
+      </xsl:for-each>
+    </table>
+  </body>
+  </html>
+ </xsl:template>
+</xsl:stylesheet>
+""" + + df_xsl = read_xml( + StringIO(xml), + xpath=".//tr[td and position() <= 6]", + names=["title", "artist"], + stylesheet=xsl, + ) + + df_expected = DataFrame( + { + "title": { + 0: "Empire Burlesque", + 1: "Hide your heart", + 2: "Greatest Hits", + 3: "Still got the blues", + 4: "Eros", + }, + "artist": { + 0: "Bob Dylan", + 1: "Bonnie Tyler", + 2: "Dolly Parton", + 3: "Gary Moore", + 4: "Eros Ramazzotti", + }, + } + ) + + tm.assert_frame_equal(df_expected, df_xsl) + + +# COMPRESSION + + +def test_compression_read(parser, compression_only): + with tm.ensure_clean() as comp_path: + geom_df.to_xml( + comp_path, index=False, parser=parser, compression=compression_only + ) + + df_xpath = read_xml(comp_path, parser=parser, compression=compression_only) + + df_iter = read_xml_iterparse_comp( + comp_path, + compression_only, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides"]}, + compression=compression_only, + ) + + tm.assert_frame_equal(df_xpath, geom_df) + tm.assert_frame_equal(df_iter, geom_df) + + +def test_wrong_compression(parser, compression, compression_only): + actual_compression = compression + attempted_compression = compression_only + + if actual_compression == attempted_compression: + pytest.skip(f"{actual_compression} == {attempted_compression}") + + errors = { + "bz2": (OSError, "Invalid data stream"), + "gzip": (OSError, "Not a gzipped file"), + "zip": (BadZipFile, "File is not a zip file"), + "tar": (ReadError, "file could not be opened successfully"), + } + zstd = import_optional_dependency("zstandard", errors="ignore") + if zstd is not None: + errors["zstd"] = (zstd.ZstdError, "Unknown frame descriptor") + lzma = import_optional_dependency("lzma", errors="ignore") + if lzma is not None: + errors["xz"] = (LZMAError, "Input format not supported by decoder") + error_cls, error_str = errors[attempted_compression] + + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=actual_compression) + + with pytest.raises(error_cls, match=error_str): + read_xml(path, parser=parser, compression=attempted_compression) + + +def test_unsuported_compression(parser): + with pytest.raises(ValueError, match="Unrecognized compression type"): + with tm.ensure_clean() as path: + read_xml(path, parser=parser, compression="7z") + + +# STORAGE OPTIONS + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_s3_parser_consistency(s3_public_bucket_with_data, s3so): + pytest.importorskip("s3fs") + pytest.importorskip("lxml") + s3 = f"s3://{s3_public_bucket_with_data.name}/books.xml" + + df_lxml = read_xml(s3, parser="lxml", storage_options=s3so) + + df_etree = read_xml(s3, parser="etree", storage_options=s3so) + + tm.assert_frame_equal(df_lxml, df_etree) + + +def test_read_xml_nullable_dtypes( + parser, string_storage, dtype_backend, using_infer_string +): + # GH#50500 + data = """ + + + x + 1 + 4.0 + x + 2 + 4.0 + + True + False + + + y + 2 + 5.0 + + + + + False + + +""" + + if using_infer_string: + pa = pytest.importorskip("pyarrow") + string_array = ArrowStringArrayNumpySemantics(pa.array(["x", "y"])) + string_array_na = ArrowStringArrayNumpySemantics(pa.array(["x", None])) + + elif string_storage == "python": + string_array = StringArray(np.array(["x", "y"], dtype=np.object_)) + string_array_na = StringArray(np.array(["x", NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["x", "y"])) + string_array_na = 
ArrowExtensionArray(pa.array(["x", None])) + + else: + pa = pytest.importorskip("pyarrow") + string_array = ArrowStringArray(pa.array(["x", "y"])) + string_array_na = ArrowStringArray(pa.array(["x", None])) + + with pd.option_context("mode.string_storage", string_storage): + result = read_xml(StringIO(data), parser=parser, dtype_backend=dtype_backend) + + expected = DataFrame( + { + "a": string_array, + "b": Series([1, 2], dtype="Int64"), + "c": Series([4.0, 5.0], dtype="Float64"), + "d": string_array_na, + "e": Series([2, NA], dtype="Int64"), + "f": Series([4.0, NA], dtype="Float64"), + "g": Series([NA, NA], dtype="Int64"), + "h": Series([True, False], dtype="boolean"), + "i": Series([False, NA], dtype="boolean"), + } + ) + + if dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + expected["g"] = ArrowExtensionArray(pa.array([None, None])) + + tm.assert_frame_equal(result, expected) + + +def test_invalid_dtype_backend(): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + read_xml("test", dtype_backend="numpy") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml_dtypes.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..a85576ff13f5c1011b41c0ba4735619c5f5fb742 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/xml/test_xml_dtypes.py @@ -0,0 +1,485 @@ +from __future__ import annotations + +from io import StringIO + +import pytest + +from pandas.errors import ParserWarning +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + to_datetime, +) +import pandas._testing as tm + +from pandas.io.xml import read_xml + + +@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"]) +def parser(request): + return request.param + + +@pytest.fixture( + params=[None, {"book": ["category", "title", "author", "year", "price"]}] +) +def iterparse(request): + return request.param + + +def read_xml_iterparse(data, **kwargs): + with tm.ensure_clean() as path: + with open(path, "w", encoding="utf-8") as f: + f.write(data) + return read_xml(path, **kwargs) + + +xml_types = """\ + + + + square + 00360 + 4.0 + + + circle + 00360 + + + + triangle + 00180 + 3.0 + +""" + +xml_dates = """ + + + square + 00360 + 4.0 + 2020-01-01 + + + circle + 00360 + + 2021-01-01 + + + triangle + 00180 + 3.0 + 2022-01-01 + +""" + + +# DTYPE + + +def test_dtype_single_str(parser): + df_result = read_xml(StringIO(xml_types), dtype={"degrees": "str"}, parser=parser) + df_iter = read_xml_iterparse( + xml_types, + parser=parser, + dtype={"degrees": "str"}, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": ["00360", "00360", "00180"], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtypes_all_str(parser): + df_result = read_xml(StringIO(xml_dates), dtype="string", parser=parser) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + dtype="string", + iterparse={"row": ["shape", "degrees", "sides", 
"date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": ["00360", "00360", "00180"], + "sides": ["4.0", None, "3.0"], + "date": ["2020-01-01", "2021-01-01", "2022-01-01"], + }, + dtype="string", + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtypes_with_names(parser): + df_result = read_xml( + StringIO(xml_dates), + names=["Col1", "Col2", "Col3", "Col4"], + dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64[ns]"}, + parser=parser, + ) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + names=["Col1", "Col2", "Col3", "Col4"], + dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64[ns]"}, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "Col1": ["square", "circle", "triangle"], + "Col2": Series(["00360", "00360", "00180"]).astype("string"), + "Col3": Series([4.0, float("nan"), 3.0]).astype("Int64"), + "Col4": DatetimeIndex( + ["2020-01-01", "2021-01-01", "2022-01-01"], dtype="M8[ns]" + ), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtype_nullable_int(parser): + df_result = read_xml(StringIO(xml_types), dtype={"sides": "Int64"}, parser=parser) + df_iter = read_xml_iterparse( + xml_types, + parser=parser, + dtype={"sides": "Int64"}, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": Series([4.0, float("nan"), 3.0]).astype("Int64"), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_dtype_float(parser): + df_result = read_xml(StringIO(xml_types), dtype={"degrees": "float"}, parser=parser) + df_iter = read_xml_iterparse( + xml_types, + parser=parser, + dtype={"degrees": "float"}, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": Series([360, 360, 180]).astype("float"), + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_wrong_dtype(xml_books, parser, iterparse): + with pytest.raises( + ValueError, match=('Unable to parse string "Everyday Italian" at position 0') + ): + read_xml( + xml_books, dtype={"title": "Int64"}, parser=parser, iterparse=iterparse + ) + + +def test_both_dtype_converters(parser): + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": ["00360", "00360", "00180"], + "sides": [4.0, float("nan"), 3.0], + } + ) + + with tm.assert_produces_warning(ParserWarning, match="Both a converter and dtype"): + df_result = read_xml( + StringIO(xml_types), + dtype={"degrees": "str"}, + converters={"degrees": str}, + parser=parser, + ) + df_iter = read_xml_iterparse( + xml_types, + dtype={"degrees": "str"}, + converters={"degrees": str}, + parser=parser, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +# CONVERTERS + + +def test_converters_str(parser): + df_result = read_xml( + StringIO(xml_types), converters={"degrees": str}, parser=parser + ) + df_iter = read_xml_iterparse( + xml_types, + parser=parser, + converters={"degrees": str}, + iterparse={"row": ["shape", "degrees", "sides"]}, + ) + + df_expected 
= DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": ["00360", "00360", "00180"], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_converters_date(parser): + convert_to_datetime = lambda x: to_datetime(x) + df_result = read_xml( + StringIO(xml_dates), converters={"date": convert_to_datetime}, parser=parser + ) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + converters={"date": convert_to_datetime}, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_wrong_converters_type(xml_books, parser, iterparse): + with pytest.raises(TypeError, match=("Type converters must be a dict or subclass")): + read_xml( + xml_books, converters={"year", str}, parser=parser, iterparse=iterparse + ) + + +def test_callable_func_converters(xml_books, parser, iterparse): + with pytest.raises(TypeError, match=("'float' object is not callable")): + read_xml( + xml_books, converters={"year": float()}, parser=parser, iterparse=iterparse + ) + + +def test_callable_str_converters(xml_books, parser, iterparse): + with pytest.raises(TypeError, match=("'str' object is not callable")): + read_xml( + xml_books, converters={"year": "float"}, parser=parser, iterparse=iterparse + ) + + +# PARSE DATES + + +def test_parse_dates_column_name(parser): + df_result = read_xml(StringIO(xml_dates), parse_dates=["date"], parser=parser) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + parse_dates=["date"], + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_parse_dates_column_index(parser): + df_result = read_xml(StringIO(xml_dates), parse_dates=[3], parser=parser) + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + parse_dates=[3], + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": to_datetime(["2020-01-01", "2021-01-01", "2022-01-01"]), + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_parse_dates_true(parser): + df_result = read_xml(StringIO(xml_dates), parse_dates=True, parser=parser) + + df_iter = read_xml_iterparse( + xml_dates, + parser=parser, + parse_dates=True, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": ["2020-01-01", "2021-01-01", "2022-01-01"], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_parse_dates_dictionary(parser): + xml = """ + + + square + 360 + 4.0 + 2020 + 12 + 31 + + + circle + 360 + + 2021 + 12 + 31 + + + 
triangle + 180 + 3.0 + 2022 + 12 + 31 + +""" + + df_result = read_xml( + StringIO(xml), parse_dates={"date_end": ["year", "month", "day"]}, parser=parser + ) + df_iter = read_xml_iterparse( + xml, + parser=parser, + parse_dates={"date_end": ["year", "month", "day"]}, + iterparse={"row": ["shape", "degrees", "sides", "year", "month", "day"]}, + ) + + df_expected = DataFrame( + { + "date_end": to_datetime(["2020-12-31", "2021-12-31", "2022-12-31"]), + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_day_first_parse_dates(parser): + xml = """\ + + + + square + 00360 + 4.0 + 31/12/2020 + + + circle + 00360 + + 31/12/2021 + + + triangle + 00180 + 3.0 + 31/12/2022 + +""" + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + "date": to_datetime(["2020-12-31", "2021-12-31", "2022-12-31"]), + } + ) + + with tm.assert_produces_warning( + UserWarning, match="Parsing dates in %d/%m/%Y format" + ): + df_result = read_xml(StringIO(xml), parse_dates=["date"], parser=parser) + df_iter = read_xml_iterparse( + xml, + parse_dates=["date"], + parser=parser, + iterparse={"row": ["shape", "degrees", "sides", "date"]}, + ) + + tm.assert_frame_equal(df_result, df_expected) + tm.assert_frame_equal(df_iter, df_expected) + + +def test_wrong_parse_dates_type(xml_books, parser, iterparse): + with pytest.raises( + TypeError, match=("Only booleans, lists, and dictionaries are accepted") + ): + read_xml(xml_books, parse_dates={"date"}, parser=parser, iterparse=iterparse)
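For orientation, a minimal usage sketch of the read_xml call pattern these suites exercise (namespace-qualified xpath selection plus a nullable dtype). The XML literal, the "ns" prefix, and the column names below are illustrative assumptions modeled on the fixtures above, not code taken from the files in this diff:

    from io import StringIO

    import pandas as pd

    # Small document modeled on the default-namespace fixtures used in the tests.
    xml = """\
    <data xmlns="http://example.com">
      <row><shape>square</shape><degrees>360</degrees><sides>4.0</sides></row>
      <row><shape>circle</shape><degrees>360</degrees><sides/></row>
      <row><shape>triangle</shape><degrees>180</degrees><sides>3.0</sides></row>
    </data>"""

    # Bind the default namespace to a prefix so the xpath can address the rows,
    # and request a nullable integer dtype for the column with a missing value.
    df = pd.read_xml(
        StringIO(xml),                          # wrap literal XML, as the deprecation tests require
        xpath=".//ns:row",
        namespaces={"ns": "http://example.com"},
        parser="etree",                         # also works when lxml is not installed
        dtype={"sides": "Int64"},
    )
    print(df)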