diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_chunksize.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_chunksize.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f42cf674b0a7744e174b108955ac6f4aabcd179
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_chunksize.py
@@ -0,0 +1,378 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas._libs import parsers as libparsers
+from pandas.errors import DtypeWarning
+
+from pandas import (
+    DataFrame,
+    concat,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+
+@pytest.mark.parametrize("index_col", [0, "index"])
+def test_read_chunksize_with_index(all_parsers, index_col):
+    parser = all_parsers
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+
+    expected = DataFrame(
+        [
+            ["foo", 2, 3, 4, 5],
+            ["bar", 7, 8, 9, 10],
+            ["baz", 12, 13, 14, 15],
+            ["qux", 12, 13, 14, 15],
+            ["foo2", 12, 13, 14, 15],
+            ["bar2", 12, 13, 14, 15],
+        ],
+        columns=["index", "A", "B", "C", "D"],
+    )
+    expected = expected.set_index("index")
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
+                list(reader)
+        return
+
+    with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
+        chunks = list(reader)
+    tm.assert_frame_equal(chunks[0], expected[:2])
+    tm.assert_frame_equal(chunks[1], expected[2:4])
+    tm.assert_frame_equal(chunks[2], expected[4:])
+
+
+@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
+def test_read_chunksize_bad(all_parsers, chunksize):
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+    parser = all_parsers
+    msg = r"'chunksize' must be an integer >=1"
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+
+    with pytest.raises(ValueError, match=msg):
+        with parser.read_csv(StringIO(data), chunksize=chunksize) as _:
+            pass
+
+
+@pytest.mark.parametrize("chunksize", [2, 8])
+def test_read_chunksize_and_nrows(all_parsers, chunksize):
+    # see gh-15755
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+    parser = all_parsers
+    kwargs = {"index_col": 0, "nrows": 5}
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+        return
+
+    expected = parser.read_csv(StringIO(data), **kwargs)
+    with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
+        tm.assert_frame_equal(concat(reader), expected)
+
+
+def test_read_chunksize_and_nrows_changing_size(all_parsers):
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+    parser = all_parsers
+    kwargs = {"index_col": 0, "nrows": 5}
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+        return
+
+    expected = parser.read_csv(StringIO(data), **kwargs)
+    with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
+        tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
+        tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
+
+        with pytest.raises(StopIteration, match=""):
+            reader.get_chunk(size=3)
+
+
+def test_get_chunk_passed_chunksize(all_parsers):
+    parser = all_parsers
+    data = """A,B,C
+1,2,3
+4,5,6
+7,8,9
+1,2,3"""
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(StringIO(data), chunksize=2) as reader:
+                reader.get_chunk()
+        return
+
+    with parser.read_csv(StringIO(data), chunksize=2) as reader:
+        result = reader.get_chunk()
+
+    expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
+def test_read_chunksize_compat(all_parsers, kwargs):
+    # see gh-12185
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), **kwargs)
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
+                concat(reader)
+        return
+
+    with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
+        via_reader = concat(reader)
+    tm.assert_frame_equal(via_reader, result)
+
+
+def test_read_chunksize_jagged_names(all_parsers):
+    # see gh-23509
+    parser = all_parsers
+    data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
+
+    expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(
+                StringIO(data), names=range(10), chunksize=4
+            ) as reader:
+                concat(reader)
+        return
+
+    with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:
+        result = concat(reader)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_chunk_begins_with_newline_whitespace(all_parsers):
+    # see gh-10022
+    parser = all_parsers
+    data = "\n hello\nworld\n"
+
+    result = parser.read_csv(StringIO(data), header=None)
+    expected = DataFrame([" hello", "world"])
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.slow
+def test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch):
+    # mainly an issue with the C parser
+    heuristic = 2**3
+    parser = all_parsers
+    integers = [str(i) for i in range(heuristic - 1)]
+    data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
+
+    # Coercions should work without warnings.
+    with monkeypatch.context() as m:
+        m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
+        result = parser.read_csv(StringIO(data))
+
+    assert type(result.a[0]) is np.float64
+    assert result.a.dtype == float
+
+
+def test_warn_if_chunks_have_mismatched_type(all_parsers):
+    warning_type = None
+    parser = all_parsers
+    size = 10000
+
+    # see gh-3866: if chunks are different types and can't
+    # be coerced using numerical types, then issue warning.
+    if parser.engine == "c" and parser.low_memory:
+        warning_type = DtypeWarning
+        # Use larger size to hit warning path
+        size = 499999
+
+    integers = [str(i) for i in range(size)]
+    data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
+
+    buf = StringIO(data)
+
+    if parser.engine == "pyarrow":
+        df = parser.read_csv(
+            buf,
+        )
+    else:
+        df = parser.read_csv_check_warnings(
+            warning_type,
+            r"Columns \(0\) have mixed types. "
+            "Specify dtype option on import or set low_memory=False.",
+            buf,
+        )
+
+    assert df.a.dtype == object
+
+
+@pytest.mark.parametrize("iterator", [True, False])
+def test_empty_with_nrows_chunksize(all_parsers, iterator):
+    # see gh-9535
+    parser = all_parsers
+    expected = DataFrame(columns=["foo", "bar"])
+
+    nrows = 10
+    data = StringIO("foo,bar\n")
+
+    if parser.engine == "pyarrow":
+        msg = (
+            "The '(nrows|chunksize)' option is not supported with the 'pyarrow' engine"
+        )
+        with pytest.raises(ValueError, match=msg):
+            if iterator:
+                with parser.read_csv(data, chunksize=nrows) as reader:
+                    next(iter(reader))
+            else:
+                parser.read_csv(data, nrows=nrows)
+        return
+
+    if iterator:
+        with parser.read_csv(data, chunksize=nrows) as reader:
+            result = next(iter(reader))
+    else:
+        result = parser.read_csv(data, nrows=nrows)
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_memory_growth_chunksize(all_parsers):
+    # see gh-24805
+    #
+    # Let's just make sure that we don't crash
+    # as we iteratively process all chunks.
+    parser = all_parsers
+
+    with tm.ensure_clean() as path:
+        with open(path, "w", encoding="utf-8") as f:
+            for i in range(1000):
+                f.write(str(i) + "\n")
+
+        if parser.engine == "pyarrow":
+            msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+            with pytest.raises(ValueError, match=msg):
+                with parser.read_csv(path, chunksize=20) as result:
+                    for _ in result:
+                        pass
+            return
+
+        with parser.read_csv(path, chunksize=20) as result:
+            for _ in result:
+                pass
+
+
+def test_chunksize_with_usecols_second_block_shorter(all_parsers):
+    # GH#21211
+    parser = all_parsers
+    data = """1,2,3,4
+5,6,7,8
+9,10,11
+"""
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data),
+                names=["a", "b"],
+                chunksize=2,
+                usecols=[0, 1],
+                header=None,
+            )
+        return
+
+    result_chunks = parser.read_csv(
+        StringIO(data),
+        names=["a", "b"],
+        chunksize=2,
+        usecols=[0, 1],
+        header=None,
+    )
+
+    expected_frames = [
+        DataFrame({"a": [1, 5], "b": [2, 6]}),
+        DataFrame({"a": [9], "b": [10]}, index=[2]),
+    ]
+
+    for i, result in enumerate(result_chunks):
+        tm.assert_frame_equal(result, expected_frames[i])
+
+
+def test_chunksize_second_block_shorter(all_parsers):
+    # GH#21211
+    parser = all_parsers
+    data = """a,b,c,d
+1,2,3,4
+5,6,7,8
+9,10,11
+"""
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), chunksize=2)
+        return
+
+    result_chunks = parser.read_csv(StringIO(data), chunksize=2)
+
+    expected_frames = [
+        DataFrame({"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}),
+        DataFrame({"a": [9], "b": [10], "c": [11], "d": [np.nan]}, index=[2]),
+    ]
+
+    for i, result in enumerate(result_chunks):
+        tm.assert_frame_equal(result, expected_frames[i])
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_common_basic.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_common_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ffc49e941c14fb9e1a3d2d771bb493b9b283a36
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_common_basic.py
@@ -0,0 +1,979 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from datetime import datetime
+from inspect import signature
+from io import StringIO
+import os
+from pathlib import Path
+import sys
+
+import numpy as np
+import pytest
+
+from pandas.errors import (
+    EmptyDataError,
+    ParserError,
+    ParserWarning,
+)
+
+from pandas import (
+    DataFrame,
+    Index,
+    Timestamp,
+    compat,
+)
+import pandas._testing as tm
+
+from pandas.io.parsers import TextFileReader
+from pandas.io.parsers.c_parser_wrapper import CParserWrapper
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+def test_override_set_noconvert_columns():
+    # see gh-17351
+    #
+    # Usecols needs to be sorted in _set_noconvert_columns based
+    # on the test_usecols_with_parse_dates test from test_usecols.py
+    class MyTextFileReader(TextFileReader):
+        def __init__(self) -> None:
+            self._currow = 0
+            self.squeeze = False
+
+    class MyCParserWrapper(CParserWrapper):
+        def _set_noconvert_columns(self):
+            if self.usecols_dtype == "integer":
+                # self.usecols is a set, which is documented as unordered
+                # but in practice, a CPython set of integers is sorted.
+                # In other implementations this assumption does not hold.
+                # The following code simulates a different order, which
+                # before GH 17351 would cause the wrong columns to be
+                # converted via the parse_dates parameter
+                self.usecols = list(self.usecols)
+                self.usecols.reverse()
+            return CParserWrapper._set_noconvert_columns(self)
+
+    data = """a,b,c,d,e
+0,1,2014-01-01,09:00,4
+0,1,2014-01-02,10:00,4"""
+
+    parse_dates = [[1, 2]]
+    cols = {
+        "a": [0, 0],
+        "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
+    }
+    expected = DataFrame(cols, columns=["c_d", "a"])
+
+    parser = MyTextFileReader()
+    parser.options = {
+        "usecols": [0, 2, 3],
+        "parse_dates": parse_dates,
+        "delimiter": ",",
+    }
+    parser.engine = "c"
+    parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
+
+    result = parser.read()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_local(all_parsers, csv1):
+    prefix = "file:///" if compat.is_platform_windows() else "file://"
+    parser = all_parsers
+
+    fname = prefix + str(os.path.abspath(csv1))
+    result = parser.read_csv(fname, index_col=0, parse_dates=True)
+    # TODO: make unit check more specific
+    if parser.engine == "pyarrow":
+        result.index = result.index.as_unit("ns")
+    expected = DataFrame(
+        [
+            [0.980269, 3.685731, -0.364216805298, -1.159738],
+            [1.047916, -0.041232, -0.16181208307, 0.212549],
+            [0.498581, 0.731168, -0.537677223318, 1.346270],
+            [1.120202, 1.567621, 0.00364077397681, 0.675253],
+            [-0.487094, 0.571455, -1.6116394093, 0.103469],
+            [0.836649, 0.246462, 0.588542635376, 1.062782],
+            [-0.157161, 1.340307, 1.1957779562, -1.097007],
+        ],
+        columns=["A", "B", "C", "D"],
+        index=Index(
+            [
+                datetime(2000, 1, 3),
+                datetime(2000, 1, 4),
+                datetime(2000, 1, 5),
+                datetime(2000, 1, 6),
+                datetime(2000, 1, 7),
+                datetime(2000, 1, 10),
+                datetime(2000, 1, 11),
+            ],
+            name="index",
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_1000_sep(all_parsers):
+    parser = all_parsers
+    data = """A|B|C
+1|2,334|5
+10|13|10.
+"""
+    expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
+
+    if parser.engine == "pyarrow":
+        msg = "The 'thousands' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), sep="|", thousands=",")
+        return
+
+    result = parser.read_csv(StringIO(data), sep="|", thousands=",")
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_unnamed_columns(all_parsers):
+    data = """A,B,C,,
+1,2,3,4,5
+6,7,8,9,10
+11,12,13,14,15
+"""
+    parser = all_parsers
+    expected = DataFrame(
+        [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
+        dtype=np.int64,
+        columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
+    )
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_csv_mixed_type(all_parsers):
+    data = """A,B,C
+a,1,2
+b,3,4
+c,4,5
+"""
+    parser = all_parsers
+    expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_low_memory_no_rows_with_index(all_parsers):
+    # see gh-21141
+    parser = all_parsers
+
+    if not parser.low_memory:
+        pytest.skip("This is a low-memory specific test")
+
+    data = """A,B,C
+1,1,1,2
+2,2,3,4
+3,3,4,5
+"""
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
+        return
+
+    result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
+    expected = DataFrame(columns=["A", "B", "C"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_dataframe(all_parsers, csv1):
+    parser = all_parsers
+    result = parser.read_csv(csv1, index_col=0, parse_dates=True)
+    # TODO: make unit check more specific
+    if parser.engine == "pyarrow":
+        result.index = result.index.as_unit("ns")
+    expected = DataFrame(
+        [
+            [0.980269, 3.685731, -0.364216805298, -1.159738],
+            [1.047916, -0.041232, -0.16181208307, 0.212549],
+            [0.498581, 0.731168, -0.537677223318, 1.346270],
+            [1.120202, 1.567621, 0.00364077397681, 0.675253],
+            [-0.487094, 0.571455, -1.6116394093, 0.103469],
+            [0.836649, 0.246462, 0.588542635376, 1.062782],
+            [-0.157161, 1.340307, 1.1957779562, -1.097007],
+        ],
+        columns=["A", "B", "C", "D"],
+        index=Index(
+            [
+                datetime(2000, 1, 3),
+                datetime(2000, 1, 4),
+                datetime(2000, 1, 5),
+                datetime(2000, 1, 6),
+                datetime(2000, 1, 7),
+                datetime(2000, 1, 10),
+                datetime(2000, 1, 11),
+            ],
+            name="index",
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", [3, 3.0])
+def test_read_nrows(all_parsers, nrows):
+    # see gh-10476
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+    expected = DataFrame(
+        [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
+        columns=["index", "A", "B", "C", "D"],
+    )
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), nrows=nrows)
+        return
+
+    result = parser.read_csv(StringIO(data), nrows=nrows)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
+def test_read_nrows_bad(all_parsers, nrows):
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+    msg = r"'nrows' must be an integer >=0"
+    parser = all_parsers
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(StringIO(data), nrows=nrows)
+
+
+def test_nrows_skipfooter_errors(all_parsers):
+    msg = "'skipfooter' not supported with 'nrows'"
+    data = "a\n1\n2\n3\n4\n5\n6"
+    parser = all_parsers
+
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
+
+
+@skip_pyarrow
+def test_missing_trailing_delimiters(all_parsers):
+    parser = all_parsers
+    data = """A,B,C,D
+1,2,3,4
+1,3,3,
+1,4,5"""
+
+    result = parser.read_csv(StringIO(data))
+    expected = DataFrame(
+        [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
+        columns=["A", "B", "C", "D"],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_skip_initial_space(all_parsers):
+    data = (
+        '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
+        "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
+        "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
+        "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
+        "0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
+        "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
+    )
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data),
+                names=list(range(33)),
+                header=None,
+                na_values=["-9999.0"],
+                skipinitialspace=True,
+            )
+        return
+
+    result = parser.read_csv(
+        StringIO(data),
+        names=list(range(33)),
+        header=None,
+        na_values=["-9999.0"],
+        skipinitialspace=True,
+    )
+    expected = DataFrame(
+        [
+            [
+                "09-Apr-2012",
+                "01:10:18.300",
+                2456026.548822908,
+                12849,
+                1.00361,
+                1.12551,
+                330.65659,
+                355626618.16711,
+                73.48821,
+                314.11625,
+                1917.09447,
+                179.71425,
+                80.0,
+                240.0,
+                -350,
+                70.06056,
+                344.9837,
+                1,
+                1,
+                -0.689265,
+                -0.692787,
+                0.212036,
+                14.7674,
+                41.605,
+                np.nan,
+                np.nan,
+                np.nan,
+                np.nan,
+                np.nan,
+                np.nan,
+                0,
+                12,
+                128,
+            ]
+        ]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_trailing_delimiters(all_parsers):
+    # see gh-2442
+    data = """A,B,C
+1,2,3,
+4,5,6,
+7,8,9,"""
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), index_col=False)
+
+    expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_escapechar(all_parsers):
+    # https://stackoverflow.com/questions/13824840/feature-request-for-
+    # pandas-read-csv
+    data = '''SEARCH_TERM,ACTUAL_URL
+"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
+"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
+"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
+
+    parser = all_parsers
+    result = parser.read_csv(
+        StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
+    )
+
+    assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
+
+    tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
+
+
+def test_ignore_leading_whitespace(all_parsers):
+    # see gh-3374, gh-6607
+    parser = all_parsers
+    data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
+
+    if parser.engine == "pyarrow":
+        msg = "the 'pyarrow' engine does not support regex separators"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), sep=r"\s+")
+        return
+    result = parser.read_csv(StringIO(data), sep=r"\s+")
+
+    expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
+def test_uneven_lines_with_usecols(all_parsers, usecols):
+    # see gh-12203
+    parser = all_parsers
+    data = r"""a,b,c
+0,1,2
+3,4,5,6,7
+8,9,10"""
+
+    if usecols is None:
+        # Make sure that an error is still raised
+        # when the "usecols" parameter is not provided.
+        msg = r"Expected \d+ fields in line \d+, saw \d+"
+        with pytest.raises(ParserError, match=msg):
+            parser.read_csv(StringIO(data))
+    else:
+        expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
+
+        result = parser.read_csv(StringIO(data), usecols=usecols)
+        tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+@pytest.mark.parametrize(
+    "data,kwargs,expected",
+    [
+        # First, check to see that the response of parser when faced with no
+        # provided columns raises the correct error, with or without usecols.
+        ("", {}, None),
+        ("", {"usecols": ["X"]}, None),
+        (
+            ",,",
+            {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
+            DataFrame(columns=["X"], index=[0], dtype=np.float64),
+        ),
+        (
+            "",
+            {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
+            DataFrame(columns=["X"]),
+        ),
+    ],
+)
+def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
+    # see gh-12493
+    parser = all_parsers
+
+    if expected is None:
+        msg = "No columns to parse from file"
+        with pytest.raises(EmptyDataError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+    else:
+        result = parser.read_csv(StringIO(data), **kwargs)
+        tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "kwargs,expected",
+    [
+        # gh-8661, gh-8679: this should ignore six lines, including
+        # lines with trailing whitespace and blank lines.
+        (
+            {
+                "header": None,
+                "delim_whitespace": True,
+                "skiprows": [0, 1, 2, 3, 5, 6],
+                "skip_blank_lines": True,
+            },
+            DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
+        ),
+        # gh-8983: test skipping set of rows after a row with trailing spaces.
+        (
+            {
+                "delim_whitespace": True,
+                "skiprows": [1, 2, 3, 5, 6],
+                "skip_blank_lines": True,
+            },
+            DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
+        ),
+    ],
+)
+def test_trailing_spaces(all_parsers, kwargs, expected):
+    data = "A B C  \nrandom line with trailing spaces    \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n   \n5.1,NaN,10.0\n"  # noqa: E501
+    parser = all_parsers
+
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+
+    if parser.engine == "pyarrow":
+        msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(
+                FutureWarning, match=depr_msg, check_stacklevel=False
+            ):
+                parser.read_csv(StringIO(data.replace(",", "  ")), **kwargs)
+        return
+
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        result = parser.read_csv(StringIO(data.replace(",", "  ")), **kwargs)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_raise_on_sep_with_delim_whitespace(all_parsers):
+    # see gh-6607
+    data = "a b c\n1 2 3"
+    parser = all_parsers
+
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+    with pytest.raises(ValueError, match="you can only specify one"):
+        with tm.assert_produces_warning(
+            FutureWarning, match=depr_msg, check_stacklevel=False
+        ):
+            parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
+
+
+def test_read_filepath_or_buffer(all_parsers):
+    # see gh-43366
+    parser = all_parsers
+
+    with pytest.raises(TypeError, match="Expected file path name or file-like"):
+        parser.read_csv(filepath_or_buffer=b"input")
+
+
+@pytest.mark.parametrize("delim_whitespace", [True, False])
+def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
+    # see gh-9710
+    parser = all_parsers
+    data = """\
+MyColumn
+   a
+   b
+   a
+   b\n"""
+
+    expected = DataFrame({"MyColumn": list("abab")})
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+
+    if parser.engine == "pyarrow":
+        msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(
+                FutureWarning, match=depr_msg, check_stacklevel=False
+            ):
+                parser.read_csv(
+                    StringIO(data),
+                    skipinitialspace=True,
+                    delim_whitespace=delim_whitespace,
+                )
+        return
+
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        result = parser.read_csv(
+            StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
+        )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "sep,skip_blank_lines,exp_data",
+    [
+        (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
+        (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
+        (
+            ",",
+            False,
+            [
+                [1.0, 2.0, 4.0],
+                [np.nan, np.nan, np.nan],
+                [np.nan, np.nan, np.nan],
+                [5.0, np.nan, 10.0],
+                [np.nan, np.nan, np.nan],
+                [-70.0, 0.4, 1.0],
+            ],
+        ),
+    ],
+)
+def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data, request):
+    parser = all_parsers
+    data = """\
+A,B,C
+1,2.,4.
+
+
+5.,NaN,10.0
+
+-70,.4,1
+"""
+
+    if sep == r"\s+":
+        data = data.replace(",", "  ")
+
+    if parser.engine == "pyarrow":
+        msg = "the 'pyarrow' engine does not support regex separators"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines
+            )
+        return
+
+    result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
+    expected = DataFrame(exp_data, columns=["A", "B", "C"])
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_whitespace_lines(all_parsers):
+    parser = all_parsers
+    data = """
+
+\t \t\t
+\t
+A,B,C
+\t 1,2.,4.
+5.,NaN,10.0
+"""
+    expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "data,expected",
+    [
+        (
+            """ A B C D
+a 1 2 3 4
+b 1 2 3 4
+c 1 2 3 4
+""",
+            DataFrame(
+                [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+                columns=["A", "B", "C", "D"],
+                index=["a", "b", "c"],
+            ),
+        ),
+        (
+            " a b c\n1 2 3 \n4 5 6\n 7 8 9",
+            DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
+        ),
+    ],
+)
+def test_whitespace_regex_separator(all_parsers, data, expected):
+    # see gh-6607
+    parser = all_parsers
+    if parser.engine == "pyarrow":
+        msg = "the 'pyarrow' engine does not support regex separators"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), sep=r"\s+")
+        return
+
+    result = parser.read_csv(StringIO(data), sep=r"\s+")
+    tm.assert_frame_equal(result, expected)
+
+
+def test_sub_character(all_parsers, csv_dir_path):
+    # see gh-16893
+    filename = os.path.join(csv_dir_path, "sub_char.csv")
+    expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
+
+    parser = all_parsers
+    result = parser.read_csv(filename)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
+def test_filename_with_special_chars(all_parsers, filename):
+    # see gh-15086.
+    parser = all_parsers
+    df = DataFrame({"a": [1, 2, 3]})
+
+    with tm.ensure_clean(filename) as path:
+        df.to_csv(path, index=False)
+
+        result = parser.read_csv(path)
+        tm.assert_frame_equal(result, df)
+
+
+def test_read_table_same_signature_as_read_csv(all_parsers):
+    # GH-34976
+    parser = all_parsers
+
+    table_sign = signature(parser.read_table)
+    csv_sign = signature(parser.read_csv)
+
+    assert table_sign.parameters.keys() == csv_sign.parameters.keys()
+    assert table_sign.return_annotation == csv_sign.return_annotation
+
+    for key, csv_param in csv_sign.parameters.items():
+        table_param = table_sign.parameters[key]
+        if key == "sep":
+            assert csv_param.default == ","
+            assert table_param.default == "\t"
+            assert table_param.annotation == csv_param.annotation
+            assert table_param.kind == csv_param.kind
+            continue
+
+        assert table_param == csv_param
+
+
+def test_read_table_equivalency_to_read_csv(all_parsers):
+    # see gh-21948
+    # As of 0.25.0, read_table is undeprecated
+    parser = all_parsers
+    data = "a\tb\n1\t2\n3\t4"
+    expected = parser.read_csv(StringIO(data), sep="\t")
+    result = parser.read_table(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("read_func", ["read_csv", "read_table"])
+def test_read_csv_and_table_sys_setprofile(all_parsers, read_func):
+    # GH#41069
+    parser = all_parsers
+    data = "a b\n0 1"
+
+    sys.setprofile(lambda *a, **k: None)
+    result = getattr(parser, read_func)(StringIO(data))
+    sys.setprofile(None)
+
+    expected = DataFrame({"a b": ["0 1"]})
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_first_row_bom(all_parsers):
+    # see gh-26545
+    parser = all_parsers
+    data = '''\ufeff"Head1"\t"Head2"\t"Head3"'''
+
+    result = parser.read_csv(StringIO(data), delimiter="\t")
+    expected = DataFrame(columns=["Head1", "Head2", "Head3"])
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_first_row_bom_unquoted(all_parsers):
+    # see gh-36343
+    parser = all_parsers
+    data = """\ufeffHead1\tHead2\tHead3"""
+
+    result = parser.read_csv(StringIO(data), delimiter="\t")
+    expected = DataFrame(columns=["Head1", "Head2", "Head3"])
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", range(1, 6))
+def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
+    # GH 28071
+    ref = DataFrame(
+        [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
+        columns=list("ab"),
+    )
+    csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False
+            )
+        return
+
+    df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
+    tm.assert_frame_equal(df, ref[:nrows])
+
+
+@skip_pyarrow
+def test_no_header_two_extra_columns(all_parsers):
+    # GH 26218
+    column_names = ["one", "two", "three"]
+    ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
+    stream = StringIO("foo,bar,baz,bam,blah")
+    parser = all_parsers
+    df = parser.read_csv_check_warnings(
+        ParserWarning,
+        "Length of header or names does not match length of data. "
+        "This leads to a loss of data with index_col=False.",
+        stream,
+        header=None,
+        names=column_names,
+        index_col=False,
+    )
+    tm.assert_frame_equal(df, ref)
+
+
+def test_read_csv_names_not_accepting_sets(all_parsers):
+    # GH 34946
+    data = """\
+    1,2,3
+    4,5,6\n"""
+    parser = all_parsers
+    with pytest.raises(ValueError, match="Names should be an ordered collection."):
+        parser.read_csv(StringIO(data), names=set("QAZ"))
+
+
+def test_read_table_delim_whitespace_default_sep(all_parsers):
+    # GH: 35958
+    f = StringIO("a  b  c\n1 -2 -3\n4  5   6")
+    parser = all_parsers
+
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_table is deprecated"
+
+    if parser.engine == "pyarrow":
+        msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(
+                FutureWarning, match=depr_msg, check_stacklevel=False
+            ):
+                parser.read_table(f, delim_whitespace=True)
+        return
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        result = parser.read_table(f, delim_whitespace=True)
+    expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("delimiter", [",", "\t"])
+def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
+    # GH: 35958
+    f = StringIO("a  b  c\n1 -2 -3\n4  5   6")
+    parser = all_parsers
+    msg = (
+        "Specified a delimiter with both sep and "
+        "delim_whitespace=True; you can only specify one."
+    )
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(f, delim_whitespace=True, sep=delimiter)
+
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
+
+
+def test_read_csv_delimiter_and_sep_no_default(all_parsers):
+    # GH#39823
+    f = StringIO("a,b\n1,2")
+    parser = all_parsers
+    msg = "Specified a sep and a delimiter; you can only specify one."
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(f, sep=" ", delimiter=".")
+
+
+@pytest.mark.parametrize("kwargs", [{"delimiter": "\n"}, {"sep": "\n"}])
+def test_read_csv_line_break_as_separator(kwargs, all_parsers):
+    # GH#43528
+    parser = all_parsers
+    data = """a,b,c
+1,2,3
+    """
+    msg = (
+        r"Specified \\n as separator or delimiter. This forces the python engine "
+        r"which does not accept a line terminator. Hence it is not allowed to use "
+        r"the line terminator as separator."
+    )
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(StringIO(data), **kwargs)
+
+
+@pytest.mark.parametrize("delimiter", [",", "\t"])
+def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
+    # GH: 35958
+    f = StringIO("a  b  c\n1 -2 -3\n4  5   6")
+    parser = all_parsers
+    msg = (
+        "Specified a delimiter with both sep and "
+        "delim_whitespace=True; you can only specify one."
+    )
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_table is deprecated"
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        with pytest.raises(ValueError, match=msg):
+            parser.read_table(f, delim_whitespace=True, sep=delimiter)
+
+        with pytest.raises(ValueError, match=msg):
+            parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
+
+
+@skip_pyarrow
+def test_dict_keys_as_names(all_parsers):
+    # GH: 36928
+    data = "1,2"
+
+    keys = {"a": int, "b": int}.keys()
+    parser = all_parsers
+
+    result = parser.read_csv(StringIO(data), names=keys)
+    expected = DataFrame({"a": [1], "b": [2]})
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xed in position 0
+def test_encoding_surrogatepass(all_parsers):
+    # GH39017
+    parser = all_parsers
+    content = b"\xed\xbd\xbf"
+    decoded = content.decode("utf-8", errors="surrogatepass")
+    expected = DataFrame({decoded: [decoded]}, index=[decoded * 2])
+    expected.index.name = decoded * 2
+
+    with tm.ensure_clean() as path:
+        Path(path).write_bytes(
+            content * 2 + b"," + content + b"\n" + content * 2 + b"," + content
+        )
+        df = parser.read_csv(path, encoding_errors="surrogatepass", index_col=0)
+        tm.assert_frame_equal(df, expected)
+        with pytest.raises(UnicodeDecodeError, match="'utf-8' codec can't decode byte"):
+            parser.read_csv(path)
+
+
+def test_malformed_second_line(all_parsers):
+    # see GH14782
+    parser = all_parsers
+    data = "\na\nb\n"
+    result = parser.read_csv(StringIO(data), skip_blank_lines=False, header=1)
+    expected = DataFrame({"a": ["b"]})
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_short_single_line(all_parsers):
+    # GH 47566
+    parser = all_parsers
+    columns = ["a", "b", "c"]
+    data = "1,2"
+    result = parser.read_csv(StringIO(data), header=None, names=columns)
+    expected = DataFrame({"a": [1], "b": [2], "c": [np.nan]})
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # ValueError: Length mismatch: Expected axis has 2 elements
+def test_short_multi_line(all_parsers):
+    # GH 47566
+    parser = all_parsers
+    columns = ["a", "b", "c"]
+    data = "1,2\n1,2"
+    result = parser.read_csv(StringIO(data), header=None, names=columns)
+    expected = DataFrame({"a": [1, 1], "b": [2, 2], "c": [np.nan, np.nan]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_seek(all_parsers):
+    # GH48646
+    parser = all_parsers
+    prefix = "### DATA\n"
+    content = "nkey,value\ntables,rectangular\n"
+    with tm.ensure_clean() as path:
+        Path(path).write_text(prefix + content, encoding="utf-8")
+        with open(path, encoding="utf-8") as file:
+            file.readline()
+            actual = parser.read_csv(file)
+        expected = parser.read_csv(StringIO(content))
+    tm.assert_frame_equal(actual, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_data_list.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_data_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b0ff9e08d349e0c8012ebd743285b285d15a846
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_data_list.py
@@ -0,0 +1,91 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+""" +import csv +from io import StringIO + +import pytest + +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.parsers import TextParser + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") + + +@xfail_pyarrow +def test_read_data_list(all_parsers): + parser = all_parsers + kwargs = {"index_col": 0} + data = "A,B,C\nfoo,1,2,3\nbar,4,5,6" + + data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]] + expected = parser.read_csv(StringIO(data), **kwargs) + + with TextParser(data_list, chunksize=2, **kwargs) as parser: + result = parser.read() + + tm.assert_frame_equal(result, expected) + + +def test_reader_list(all_parsers): + data = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +qux,12,13,14,15 +foo2,12,13,14,15 +bar2,12,13,14,15 +""" + parser = all_parsers + kwargs = {"index_col": 0} + + lines = list(csv.reader(StringIO(data))) + with TextParser(lines, chunksize=2, **kwargs) as reader: + chunks = list(reader) + + expected = parser.read_csv(StringIO(data), **kwargs) + + tm.assert_frame_equal(chunks[0], expected[:2]) + tm.assert_frame_equal(chunks[1], expected[2:4]) + tm.assert_frame_equal(chunks[2], expected[4:]) + + +def test_reader_list_skiprows(all_parsers): + data = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +qux,12,13,14,15 +foo2,12,13,14,15 +bar2,12,13,14,15 +""" + parser = all_parsers + kwargs = {"index_col": 0} + + lines = list(csv.reader(StringIO(data))) + with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader: + chunks = list(reader) + + expected = parser.read_csv(StringIO(data), **kwargs) + + tm.assert_frame_equal(chunks[0], expected[1:3]) + + +def test_read_csv_parse_simple_list(all_parsers): + parser = all_parsers + data = """foo +bar baz +qux foo +foo +bar""" + + result = parser.read_csv(StringIO(data), header=None) + expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"]) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_decimal.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_decimal.py new file mode 100644 index 0000000000000000000000000000000000000000..4ceca037f589a3fdb60421cc5366dc208f9edf5a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_decimal.py @@ -0,0 +1,72 @@ +""" +Tests that work on both the Python and C engines but do not have a +specific classification into the other test modules. +""" +from io import StringIO + +import pytest + +from pandas import DataFrame +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.mark.parametrize( + "data,thousands,decimal", + [ + ( + """A|B|C +1|2,334.01|5 +10|13|10. 
+""", + ",", + ".", + ), + ( + """A|B|C +1|2.334,01|5 +10|13|10, +""", + ".", + ",", + ), + ], +) +def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal): + parser = all_parsers + expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]}) + + if parser.engine == "pyarrow": + msg = "The 'thousands' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), sep="|", thousands=thousands, decimal=decimal + ) + return + + result = parser.read_csv( + StringIO(data), sep="|", thousands=thousands, decimal=decimal + ) + tm.assert_frame_equal(result, expected) + + +def test_euro_decimal_format(all_parsers): + parser = all_parsers + data = """Id;Number1;Number2;Text1;Text2;Number3 +1;1521,1541;187101,9543;ABC;poi;4,738797819 +2;121,12;14897,76;DEF;uyt;0,377320872 +3;878,158;108013,434;GHI;rez;2,735694704""" + + result = parser.read_csv(StringIO(data), sep=";", decimal=",") + expected = DataFrame( + [ + [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], + [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], + [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], + ], + columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], + ) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_file_buffer_url.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_file_buffer_url.py new file mode 100644 index 0000000000000000000000000000000000000000..a7a8d031da215b95b9145d1a55a6cf8e5d7d9555 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_file_buffer_url.py @@ -0,0 +1,478 @@ +""" +Tests that work on both the Python and C engines but do not have a +specific classification into the other test modules. +""" +from io import ( + BytesIO, + StringIO, +) +import os +import platform +from urllib.error import URLError +import uuid + +import numpy as np +import pytest + +from pandas.errors import ( + EmptyDataError, + ParserError, +) +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_url(all_parsers, csv_dir_path, httpserver): + parser = all_parsers + kwargs = {"sep": "\t"} + + local_path = os.path.join(csv_dir_path, "salaries.csv") + with open(local_path, encoding="utf-8") as f: + httpserver.serve_content(content=f.read()) + + url_result = parser.read_csv(httpserver.url, **kwargs) + + local_result = parser.read_csv(local_path, **kwargs) + tm.assert_frame_equal(url_result, local_result) + + +@pytest.mark.slow +def test_local_file(all_parsers, csv_dir_path): + parser = all_parsers + kwargs = {"sep": "\t"} + + local_path = os.path.join(csv_dir_path, "salaries.csv") + local_result = parser.read_csv(local_path, **kwargs) + url = "file://localhost/" + local_path + + try: + url_result = parser.read_csv(url, **kwargs) + tm.assert_frame_equal(url_result, local_result) + except URLError: + # Fails on some systems. 
+ pytest.skip("Failing on: " + " ".join(platform.uname())) + + +@xfail_pyarrow # AssertionError: DataFrame.index are different +def test_path_path_lib(all_parsers): + parser = all_parsers + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0)) + tm.assert_frame_equal(df, result) + + +@xfail_pyarrow # AssertionError: DataFrame.index are different +def test_path_local_path(all_parsers): + parser = all_parsers + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + result = tm.round_trip_localpath( + df.to_csv, lambda p: parser.read_csv(p, index_col=0) + ) + tm.assert_frame_equal(df, result) + + +def test_nonexistent_path(all_parsers): + # gh-2428: pls no segfault + # gh-14086: raise more helpful FileNotFoundError + # GH#29233 "File foo" instead of "File b'foo'" + parser = all_parsers + path = f"{uuid.uuid4()}.csv" + + msg = r"\[Errno 2\]" + with pytest.raises(FileNotFoundError, match=msg) as e: + parser.read_csv(path) + assert path == e.value.filename + + +@td.skip_if_windows # os.chmod does not work in windows +def test_no_permission(all_parsers): + # GH 23784 + parser = all_parsers + + msg = r"\[Errno 13\]" + with tm.ensure_clean() as path: + os.chmod(path, 0) # make file unreadable + + # verify that this process cannot open the file (not running as sudo) + try: + with open(path, encoding="utf-8"): + pass + pytest.skip("Running as sudo.") + except PermissionError: + pass + + with pytest.raises(PermissionError, match=msg) as e: + parser.read_csv(path) + assert path == e.value.filename + + +@pytest.mark.parametrize( + "data,kwargs,expected,msg", + [ + # gh-10728: WHITESPACE_LINE + ( + "a,b,c\n4,5,6\n ", + {}, + DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), + None, + ), + # gh-10548: EAT_LINE_COMMENT + ( + "a,b,c\n4,5,6\n#comment", + {"comment": "#"}, + DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), + None, + ), + # EAT_CRNL_NOP + ( + "a,b,c\n4,5,6\n\r", + {}, + DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), + None, + ), + # EAT_COMMENT + ( + "a,b,c\n4,5,6#comment", + {"comment": "#"}, + DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), + None, + ), + # SKIP_LINE + ( + "a,b,c\n4,5,6\nskipme", + {"skiprows": [2]}, + DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), + None, + ), + # EAT_LINE_COMMENT + ( + "a,b,c\n4,5,6\n#comment", + {"comment": "#", "skip_blank_lines": False}, + DataFrame([[4, 5, 6]], columns=["a", "b", "c"]), + None, + ), + # IN_FIELD + ( + "a,b,c\n4,5,6\n ", + {"skip_blank_lines": False}, + DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]), + None, + ), + # EAT_CRNL + ( + "a,b,c\n4,5,6\n\r", + {"skip_blank_lines": False}, + DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]), + None, + ), + # ESCAPED_CHAR + ( + "a,b,c\n4,5,6\n\\", + {"escapechar": "\\"}, + None, + "(EOF following escape character)|(unexpected end of data)", + ), + # ESCAPE_IN_QUOTED_FIELD + ( + 'a,b,c\n4,5,6\n"\\', + {"escapechar": "\\"}, + None, + "(EOF inside string starting at row 2)|(unexpected end of data)", + ), + # IN_QUOTED_FIELD + ( + 'a,b,c\n4,5,6\n"', + {"escapechar": "\\"}, + None, + "(EOF inside string starting at row 2)|(unexpected end of data)", + ), + ], + ids=[ + "whitespace-line", + "eat-line-comment", + "eat-crnl-nop", + "eat-comment", + 
"skip-line", + "eat-line-comment", + "in-field", + "eat-crnl", + "escaped-char", + "escape-in-quoted-field", + "in-quoted-field", + ], +) +def test_eof_states(all_parsers, data, kwargs, expected, msg, request): + # see gh-10728, gh-10548 + parser = all_parsers + + if parser.engine == "pyarrow" and "comment" in kwargs: + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), **kwargs) + return + + if parser.engine == "pyarrow" and "\r" not in data: + # pandas.errors.ParserError: CSV parse error: Expected 3 columns, got 1: + # ValueError: skiprows argument must be an integer when using engine='pyarrow' + # AssertionError: Regex pattern did not match. + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + if expected is None: + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), **kwargs) + else: + result = parser.read_csv(StringIO(data), **kwargs) + tm.assert_frame_equal(result, expected) + + +def test_temporary_file(all_parsers): + # see gh-13398 + parser = all_parsers + data = "0 0" + + with tm.ensure_clean(mode="w+", return_filelike=True) as new_file: + new_file.write(data) + new_file.flush() + new_file.seek(0) + + if parser.engine == "pyarrow": + msg = "the 'pyarrow' engine does not support regex separators" + with pytest.raises(ValueError, match=msg): + parser.read_csv(new_file, sep=r"\s+", header=None) + return + + result = parser.read_csv(new_file, sep=r"\s+", header=None) + + expected = DataFrame([[0, 0]]) + tm.assert_frame_equal(result, expected) + + +def test_internal_eof_byte(all_parsers): + # see gh-5500 + parser = all_parsers + data = "a,b\n1\x1a,2" + + expected = DataFrame([["1\x1a", 2]], columns=["a", "b"]) + result = parser.read_csv(StringIO(data)) + tm.assert_frame_equal(result, expected) + + +def test_internal_eof_byte_to_file(all_parsers): + # see gh-16559 + parser = all_parsers + data = b'c1,c2\r\n"test \x1a test", test\r\n' + expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"]) + path = f"__{uuid.uuid4()}__.csv" + + with tm.ensure_clean(path) as path: + with open(path, "wb") as f: + f.write(data) + + result = parser.read_csv(path) + tm.assert_frame_equal(result, expected) + + +def test_file_handle_string_io(all_parsers): + # gh-14418 + # + # Don't close user provided file handles. + parser = all_parsers + data = "a,b\n1,2" + + fh = StringIO(data) + parser.read_csv(fh) + assert not fh.closed + + +def test_file_handles_with_open(all_parsers, csv1): + # gh-14418 + # + # Don't close user provided file handles. 
+ parser = all_parsers + + for mode in ["r", "rb"]: + with open(csv1, mode, encoding="utf-8" if mode == "r" else None) as f: + parser.read_csv(f) + assert not f.closed + + +def test_invalid_file_buffer_class(all_parsers): + # see gh-15337 + class InvalidBuffer: + pass + + parser = all_parsers + msg = "Invalid file path or buffer object type" + + with pytest.raises(ValueError, match=msg): + parser.read_csv(InvalidBuffer()) + + +def test_invalid_file_buffer_mock(all_parsers): + # see gh-15337 + parser = all_parsers + msg = "Invalid file path or buffer object type" + + class Foo: + pass + + with pytest.raises(ValueError, match=msg): + parser.read_csv(Foo()) + + +def test_valid_file_buffer_seems_invalid(all_parsers): + # gh-16135: we want to ensure that "tell" and "seek" + # aren't actually being used when we call `read_csv` + # + # Thus, while the object may look "invalid" (these + # methods are attributes of the `StringIO` class), + # it is still a valid file-object for our purposes. + class NoSeekTellBuffer(StringIO): + def tell(self): + raise AttributeError("No tell method") + + def seek(self, pos, whence=0): + raise AttributeError("No seek method") + + data = "a\n1" + parser = all_parsers + expected = DataFrame({"a": [1]}) + + result = parser.read_csv(NoSeekTellBuffer(data)) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("io_class", [StringIO, BytesIO]) +@pytest.mark.parametrize("encoding", [None, "utf-8"]) +def test_read_csv_file_handle(all_parsers, io_class, encoding): + """ + Test whether read_csv does not close user-provided file handles. + + GH 36980 + """ + parser = all_parsers + expected = DataFrame({"a": [1], "b": [2]}) + + content = "a,b\n1,2" + handle = io_class(content.encode("utf-8") if io_class == BytesIO else content) + + tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected) + assert not handle.closed + + +def test_memory_map_compression(all_parsers, compression): + """ + Support memory map for compressed files. 
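+ (memory_map=True is combined with every codec supplied by the compression fixture.)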
+ + GH 37621 + """ + parser = all_parsers + expected = DataFrame({"a": [1], "b": [2]}) + + with tm.ensure_clean() as path: + expected.to_csv(path, index=False, compression=compression) + + if parser.engine == "pyarrow": + msg = "The 'memory_map' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(path, memory_map=True, compression=compression) + return + + result = parser.read_csv(path, memory_map=True, compression=compression) + + tm.assert_frame_equal( + result, + expected, + ) + + +def test_context_manager(all_parsers, datapath): + # make sure that opened files are closed + parser = all_parsers + + path = datapath("io", "data", "csv", "iris.csv") + + if parser.engine == "pyarrow": + msg = "The 'chunksize' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(path, chunksize=1) + return + + reader = parser.read_csv(path, chunksize=1) + assert not reader.handles.handle.closed + try: + with reader: + next(reader) + assert False + except AssertionError: + assert reader.handles.handle.closed + + +def test_context_manageri_user_provided(all_parsers, datapath): + # make sure that user-provided handles are not closed + parser = all_parsers + + with open(datapath("io", "data", "csv", "iris.csv"), encoding="utf-8") as path: + if parser.engine == "pyarrow": + msg = "The 'chunksize' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(path, chunksize=1) + return + + reader = parser.read_csv(path, chunksize=1) + assert not reader.handles.handle.closed + try: + with reader: + next(reader) + assert False + except AssertionError: + assert not reader.handles.handle.closed + + +@skip_pyarrow # ParserError: Empty CSV file +def test_file_descriptor_leak(all_parsers, using_copy_on_write): + # GH 31488 + parser = all_parsers + with tm.ensure_clean() as path: + with pytest.raises(EmptyDataError, match="No columns to parse from file"): + parser.read_csv(path) + + +def test_memory_map(all_parsers, csv_dir_path): + mmap_file = os.path.join(csv_dir_path, "test_mmap.csv") + parser = all_parsers + + expected = DataFrame( + {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]} + ) + + if parser.engine == "pyarrow": + msg = "The 'memory_map' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(mmap_file, memory_map=True) + return + + result = parser.read_csv(mmap_file, memory_map=True) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_float.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_float.py new file mode 100644 index 0000000000000000000000000000000000000000..6069c239362976cc242548a0dc52236ddb7d37d8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_float.py @@ -0,0 +1,79 @@ +""" +Tests that work on both the Python and C engines but do not have a +specific classification into the other test modules. 
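+(Covers float parsing: literal inf values, exponents without digits, and extreme exponents.)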
+""" +from io import StringIO + +import numpy as np +import pytest + +from pandas.compat import is_platform_linux + +from pandas import DataFrame +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +@skip_pyarrow # ParserError: CSV parse error: Empty CSV file or block +def test_float_parser(all_parsers): + # see gh-9565 + parser = all_parsers + data = "45e-1,4.5,45.,inf,-inf" + result = parser.read_csv(StringIO(data), header=None) + + expected = DataFrame([[float(s) for s in data.split(",")]]) + tm.assert_frame_equal(result, expected) + + +def test_scientific_no_exponent(all_parsers_all_precisions): + # see gh-12215 + df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]}) + data = df.to_csv(index=False) + parser, precision = all_parsers_all_precisions + + df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision) + tm.assert_frame_equal(df_roundtrip, df) + + +@pytest.mark.parametrize( + "neg_exp", + [ + -617, + -100000, + pytest.param(-99999999999999999, marks=pytest.mark.skip_ubsan), + ], +) +def test_very_negative_exponent(all_parsers_all_precisions, neg_exp): + # GH#38753 + parser, precision = all_parsers_all_precisions + + data = f"data\n10E{neg_exp}" + result = parser.read_csv(StringIO(data), float_precision=precision) + expected = DataFrame({"data": [0.0]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.skip_ubsan +@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different +@pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999]) +def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request): + # GH#38753 + parser, precision = all_parsers_all_precisions + data = f"data\n10E{exp}" + result = parser.read_csv(StringIO(data), float_precision=precision) + if precision == "round_trip": + if exp == 999999999999999999 and is_platform_linux(): + mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result") + request.applymarker(mark) + + value = np.inf if exp > 0 else 0.0 + expected = DataFrame({"data": [value]}) + else: + expected = DataFrame({"data": [f"10E{exp}"]}) + + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_inf.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_inf.py new file mode 100644 index 0000000000000000000000000000000000000000..74596b178d35d885f6cf405ad57fed680c206b7f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_inf.py @@ -0,0 +1,78 @@ +""" +Tests that work on both the Python and C engines but do not have a +specific classification into the other test modules. 
+""" +from io import StringIO + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + option_context, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") + + +@xfail_pyarrow # AssertionError: DataFrame.index are different +@pytest.mark.parametrize("na_filter", [True, False]) +def test_inf_parsing(all_parsers, na_filter): + parser = all_parsers + data = """\ +,A +a,inf +b,-inf +c,+Inf +d,-Inf +e,INF +f,-INF +g,+INf +h,-INf +i,inF +j,-inF""" + expected = DataFrame( + {"A": [float("inf"), float("-inf")] * 5}, + index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], + ) + result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # AssertionError: DataFrame.index are different +@pytest.mark.parametrize("na_filter", [True, False]) +def test_infinity_parsing(all_parsers, na_filter): + parser = all_parsers + data = """\ +,A +a,Infinity +b,-Infinity +c,+Infinity +""" + expected = DataFrame( + {"A": [float("infinity"), float("-infinity"), float("+infinity")]}, + index=["a", "b", "c"], + ) + result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter) + tm.assert_frame_equal(result, expected) + + +def test_read_csv_with_use_inf_as_na(all_parsers): + # https://github.com/pandas-dev/pandas/issues/35493 + parser = all_parsers + data = "1.0\nNaN\n3.0" + msg = "use_inf_as_na option is deprecated" + warn = FutureWarning + if parser.engine == "pyarrow": + warn = (FutureWarning, DeprecationWarning) + + with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): + with option_context("use_inf_as_na", True): + result = parser.read_csv(StringIO(data), header=None) + expected = DataFrame([1.0, np.nan, 3.0]) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_iterator.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_iterator.py new file mode 100644 index 0000000000000000000000000000000000000000..a521c84aa007d921a50a8c3ae63c19bb9585c538 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_iterator.py @@ -0,0 +1,134 @@ +""" +Tests that work on both the Python and C engines but do not have a +specific classification into the other test modules. 
+""" +from io import StringIO + +import pytest + +from pandas import ( + DataFrame, + concat, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +def test_iterator(all_parsers): + # see gh-6607 + data = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +qux,12,13,14,15 +foo2,12,13,14,15 +bar2,12,13,14,15 +""" + parser = all_parsers + kwargs = {"index_col": 0} + + expected = parser.read_csv(StringIO(data), **kwargs) + + if parser.engine == "pyarrow": + msg = "The 'iterator' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), iterator=True, **kwargs) + return + + with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader: + first_chunk = reader.read(3) + tm.assert_frame_equal(first_chunk, expected[:3]) + + last_chunk = reader.read(5) + tm.assert_frame_equal(last_chunk, expected[3:]) + + +def test_iterator2(all_parsers): + parser = all_parsers + data = """A,B,C +foo,1,2,3 +bar,4,5,6 +baz,7,8,9 +""" + + if parser.engine == "pyarrow": + msg = "The 'iterator' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), iterator=True) + return + + with parser.read_csv(StringIO(data), iterator=True) as reader: + result = list(reader) + + expected = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["foo", "bar", "baz"], + columns=["A", "B", "C"], + ) + tm.assert_frame_equal(result[0], expected) + + +def test_iterator_stop_on_chunksize(all_parsers): + # gh-3967: stopping iteration when chunksize is specified + parser = all_parsers + data = """A,B,C +foo,1,2,3 +bar,4,5,6 +baz,7,8,9 +""" + if parser.engine == "pyarrow": + msg = "The 'chunksize' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), chunksize=1) + return + + with parser.read_csv(StringIO(data), chunksize=1) as reader: + result = list(reader) + + assert len(result) == 3 + expected = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["foo", "bar", "baz"], + columns=["A", "B", "C"], + ) + tm.assert_frame_equal(concat(result), expected) + + +@pytest.mark.parametrize( + "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}] +) +def test_iterator_skipfooter_errors(all_parsers, kwargs): + msg = "'skipfooter' not supported for iteration" + parser = all_parsers + data = "a\n1\n2" + + if parser.engine == "pyarrow": + msg = ( + "The '(chunksize|iterator)' option is not supported with the " + "'pyarrow' engine" + ) + + with pytest.raises(ValueError, match=msg): + with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _: + pass + + +def test_iteration_open_handle(all_parsers): + parser = all_parsers + kwargs = {"header": None} + + with tm.ensure_clean() as path: + with open(path, "w", encoding="utf-8") as f: + f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG") + + with open(path, encoding="utf-8") as f: + for line in f: + if "CCC" in line: + break + + result = parser.read_csv(f, **kwargs) + expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]}) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_read_errors.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_read_errors.py new file mode 100644 index 
0000000000000000000000000000000000000000..f5a724bad4fa2b899ed536d38163a0545160fe8b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_read_errors.py @@ -0,0 +1,320 @@ +""" +Tests that work on the Python, C and PyArrow engines but do not have a +specific classification into the other test modules. +""" +import codecs +import csv +from io import StringIO +import os +from pathlib import Path + +import numpy as np +import pytest + +from pandas.compat import PY311 +from pandas.errors import ( + EmptyDataError, + ParserError, + ParserWarning, +) + +from pandas import DataFrame +import pandas._testing as tm + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +def test_empty_decimal_marker(all_parsers): + data = """A|B|C +1|2,334|5 +10|13|10. +""" + # Parsers support only length-1 decimals + msg = "Only length-1 decimal markers supported" + parser = all_parsers + + if parser.engine == "pyarrow": + msg = ( + "only single character unicode strings can be " + "converted to Py_UCS4, got length 0" + ) + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), decimal="") + + +def test_bad_stream_exception(all_parsers, csv_dir_path): + # see gh-13652 + # + # This test validates that both the Python engine and C engine will + # raise UnicodeDecodeError instead of C engine raising ParserError + # and swallowing the exception that caused read to fail. + path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv") + codec = codecs.lookup("utf-8") + utf8 = codecs.lookup("utf-8") + parser = all_parsers + msg = "'utf-8' codec can't decode byte" + + # Stream must be binary UTF8. + with open(path, "rb") as handle, codecs.StreamRecoder( + handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter + ) as stream: + with pytest.raises(UnicodeDecodeError, match=msg): + parser.read_csv(stream) + + +def test_malformed(all_parsers): + # see gh-6607 + parser = all_parsers + data = """ignore +A,B,C +1,2,3 # comment +1,2,3,4,5 +2,3,4 +""" + msg = "Expected 3 fields in line 4, saw 5" + err = ParserError + if parser.engine == "pyarrow": + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + err = ValueError + with pytest.raises(err, match=msg): + parser.read_csv(StringIO(data), header=1, comment="#") + + +@pytest.mark.parametrize("nrows", [5, 3, None]) +def test_malformed_chunks(all_parsers, nrows): + data = """ignore +A,B,C +skip +1,2,3 +3,5,10 # comment +1,2,3,4,5 +2,3,4 +""" + parser = all_parsers + + if parser.engine == "pyarrow": + msg = "The 'iterator' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), + header=1, + comment="#", + iterator=True, + chunksize=1, + skiprows=[2], + ) + return + + msg = "Expected 3 fields in line 6, saw 5" + with parser.read_csv( + StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2] + ) as reader: + with pytest.raises(ParserError, match=msg): + reader.read(nrows) + + +@xfail_pyarrow # does not raise +def test_catch_too_many_names(all_parsers): + # see gh-5156 + data = """\ +1,2,3 +4,,6 +7,8,9 +10,11,12\n""" + parser = all_parsers + msg = ( + "Too many columns specified: expected 4 and found 3" + if parser.engine == "c" + else "Number of passed names did not match " + "number of header fields in the file" + ) + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", 
"d"]) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5]) +def test_raise_on_no_columns(all_parsers, nrows): + parser = all_parsers + data = "\n" * nrows + + msg = "No columns to parse from file" + with pytest.raises(EmptyDataError, match=msg): + parser.read_csv(StringIO(data)) + + +def test_unexpected_keyword_parameter_exception(all_parsers): + # GH-34976 + parser = all_parsers + + msg = "{}\\(\\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg.format("read_csv")): + parser.read_csv("foo.csv", foo=1) + with pytest.raises(TypeError, match=msg.format("read_table")): + parser.read_table("foo.tsv", foo=1) + + +def test_suppress_error_output(all_parsers): + # see gh-15925 + parser = all_parsers + data = "a\n1\n1,2,3\n4\n5,6,7" + expected = DataFrame({"a": [1, 4]}) + + result = parser.read_csv(StringIO(data), on_bad_lines="skip") + tm.assert_frame_equal(result, expected) + + +def test_error_bad_lines(all_parsers): + # see gh-15925 + parser = all_parsers + data = "a\n1\n1,2,3\n4\n5,6,7" + + msg = "Expected 1 fields in line 3, saw 3" + + if parser.engine == "pyarrow": + # "CSV parse error: Expected 1 columns, got 3: 1,2,3" + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), on_bad_lines="error") + + +def test_warn_bad_lines(all_parsers): + # see gh-15925 + parser = all_parsers + data = "a\n1\n1,2,3\n4\n5,6,7" + expected = DataFrame({"a": [1, 4]}) + match_msg = "Skipping line" + + expected_warning = ParserWarning + if parser.engine == "pyarrow": + match_msg = "Expected 1 columns, but found 3: 1,2,3" + expected_warning = (ParserWarning, DeprecationWarning) + + with tm.assert_produces_warning( + expected_warning, match=match_msg, check_stacklevel=False + ): + result = parser.read_csv(StringIO(data), on_bad_lines="warn") + tm.assert_frame_equal(result, expected) + + +def test_read_csv_wrong_num_columns(all_parsers): + # Too few columns. 
+ data = """A,B,C,D,E,F +1,2,3,4,5,6 +6,7,8,9,10,11,12 +11,12,13,14,15,16 +""" + parser = all_parsers + msg = "Expected 6 fields in line 3, saw 7" + + if parser.engine == "pyarrow": + # Expected 6 columns, got 7: 6,7,8,9,10,11,12 + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data)) + + +def test_null_byte_char(request, all_parsers): + # see gh-2741 + data = "\x00,foo" + names = ["a", "b"] + parser = all_parsers + + if parser.engine == "c" or (parser.engine == "python" and PY311): + if parser.engine == "python" and PY311: + request.applymarker( + pytest.mark.xfail( + reason="In Python 3.11, this is read as an empty character not null" + ) + ) + expected = DataFrame([[np.nan, "foo"]], columns=names) + out = parser.read_csv(StringIO(data), names=names) + tm.assert_frame_equal(out, expected) + else: + if parser.engine == "pyarrow": + # CSV parse error: Empty CSV file or block: " + # cannot infer number of columns" + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + else: + msg = "NULL byte detected" + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), names=names) + + +@pytest.mark.filterwarnings("always::ResourceWarning") +def test_open_file(request, all_parsers): + # GH 39024 + parser = all_parsers + + msg = "Could not determine delimiter" + err = csv.Error + if parser.engine == "c": + msg = "the 'c' engine does not support sep=None with delim_whitespace=False" + err = ValueError + elif parser.engine == "pyarrow": + msg = ( + "the 'pyarrow' engine does not support sep=None with delim_whitespace=False" + ) + err = ValueError + + with tm.ensure_clean() as path: + file = Path(path) + file.write_bytes(b"\xe4\na\n1") + + with tm.assert_produces_warning(None): + # should not trigger a ResourceWarning + with pytest.raises(err, match=msg): + parser.read_csv(file, sep=None, encoding_errors="replace") + + +def test_invalid_on_bad_line(all_parsers): + parser = all_parsers + data = "a\n1\n1,2,3\n4\n5,6,7" + with pytest.raises(ValueError, match="Argument abc is invalid for on_bad_lines"): + parser.read_csv(StringIO(data), on_bad_lines="abc") + + +def test_bad_header_uniform_error(all_parsers): + parser = all_parsers + data = "+++123456789...\ncol1,col2,col3,col4\n1,2,3,4\n" + msg = "Expected 2 fields in line 2, saw 4" + if parser.engine == "c": + msg = ( + "Could not construct index. Requested to use 1 " + "number of columns, but 3 left to parse." 
+ ) + elif parser.engine == "pyarrow": + # "CSV parse error: Expected 1 columns, got 4: col1,col2,col3,col4" + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), index_col=0, on_bad_lines="error") + + +def test_on_bad_lines_warn_correct_formatting(all_parsers): + # see gh-15925 + parser = all_parsers + data = """1,2 +a,b +a,b,c +a,b,d +a,b +""" + expected = DataFrame({"1": "a", "2": ["b"] * 2}) + match_msg = "Skipping line" + + expected_warning = ParserWarning + if parser.engine == "pyarrow": + match_msg = "Expected 2 columns, but found 3: a,b,c" + expected_warning = (ParserWarning, DeprecationWarning) + + with tm.assert_produces_warning( + expected_warning, match=match_msg, check_stacklevel=False + ): + result = parser.read_csv(StringIO(data), on_bad_lines="warn") + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4aa0bde367d82ede8fe3f24ef8adcff88b0cece Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69ed92a0d6b61f4d0c819c3aabd72b0630c9b3f1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f44c21c593185ebf48f83bcf9aca4fd4e606278f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65d41469171f7d9a3fdab7f62bb19ea39daa6b51 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py new file mode 100644 index 0000000000000000000000000000000000000000..bc66189ca064e5f0cc474cbd072747978f60e2c3 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -0,0 +1,194 @@ +""" +Tests the usecols functionality during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO + +import pytest + +from pandas import ( + DataFrame, + Index, + Timestamp, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + +_msg_pyarrow_requires_names = ( + "The pyarrow engine does not allow 'usecols' to be integer column " + "positions. Pass a list of string column names instead." +) + + +@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]]) +def test_usecols_with_parse_dates(all_parsers, usecols): + # see gh-9755 + data = """a,b,c,d,e +0,1,2014-01-01,09:00,4 +0,1,2014-01-02,10:00,4""" + parser = all_parsers + parse_dates = [[1, 2]] + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + + cols = { + "a": [0, 0], + "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], + } + expected = DataFrame(cols, columns=["c_d", "a"]) + if parser.engine == "pyarrow": + with pytest.raises(ValueError, match=_msg_pyarrow_requires_names): + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + parser.read_csv( + StringIO(data), usecols=usecols, parse_dates=parse_dates + ) + return + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), usecols=usecols, parse_dates=parse_dates + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # pyarrow.lib.ArrowKeyError: Column 'fdate' in include_columns +def test_usecols_with_parse_dates2(all_parsers): + # see gh-13604 + parser = all_parsers + data = """2008-02-07 09:40,1032.43 +2008-02-07 09:50,1042.54 +2008-02-07 10:00,1051.65""" + + names = ["date", "values"] + usecols = names[:] + parse_dates = [0] + + index = Index( + [ + Timestamp("2008-02-07 09:40"), + Timestamp("2008-02-07 09:50"), + Timestamp("2008-02-07 10:00"), + ], + name="date", + ) + cols = {"values": [1032.43, 1042.54, 1051.65]} + expected = DataFrame(cols, index=index) + + result = parser.read_csv( + StringIO(data), + parse_dates=parse_dates, + index_col=0, + usecols=usecols, + header=None, + names=names, + ) + tm.assert_frame_equal(result, expected) + + +def test_usecols_with_parse_dates3(all_parsers): + # see gh-14792 + parser = all_parsers + data = """a,b,c,d,e,f,g,h,i,j +2016/09/21,1,1,2,3,4,5,6,7,8""" + + usecols = list("abcdefghij") + parse_dates = [0] + + cols = { + "a": Timestamp("2016-09-21").as_unit("ns"), + "b": [1], + "c": [1], + "d": [2], + "e": [3], + "f": [4], + "g": [5], + "h": [6], + "i": [7], + "j": [8], + } + expected = DataFrame(cols, columns=usecols) + + result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates) + tm.assert_frame_equal(result, expected) + + +def test_usecols_with_parse_dates4(all_parsers): + data = "a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8" + usecols = list("abcdefghij") + parse_dates = [[0, 1]] + parser = all_parsers + + cols = { + "a_b": "2016/09/21 1", + "c": [1], + "d": [2], + "e": [3], + "f": [4], + "g": [5], + "h": [6], + "i": [7], + "j": [8], + } + expected = DataFrame(cols, columns=["a_b"] + list("cdefghij")) + + depr_msg = ( + "Support for nested sequences for 
'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), + usecols=usecols, + parse_dates=parse_dates, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]]) +@pytest.mark.parametrize( + "names", + [ + list("abcde"), # Names span all columns in original data. + list("acd"), # Names span only the selected columns. + ], +) +def test_usecols_with_parse_dates_and_names(all_parsers, usecols, names, request): + # see gh-9755 + s = """0,1,2014-01-01,09:00,4 +0,1,2014-01-02,10:00,4""" + parse_dates = [[1, 2]] + parser = all_parsers + + if parser.engine == "pyarrow" and not (len(names) == 3 and usecols[0] == 0): + mark = pytest.mark.xfail( + reason="Length mismatch in some cases, UserWarning in other" + ) + request.applymarker(mark) + + cols = { + "a": [0, 0], + "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], + } + expected = DataFrame(cols, columns=["c_d", "a"]) + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(s), names=names, parse_dates=parse_dates, usecols=usecols + ) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_strings.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_strings.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ade41d384659ae5571742b7d22620727365ad3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_strings.py @@ -0,0 +1,96 @@ +""" +Tests the usecols functionality during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO + +import pytest + +from pandas import DataFrame +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +def test_usecols_with_unicode_strings(all_parsers): + # see gh-13219 + data = """AAA,BBB,CCC,DDD +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + + exp_data = { + "AAA": { + 0: 0.056674972999999997, + 1: 2.6132309819999997, + 2: 3.5689350380000002, + }, + "BBB": {0: 8, 1: 2, 2: 7}, + } + expected = DataFrame(exp_data) + + result = parser.read_csv(StringIO(data), usecols=["AAA", "BBB"]) + tm.assert_frame_equal(result, expected) + + +def test_usecols_with_single_byte_unicode_strings(all_parsers): + # see gh-13219 + data = """A,B,C,D +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + + exp_data = { + "A": { + 0: 0.056674972999999997, + 1: 2.6132309819999997, + 2: 3.5689350380000002, + }, + "B": {0: 8, 1: 2, 2: 7}, + } + expected = DataFrame(exp_data) + + result = parser.read_csv(StringIO(data), usecols=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("usecols", [["AAA", b"BBB"], [b"AAA", "BBB"]]) +def test_usecols_with_mixed_encoding_strings(all_parsers, usecols): + data = """AAA,BBB,CCC,DDD +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + _msg_validate_usecols_arg = ( + "'usecols' must either be list-like " + "of all strings, all unicode, all 
" + "integers or a callable." + ) + with pytest.raises(ValueError, match=_msg_validate_usecols_arg): + parser.read_csv(StringIO(data), usecols=usecols) + + +@pytest.mark.parametrize("usecols", [["あああ", "いい"], ["あああ", "いい"]]) +def test_usecols_with_multi_byte_characters(all_parsers, usecols): + data = """あああ,いい,ううう,ええええ +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + + exp_data = { + "あああ": { + 0: 0.056674972999999997, + 1: 2.6132309819999997, + 2: 3.5689350380000002, + }, + "いい": {0: 8, 1: 2, 2: 7}, + } + expected = DataFrame(exp_data) + + result = parser.read_csv(StringIO(data), usecols=usecols) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0908a7ca0a3d8538297b2f7f3fedf4e31357e14c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..522a2cd1d9f78e4b322132ec87f37b4a7aa780cb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d88351cc8cdf6b0ae399a225b7cd3e037aea93a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68d29601bc73a443f3776769f040c59972a0a86b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b278d10d09ef7e473baa7288cfdcb740cf21d853 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f953b71a9967d23c6c50d499166942b544734142 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc 
differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dcdb3182f3b599476d71fd7c957d0a254c67e37 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d10310fae2c3a154cbb5634a7b8b854d9155731e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f781ae39e6da6eb675d4af6c87ab0580b0303cce Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c64a9be4c23948bbb9f010bec0303aedec5bd5b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..466e4ae8bb99c6c4e0f24045f71b4c3aa27b7851 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py @@ -0,0 +1,9 @@ +import uuid + +import pytest + + +@pytest.fixture +def setup_path(): + """Fixture for setup path""" + return f"tmp.__{uuid.uuid4()}__.h5" diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..58ebdfe7696b4bc9f37c193a3e2a4f19f55c3fe8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py @@ -0,0 +1,214 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + Series, + _testing as tm, + concat, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + +pytestmark = pytest.mark.single_cpu + + +def test_categorical(setup_path): + with ensure_clean_store(setup_path) as store: + # Basic + _maybe_remove(store, "s") + s = Series( + Categorical( + ["a", "b", "b", "a", "a", "c"], + categories=["a", "b", "c", "d"], + ordered=False, + ) + ) + store.append("s", s, format="table") + result = store.select("s") + 
tm.assert_series_equal(s, result) + + _maybe_remove(store, "s_ordered") + s = Series( + Categorical( + ["a", "b", "b", "a", "a", "c"], + categories=["a", "b", "c", "d"], + ordered=True, + ) + ) + store.append("s_ordered", s, format="table") + result = store.select("s_ordered") + tm.assert_series_equal(s, result) + + _maybe_remove(store, "df") + df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]}) + store.append("df", df, format="table") + result = store.select("df") + tm.assert_frame_equal(result, df) + + # Dtypes + _maybe_remove(store, "si") + s = Series([1, 1, 2, 2, 3, 4, 5]).astype("category") + store.append("si", s) + result = store.select("si") + tm.assert_series_equal(result, s) + + _maybe_remove(store, "si2") + s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype("category") + store.append("si2", s) + result = store.select("si2") + tm.assert_series_equal(result, s) + + # Multiple + _maybe_remove(store, "df2") + df2 = df.copy() + df2["s2"] = Series(list("abcdefg")).astype("category") + store.append("df2", df2) + result = store.select("df2") + tm.assert_frame_equal(result, df2) + + # Make sure the metadata is OK + info = store.info() + assert "/df2 " in info + # df2._mgr.blocks[0] and df2._mgr.blocks[2] are Categorical + assert "/df2/meta/values_block_0/meta" in info + assert "/df2/meta/values_block_2/meta" in info + + # unordered + _maybe_remove(store, "s2") + s = Series( + Categorical( + ["a", "b", "b", "a", "a", "c"], + categories=["a", "b", "c", "d"], + ordered=False, + ) + ) + store.append("s2", s, format="table") + result = store.select("s2") + tm.assert_series_equal(result, s) + + # Query + _maybe_remove(store, "df3") + store.append("df3", df, data_columns=["s"]) + expected = df[df.s.isin(["b", "c"])] + result = store.select("df3", where=['s in ["b","c"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(["b", "c"])] + result = store.select("df3", where=['s = ["b","c"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(["d"])] + result = store.select("df3", where=['s in ["d"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(["f"])] + result = store.select("df3", where=['s in ["f"]']) + tm.assert_frame_equal(result, expected) + + # Appending with same categories is ok + store.append("df3", df) + + df = concat([df, df]) + expected = df[df.s.isin(["b", "c"])] + result = store.select("df3", where=['s in ["b","c"]']) + tm.assert_frame_equal(result, expected) + + # Appending must have the same categories + df3 = df.copy() + df3["s"] = df3["s"].cat.remove_unused_categories() + + msg = "cannot append a categorical with different categories to the existing" + with pytest.raises(ValueError, match=msg): + store.append("df3", df3) + + # Remove, and make sure meta data is removed (its a recursive + # removal so should be). + result = store.select("df3/meta/s/meta") + assert result is not None + store.remove("df3") + + with pytest.raises( + KeyError, match="'No object named df3/meta/s/meta in the file'" + ): + store.select("df3/meta/s/meta") + + +def test_categorical_conversion(tmp_path, setup_path): + # GH13322 + # Check that read_hdf with categorical columns doesn't return rows if + # where criteria isn't met. 
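+ # The queried value "B" never occurs in the "obsids" column, so each selection below must be empty.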
+ obsids = ["ESP_012345_6789", "ESP_987654_3210"] + imgids = ["APF00006np", "APF0001imm"] + data = [4.3, 9.8] + + # Test without categories + df = DataFrame({"obsids": obsids, "imgids": imgids, "data": data}) + + # We are expecting an empty DataFrame matching types of df + expected = df.iloc[[], :] + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", data_columns=True) + result = read_hdf(path, "df", where="obsids=B") + tm.assert_frame_equal(result, expected) + + # Test with categories + df.obsids = df.obsids.astype("category") + df.imgids = df.imgids.astype("category") + + # We are expecting an empty DataFrame matching types of df + expected = df.iloc[[], :] + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", data_columns=True) + result = read_hdf(path, "df", where="obsids=B") + tm.assert_frame_equal(result, expected) + + +def test_categorical_nan_only_columns(tmp_path, setup_path): + # GH18413 + # Check that read_hdf with categorical columns with NaN-only values can + # be read back. + df = DataFrame( + { + "a": ["a", "b", "c", np.nan], + "b": [np.nan, np.nan, np.nan, np.nan], + "c": [1, 2, 3, 4], + "d": Series([None] * 4, dtype=object), + } + ) + df["a"] = df.a.astype("category") + df["b"] = df.b.astype("category") + df["d"] = df.b.astype("category") + expected = df + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", data_columns=True) + result = read_hdf(path, "df") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "where, df, expected", + [ + ('col=="q"', DataFrame({"col": ["a", "b", "s"]}), DataFrame({"col": []})), + ('col=="a"', DataFrame({"col": ["a", "b", "s"]}), DataFrame({"col": ["a"]})), + ], +) +def test_convert_value( + tmp_path, setup_path, where: str, df: DataFrame, expected: DataFrame +): + # GH39420 + # Check that read_hdf with categorical columns can filter by where condition. + df.col = df.col.astype("category") + max_widths = {"col": 1} + categorical_values = sorted(df.col.unique()) + expected.col = expected.col.astype("category") + expected.col = expected.col.cat.set_categories(categorical_values) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table", min_itemsize=max_widths) + result = read_hdf(path, where=where) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..b07fb3ddd3ac829f5b90d6fd7226926aeed284e6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py @@ -0,0 +1,75 @@ +import pytest + +import pandas as pd +import pandas._testing as tm + +tables = pytest.importorskip("tables") + + +@pytest.fixture +def pytables_hdf5_file(tmp_path): + """ + Use PyTables to create a simple HDF5 file. 
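+ Yields the file path, the table name, and a DataFrame of the expected contents.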
+ """ + table_schema = { + "c0": tables.Time64Col(pos=0), + "c1": tables.StringCol(5, pos=1), + "c2": tables.Int64Col(pos=2), + } + + t0 = 1_561_105_000.0 + + testsamples = [ + {"c0": t0, "c1": "aaaaa", "c2": 1}, + {"c0": t0 + 1, "c1": "bbbbb", "c2": 2}, + {"c0": t0 + 2, "c1": "ccccc", "c2": 10**5}, + {"c0": t0 + 3, "c1": "ddddd", "c2": 4_294_967_295}, + ] + + objname = "pandas_test_timeseries" + + path = tmp_path / "written_with_pytables.h5" + with tables.open_file(path, mode="w") as f: + t = f.create_table("/", name=objname, description=table_schema) + for sample in testsamples: + for key, value in sample.items(): + t.row[key] = value + t.row.append() + + yield path, objname, pd.DataFrame(testsamples) + + +class TestReadPyTablesHDF5: + """ + A group of tests which covers reading HDF5 files written by plain PyTables + (not written by pandas). + + Was introduced for regression-testing issue 11188. + """ + + def test_read_complete(self, pytables_hdf5_file): + path, objname, df = pytables_hdf5_file + result = pd.read_hdf(path, key=objname) + expected = df + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_read_with_start(self, pytables_hdf5_file): + path, objname, df = pytables_hdf5_file + # This is a regression test for pandas-dev/pandas/issues/11188 + result = pd.read_hdf(path, key=objname, start=1) + expected = df[1:].reset_index(drop=True) + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_read_with_stop(self, pytables_hdf5_file): + path, objname, df = pytables_hdf5_file + # This is a regression test for pandas-dev/pandas/issues/11188 + result = pd.read_hdf(path, key=objname, stop=1) + expected = df[:1].reset_index(drop=True) + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_read_with_startstop(self, pytables_hdf5_file): + path, objname, df = pytables_hdf5_file + # This is a regression test for pandas-dev/pandas/issues/11188 + result = pd.read_hdf(path, key=objname, start=1, stop=2) + expected = df[1:2].reset_index(drop=True) + tm.assert_frame_equal(result, expected, check_index_type=True) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py new file mode 100644 index 0000000000000000000000000000000000000000..20211010988924ed601f52ed9adf03ca081838c8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py @@ -0,0 +1,251 @@ +import datetime +from io import BytesIO +import re + +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + DataFrame, + HDFStore, + Index, + MultiIndex, + _testing as tm, + date_range, + read_hdf, +) +from pandas.tests.io.pytables.common import ensure_clean_store + +from pandas.io.pytables import ( + Term, + _maybe_adjust_name, +) + +pytestmark = pytest.mark.single_cpu + + +def test_pass_spec_to_storer(setup_path): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + with ensure_clean_store(setup_path) as store: + store.put("df", df) + msg = ( + "cannot pass a column specification when reading a Fixed format " + "store. this store must be selected in its entirety" + ) + with pytest.raises(TypeError, match=msg): + store.select("df", columns=["A"]) + msg = ( + "cannot pass a where specification when reading from a Fixed " + "format store. 
this store must be selected in its entirety" + ) + with pytest.raises(TypeError, match=msg): + store.select("df", where=[("columns=A")]) + + +def test_table_index_incompatible_dtypes(setup_path): + df1 = DataFrame({"a": [1, 2, 3]}) + df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3)) + + with ensure_clean_store(setup_path) as store: + store.put("frame", df1, format="table") + msg = re.escape("incompatible kind in col [integer - datetime64[ns]]") + with pytest.raises(TypeError, match=msg): + store.put("frame", df2, format="table", append=True) + + +def test_unimplemented_dtypes_table_columns(setup_path): + with ensure_clean_store(setup_path) as store: + dtypes = [("date", datetime.date(2001, 1, 2))] + + # currently not supported dtypes #### + for n, f in dtypes: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df[n] = f + msg = re.escape(f"[{n}] is not implemented as a table column") + with pytest.raises(TypeError, match=msg): + store.append(f"df1_{n}", df) + + # frame + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["datetime1"] = datetime.date(2001, 1, 2) + df = df._consolidate() + + with ensure_clean_store(setup_path) as store: + # this fails because we have a date in the object block...... + msg = re.escape( + """Cannot serialize the column [datetime1] +because its data contents are not [string] but [date] object dtype""" + ) + with pytest.raises(TypeError, match=msg): + store.append("df_unimplemented", df) + + +def test_invalid_terms(tmp_path, setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df.loc[df.index[0:4], "string"] = "bar" + + store.put("df", df, format="table") + + # some invalid terms + msg = re.escape("__init__() missing 1 required positional argument: 'where'") + with pytest.raises(TypeError, match=msg): + Term() + + # more invalid + msg = re.escape( + "cannot process expression [df.index[3]], " + "[2000-01-06 00:00:00] is not a valid condition" + ) + with pytest.raises(ValueError, match=msg): + store.select("df", "df.index[3]") + + msg = "invalid syntax" + with pytest.raises(SyntaxError, match=msg): + store.select("df", "index>") + + # from the docs + path = tmp_path / setup_path + dfq = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=list("ABCD"), + index=date_range("20130101", periods=10), + ) + dfq.to_hdf(path, key="dfq", format="table", data_columns=True) + + # check ok + read_hdf(path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']") + read_hdf(path, "dfq", where="A>0 or C>0") + + # catch the invalid reference + path = tmp_path / setup_path + dfq = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=list("ABCD"), + index=date_range("20130101", periods=10), + ) + dfq.to_hdf(path, key="dfq", format="table") + + msg = ( + r"The passed where expression: A>0 or C>0\n\s*" + r"contains an invalid variable reference\n\s*" + r"all of the variable references must be a reference to\n\s*" + r"an axis \(e.g. 
'index' or 'columns'\), or a data_column\n\s*" + r"The currently defined references are: index,columns\n" + ) + with pytest.raises(ValueError, match=msg): + read_hdf(path, "dfq", where="A>0 or C>0") + + +def test_append_with_diff_col_name_types_raises_value_error(setup_path): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1))) + df2 = DataFrame({"a": np.random.default_rng(2).standard_normal(10)}) + df3 = DataFrame({(1, 2): np.random.default_rng(2).standard_normal(10)}) + df4 = DataFrame({("1", 2): np.random.default_rng(2).standard_normal(10)}) + df5 = DataFrame({("1", 2, object): np.random.default_rng(2).standard_normal(10)}) + + with ensure_clean_store(setup_path) as store: + name = "df_diff_valerror" + store.append(name, df) + + for d in (df2, df3, df4, df5): + msg = re.escape( + "cannot match existing table structure for [0] on appending data" + ) + with pytest.raises(ValueError, match=msg): + store.append(name, d) + + +def test_invalid_complib(setup_path): + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + with tm.ensure_clean(setup_path) as path: + msg = r"complib only supports \[.*\] compression." + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df", complib="foolib") + + +@pytest.mark.parametrize( + "idx", + [ + date_range("2019", freq="D", periods=3, tz="UTC"), + CategoricalIndex(list("abc")), + ], +) +def test_to_hdf_multiindex_extension_dtype(idx, tmp_path, setup_path): + # GH 7775 + mi = MultiIndex.from_arrays([idx, idx]) + df = DataFrame(0, index=mi, columns=["a"]) + path = tmp_path / setup_path + with pytest.raises(NotImplementedError, match="Saving a MultiIndex"): + df.to_hdf(path, key="df") + + +def test_unsuppored_hdf_file_error(datapath): + # GH 9539 + data_path = datapath("io", "data", "legacy_hdf/incompatible_dataset.h5") + message = ( + r"Dataset\(s\) incompatible with Pandas data types, " + "not table, or no datasets found in HDF5 file." + ) + + with pytest.raises(ValueError, match=message): + read_hdf(data_path) + + +def test_read_hdf_errors(setup_path, tmp_path): + df = DataFrame( + np.random.default_rng(2).random((4, 5)), + index=list("abcd"), + columns=list("ABCDE"), + ) + + path = tmp_path / setup_path + msg = r"File [\S]* does not exist" + with pytest.raises(OSError, match=msg): + read_hdf(path, "key") + + df.to_hdf(path, key="df") + store = HDFStore(path, mode="r") + store.close() + + msg = "The HDFStore must be open for reading." + with pytest.raises(OSError, match=msg): + read_hdf(store, "df") + + +def test_read_hdf_generic_buffer_errors(): + msg = "Support for generic buffers has not been implemented." 
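+ # Note added for clarity (not in the original test): read_hdf dispatches on + # path-like objects and open HDFStore handles only, so an in-memory buffer + # such as BytesIO(b"") is expected to raise NotImplementedError with the + # message above.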
+ with pytest.raises(NotImplementedError, match=msg): + read_hdf(BytesIO(b""), "df") + + +@pytest.mark.parametrize("bad_version", [(1, 2), (1,), [], "12", "123"]) +def test_maybe_adjust_name_bad_version_raises(bad_version): + msg = "Version is incorrect, expected sequence of 3 integers" + with pytest.raises(ValueError, match=msg): + _maybe_adjust_name("values_block_0", version=bad_version) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..d93de16816725cf6a3a326301eadcd35345a96d2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py @@ -0,0 +1,495 @@ +import os + +import numpy as np +import pytest + +from pandas.compat import ( + PY311, + is_ci_environment, + is_platform_linux, + is_platform_little_endian, +) +from pandas.errors import ( + ClosedFileError, + PossibleDataLossError, +) + +from pandas import ( + DataFrame, + HDFStore, + Index, + Series, + _testing as tm, + date_range, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, + tables, +) + +from pandas.io import pytables +from pandas.io.pytables import Term + +pytestmark = pytest.mark.single_cpu + + +@pytest.mark.parametrize("mode", ["r", "r+", "a", "w"]) +def test_mode(setup_path, tmp_path, mode): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + msg = r"[\S]* does not exist" + path = tmp_path / setup_path + + # constructor + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): + HDFStore(path, mode=mode) + + else: + with HDFStore(path, mode=mode) as store: + assert store._handle.mode == mode + + path = tmp_path / setup_path + + # context + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): + with HDFStore(path, mode=mode) as store: + pass + else: + with HDFStore(path, mode=mode) as store: + assert store._handle.mode == mode + + path = tmp_path / setup_path + + # conv write + if mode in ["r", "r+"]: + with pytest.raises(OSError, match=msg): + df.to_hdf(path, key="df", mode=mode) + df.to_hdf(path, key="df", mode="w") + else: + df.to_hdf(path, key="df", mode=mode) + + # conv read + if mode in ["w"]: + msg = ( + "mode w is not allowed while performing a read. " + r"Allowed modes are r, r\+ and a." + ) + with pytest.raises(ValueError, match=msg): + read_hdf(path, "df", mode=mode) + else: + result = read_hdf(path, "df", mode=mode) + tm.assert_frame_equal(result, df) + + +def test_default_mode(tmp_path, setup_path): + # read_hdf uses default mode + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="w") + result = read_hdf(path, "df") + tm.assert_frame_equal(result, df) + + +def test_reopen_handle(tmp_path, setup_path): + path = tmp_path / setup_path + + store = HDFStore(path, mode="a") + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + + msg = ( + r"Re-opening the file \[[\S]*\] with mode \[a\] will delete the " + "current file!" 
+ ) + # invalid mode change + with pytest.raises(PossibleDataLossError, match=msg): + store.open("w") + + store.close() + assert not store.is_open + + # truncation ok here + store.open("w") + assert store.is_open + assert len(store) == 0 + store.close() + assert not store.is_open + + store = HDFStore(path, mode="a") + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + + # reopen as read + store.open("r") + assert store.is_open + assert len(store) == 1 + assert store._mode == "r" + store.close() + assert not store.is_open + + # reopen as append + store.open("a") + assert store.is_open + assert len(store) == 1 + assert store._mode == "a" + store.close() + assert not store.is_open + + # reopen as append (again) + store.open("a") + assert store.is_open + assert len(store) == 1 + assert store._mode == "a" + store.close() + assert not store.is_open + + +def test_open_args(setup_path): + with tm.ensure_clean(setup_path) as path: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # create an in memory store + store = HDFStore( + path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0 + ) + store["df"] = df + store.append("df2", df) + + tm.assert_frame_equal(store["df"], df) + tm.assert_frame_equal(store["df2"], df) + + store.close() + + # the file should not have actually been written + assert not os.path.exists(path) + + +def test_flush(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series(range(5)) + store.flush() + store.flush(fsync=True) + + +def test_complibs_default_settings(tmp_path, setup_path): + # GH15943 + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # Set complevel and check if complib is automatically set to + # default value + tmpfile = tmp_path / setup_path + df.to_hdf(tmpfile, key="df", complevel=9) + result = read_hdf(tmpfile, "df") + tm.assert_frame_equal(result, df) + + with tables.open_file(tmpfile, mode="r") as h5file: + for node in h5file.walk_nodes(where="/df", classname="Leaf"): + assert node.filters.complevel == 9 + assert node.filters.complib == "zlib" + + # Set complib and check to see if compression is disabled + tmpfile = tmp_path / setup_path + df.to_hdf(tmpfile, key="df", complib="zlib") + result = read_hdf(tmpfile, "df") + tm.assert_frame_equal(result, df) + + with tables.open_file(tmpfile, mode="r") as h5file: + for node in h5file.walk_nodes(where="/df", classname="Leaf"): + assert node.filters.complevel == 0 + assert node.filters.complib is None + + # Check if not setting complib or complevel results in no compression + tmpfile = tmp_path / setup_path + df.to_hdf(tmpfile, key="df") + result = read_hdf(tmpfile, "df") + tm.assert_frame_equal(result, df) + + with tables.open_file(tmpfile, mode="r") as h5file: + for node in h5file.walk_nodes(where="/df", classname="Leaf"): + assert node.filters.complevel == 0 + assert node.filters.complib is None + + +def test_complibs_default_settings_override(tmp_path, setup_path): + # Check if file-defaults can be overridden on a per table basis + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + tmpfile = tmp_path / setup_path + store = HDFStore(tmpfile) + store.append("dfc", df, 
+ complevel=9, complib="blosc") + store.append("df", df) + store.close() + + with tables.open_file(tmpfile, mode="r") as h5file: + for node in h5file.walk_nodes(where="/df", classname="Leaf"): + assert node.filters.complevel == 0 + assert node.filters.complib is None + for node in h5file.walk_nodes(where="/dfc", classname="Leaf"): + assert node.filters.complevel == 9 + assert node.filters.complib == "blosc" + + +@pytest.mark.parametrize("lvl", range(10)) +@pytest.mark.parametrize("lib", tables.filters.all_complibs) +@pytest.mark.filterwarnings("ignore:object name is not a valid") +@pytest.mark.skipif( + not PY311 and is_ci_environment() and is_platform_linux(), + reason="Segfaulting in a CI environment" + # with xfail, would sometimes raise UnicodeDecodeError + # invalid state byte +) +def test_complibs(tmp_path, lvl, lib, request): + # GH14478 + if PY311 and is_platform_linux() and lib == "blosc2" and lvl != 0: + request.applymarker( + pytest.mark.xfail(reason=f"Fails for {lib} on Linux and PY > 3.11") + ) + df = DataFrame( + np.ones((30, 4)), columns=list("ABCD"), index=np.arange(30).astype(np.str_) + ) + + # Remove lzo if it's not available on this platform + if not tables.which_lib_version("lzo"): + pytest.skip("lzo not available") + # Remove bzip2 if it's not available on this platform + if not tables.which_lib_version("bzip2"): + pytest.skip("bzip2 not available") + + tmpfile = tmp_path / f"{lvl}_{lib}.h5" + gname = f"{lvl}_{lib}" + + # Write and read file to see if data is consistent + df.to_hdf(tmpfile, key=gname, complib=lib, complevel=lvl) + result = read_hdf(tmpfile, gname) + tm.assert_frame_equal(result, df) + + # Open file and check metadata for correct amount of compression + with tables.open_file(tmpfile, mode="r") as h5table: + for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"): + assert node.filters.complevel == lvl + if lvl == 0: + assert node.filters.complib is None + else: + assert node.filters.complib == lib + + +@pytest.mark.skipif( + not is_platform_little_endian(), reason="platform is not little endian" +) +def test_encoding(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame({"A": "foo", "B": "bar"}, index=range(5)) + df.loc[2, "A"] = np.nan + df.loc[3, "B"] = np.nan + _maybe_remove(store, "df") + store.append("df", df, encoding="ascii") + tm.assert_frame_equal(store["df"], df) + + expected = df.reindex(columns=["A"]) + result = store.select("df", Term("columns=A", encoding="ascii")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "val", + [ + [b"E\xc9, 17", b"", b"a", b"b", b"c"], + [b"E\xc9, 17", b"a", b"b", b"c"], + [b"EE, 17", b"", b"a", b"b", b"c"], + [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"], + [b"", b"a", b"b", b"c"], + [b"\xf8\xfc", b"a", b"b", b"c"], + [b"A\xf8\xfc", b"", b"a", b"b", b"c"], + [np.nan, b"", b"b", b"c"], + [b"A\xf8\xfc", np.nan, b"", b"b", b"c"], + ], +) +@pytest.mark.parametrize("dtype", ["category", object]) +def test_latin_encoding(tmp_path, setup_path, dtype, val): + enc = "latin-1" + nan_rep = "" + key = "data" + + val = [x.decode(enc) if isinstance(x, bytes) else x for x in val] + ser = Series(val, dtype=dtype) + + store = tmp_path / setup_path + ser.to_hdf(store, key=key, format="table", encoding=enc, nan_rep=nan_rep) + retr = read_hdf(store, key) + + # TODO:(3.0): once Categorical replace deprecation is enforced, + # we may be able to re-simplify the construction of s_nan + if dtype == "category": + if nan_rep in ser.cat.categories: + s_nan =
ser.cat.remove_categories([nan_rep]) + else: + s_nan = ser + else: + s_nan = ser.replace(nan_rep, np.nan) + + tm.assert_series_equal(s_nan, retr) + + +def test_multiple_open_close(tmp_path, setup_path): + # gh-4409: open & close multiple times + + path = tmp_path / setup_path + + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_hdf(path, key="df", mode="w", format="table") + + # single + store = HDFStore(path) + assert "CLOSED" not in store.info() + assert store.is_open + + store.close() + assert "CLOSED" in store.info() + assert not store.is_open + + path = tmp_path / setup_path + + if pytables._table_file_open_policy_is_strict: + # multiples + store1 = HDFStore(path) + msg = ( + r"The file [\S]* is already opened\. Please close it before " + r"reopening in write mode\." + ) + with pytest.raises(ValueError, match=msg): + HDFStore(path) + + store1.close() + else: + # multiples + store1 = HDFStore(path) + store2 = HDFStore(path) + + assert "CLOSED" not in store1.info() + assert "CLOSED" not in store2.info() + assert store1.is_open + assert store2.is_open + + store1.close() + assert "CLOSED" in store1.info() + assert not store1.is_open + assert "CLOSED" not in store2.info() + assert store2.is_open + + store2.close() + assert "CLOSED" in store1.info() + assert "CLOSED" in store2.info() + assert not store1.is_open + assert not store2.is_open + + # nested close + store = HDFStore(path, mode="w") + store.append("df", df) + + store2 = HDFStore(path) + store2.append("df2", df) + store2.close() + assert "CLOSED" in store2.info() + assert not store2.is_open + + store.close() + assert "CLOSED" in store.info() + assert not store.is_open + + # double closing + store = HDFStore(path, mode="w") + store.append("df", df) + + store2 = HDFStore(path) + store.close() + assert "CLOSED" in store.info() + assert not store.is_open + + store2.close() + assert "CLOSED" in store2.info() + assert not store2.is_open + + # ops on a closed store + path = tmp_path / setup_path + + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.to_hdf(path, key="df", mode="w", format="table") + + store = HDFStore(path) + store.close() + + msg = r"[\S]* file is not open!" 
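+ # Note added for clarity: every store access below -- reads, writes, and + # introspection alike -- should raise ClosedFileError once the underlying + # PyTables handle has been closed; the regex above matches the file path + # reported in the error message.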
+ with pytest.raises(ClosedFileError, match=msg): + store.keys() + + with pytest.raises(ClosedFileError, match=msg): + "df" in store + + with pytest.raises(ClosedFileError, match=msg): + len(store) + + with pytest.raises(ClosedFileError, match=msg): + store["df"] + + with pytest.raises(ClosedFileError, match=msg): + store.select("df") + + with pytest.raises(ClosedFileError, match=msg): + store.get("df") + + with pytest.raises(ClosedFileError, match=msg): + store.append("df2", df) + + with pytest.raises(ClosedFileError, match=msg): + store.put("df3", df) + + with pytest.raises(ClosedFileError, match=msg): + store.get_storer("df2") + + with pytest.raises(ClosedFileError, match=msg): + store.remove("df2") + + with pytest.raises(ClosedFileError, match=msg): + store.select("df") + + msg = "'HDFStore' object has no attribute 'df'" + with pytest.raises(AttributeError, match=msg): + store.df + + +def test_fspath(): + with tm.ensure_clean("foo.h5") as path: + with HDFStore(path) as store: + assert os.fspath(store) == str(path) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py new file mode 100644 index 0000000000000000000000000000000000000000..55bd3f0d5a03a1636ae07ea9e1e3776743fd6464 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py @@ -0,0 +1,87 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + HDFStore, + Index, + Series, + date_range, +) +from pandas.tests.io.pytables.common import ( + ensure_clean_store, + tables, +) + +pytestmark = pytest.mark.single_cpu + + +def test_keys(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["b"] = Series( + range(10), dtype="float64", index=[f"i_{i}" for i in range(10)] + ) + store["c"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + assert len(store) == 3 + expected = {"/a", "/b", "/c"} + assert set(store.keys()) == expected + assert set(store) == expected + + +def test_non_pandas_keys(tmp_path, setup_path): + class Table1(tables.IsDescription): + value1 = tables.Float32Col() + + class Table2(tables.IsDescription): + value2 = tables.Float32Col() + + class Table3(tables.IsDescription): + value3 = tables.Float32Col() + + path = tmp_path / setup_path + with tables.open_file(path, mode="w") as h5file: + group = h5file.create_group("/", "group") + h5file.create_table(group, "table1", Table1, "Table 1") + h5file.create_table(group, "table2", Table2, "Table 2") + h5file.create_table(group, "table3", Table3, "Table 3") + with HDFStore(path) as store: + assert len(store.keys(include="native")) == 3 + expected = {"/group/table1", "/group/table2", "/group/table3"} + assert set(store.keys(include="native")) == expected + assert set(store.keys(include="pandas")) == set() + for name in expected: + df = store.get(name) + assert len(df.columns) == 1 + + +def test_keys_illegal_include_keyword_value(setup_path): + with ensure_clean_store(setup_path) as store: + with pytest.raises( + ValueError, + match="`include` should be either 'pandas' or 'native' but is 'illegal'", + ): + store.keys(include="illegal") + + +def test_keys_ignore_hdf_softlink(setup_path): + # GH 20523 + # Puts a softlink into HDF file and rereads + + with 
ensure_clean_store(setup_path) as store: + df = DataFrame({"A": range(5), "B": range(5)}) + store.put("df", df) + + assert store.keys() == ["/df"] + + store._handle.create_soft_link(store._handle.root, "symlink", "df") + + # Should ignore the softlink + assert store.keys() == ["/df"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py new file mode 100644 index 0000000000000000000000000000000000000000..bc5f046b7fa3308e30959953f8751056de941a32 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py @@ -0,0 +1,374 @@ +import re + +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp + +import pandas as pd +from pandas import ( + DataFrame, + HDFStore, + Index, + MultiIndex, + Series, + _testing as tm, + concat, + date_range, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) +from pandas.util import _test_decorators as td + +pytestmark = pytest.mark.single_cpu + + +def test_format_type(tmp_path, setup_path): + df = DataFrame({"A": [1, 2]}) + with HDFStore(tmp_path / setup_path) as store: + store.put("a", df, format="fixed") + store.put("b", df, format="table") + + assert store.get_storer("a").format_type == "fixed" + assert store.get_storer("b").format_type == "table" + + +def test_format_kwarg_in_constructor(tmp_path, setup_path): + # GH 13291 + + msg = "format is not a defined argument for HDFStore" + + with pytest.raises(ValueError, match=msg): + HDFStore(tmp_path / setup_path, format="table") + + +def test_api_default_format(tmp_path, setup_path): + # default_format option + with ensure_clean_store(setup_path) as store: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + with pd.option_context("io.hdf.default_format", "fixed"): + _maybe_remove(store, "df") + store.put("df", df) + assert not store.get_storer("df").is_table + + msg = "Can only append to Tables" + with pytest.raises(ValueError, match=msg): + store.append("df2", df) + + with pd.option_context("io.hdf.default_format", "table"): + _maybe_remove(store, "df") + store.put("df", df) + assert store.get_storer("df").is_table + + _maybe_remove(store, "df2") + store.append("df2", df) + assert store.get_storer("df").is_table + + path = tmp_path / setup_path + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + with pd.option_context("io.hdf.default_format", "fixed"): + df.to_hdf(path, key="df") + with HDFStore(path) as store: + assert not store.get_storer("df").is_table + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df2", append=True) + + with pd.option_context("io.hdf.default_format", "table"): + df.to_hdf(path, key="df3") + with HDFStore(path) as store: + assert store.get_storer("df3").is_table + df.to_hdf(path, key="df4", append=True) + with HDFStore(path) as store: + assert store.get_storer("df4").is_table + + +def test_put(setup_path): + with ensure_clean_store(setup_path) as store: + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=20, freq="B"), + ) + 
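+ # Note added for clarity: plain __setitem__ assignment below writes in the + # default "fixed" format; only put(..., format="table") creates a Table + # node that store.append can later extend.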
store["a"] = ts + store["b"] = df[:10] + store["foo/bar/bah"] = df[:10] + store["foo"] = df[:10] + store["/foo"] = df[:10] + store.put("c", df[:10], format="table") + + # not OK, not a table + msg = "Can only append to Tables" + with pytest.raises(ValueError, match=msg): + store.put("b", df[10:], append=True) + + # node does not currently exist, test _is_table_type returns False + # in this case + _maybe_remove(store, "f") + with pytest.raises(ValueError, match=msg): + store.put("f", df[10:], append=True) + + # can't put to a table (use append instead) + with pytest.raises(ValueError, match=msg): + store.put("c", df[10:], append=True) + + # overwrite table + store.put("c", df[:10], format="table", append=False) + tm.assert_frame_equal(df[:10], store["c"]) + + +def test_put_string_index(setup_path): + with ensure_clean_store(setup_path) as store: + index = Index([f"I am a very long string index: {i}" for i in range(20)]) + s = Series(np.arange(20), index=index) + df = DataFrame({"A": s, "B": s}) + + store["a"] = s + tm.assert_series_equal(store["a"], s) + + store["b"] = df + tm.assert_frame_equal(store["b"], df) + + # mixed length + index = Index( + ["abcdefghijklmnopqrstuvwxyz1234567890"] + + [f"I am a very long string index: {i}" for i in range(20)] + ) + s = Series(np.arange(21), index=index) + df = DataFrame({"A": s, "B": s}) + store["a"] = s + tm.assert_series_equal(store["a"], s) + + store["b"] = df + tm.assert_frame_equal(store["b"], df) + + +def test_put_compression(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + store.put("c", df, format="table", complib="zlib") + tm.assert_frame_equal(store["c"], df) + + # can't compress if format='fixed' + msg = "Compression not supported on Fixed format stores" + with pytest.raises(ValueError, match=msg): + store.put("b", df, format="fixed", complib="zlib") + + +@td.skip_if_windows +def test_put_compression_blosc(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with ensure_clean_store(setup_path) as store: + # can't compress if format='fixed' + msg = "Compression not supported on Fixed format stores" + with pytest.raises(ValueError, match=msg): + store.put("b", df, format="fixed", complib="blosc") + + store.put("c", df, format="table", complib="blosc") + tm.assert_frame_equal(store["c"], df) + + +def test_put_mixed_type(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["bool1"] = df["A"] > 0 + df["bool2"] = df["B"] > 0 + df["bool3"] = True + df["int1"] = 1 + df["int2"] = 2 + df["timestamp1"] = Timestamp("20010102").as_unit("ns") + df["timestamp2"] = Timestamp("20010103").as_unit("ns") + df["datetime1"] = Timestamp("20010102").as_unit("ns") + df["datetime2"] = Timestamp("20010103").as_unit("ns") + df.loc[df.index[3:6], ["obj1"]] = np.nan + df = df._consolidate() + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "df") + + with tm.assert_produces_warning(pd.errors.PerformanceWarning): + store.put("df", df) + + expected = store.get("df") + tm.assert_frame_equal(expected, df) + + +@pytest.mark.parametrize("format", 
["table", "fixed"]) +@pytest.mark.parametrize( + "index", + [ + Index([str(i) for i in range(10)]), + Index(np.arange(10, dtype=float)), + Index(np.arange(10)), + date_range("2020-01-01", periods=10), + pd.period_range("2020-01-01", periods=10), + ], +) +def test_store_index_types(setup_path, format, index): + # GH5386 + # test storing various index types + + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + columns=list("AB"), + index=index, + ) + _maybe_remove(store, "df") + store.put("df", df, format=format) + tm.assert_frame_equal(df, store["df"]) + + +def test_column_multiindex(setup_path): + # GH 4710 + # recreate multi-indexes properly + + index = MultiIndex.from_tuples( + [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"] + ) + df = DataFrame(np.arange(12).reshape(3, 4), columns=index) + expected = df.set_axis(df.index.to_numpy()) + + with ensure_clean_store(setup_path) as store: + store.put("df", df) + tm.assert_frame_equal( + store["df"], expected, check_index_type=True, check_column_type=True + ) + + store.put("df1", df, format="table") + tm.assert_frame_equal( + store["df1"], expected, check_index_type=True, check_column_type=True + ) + + msg = re.escape("cannot use a multi-index on axis [1] with data_columns ['A']") + with pytest.raises(ValueError, match=msg): + store.put("df2", df, format="table", data_columns=["A"]) + msg = re.escape("cannot use a multi-index on axis [1] with data_columns True") + with pytest.raises(ValueError, match=msg): + store.put("df3", df, format="table", data_columns=True) + + # appending multi-column on existing table (see GH 6167) + with ensure_clean_store(setup_path) as store: + store.append("df2", df) + store.append("df2", df) + + tm.assert_frame_equal(store["df2"], concat((df, df))) + + # non_index_axes name + df = DataFrame(np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")) + expected = df.set_axis(df.index.to_numpy()) + + with ensure_clean_store(setup_path) as store: + store.put("df1", df, format="table") + tm.assert_frame_equal( + store["df1"], expected, check_index_type=True, check_column_type=True + ) + + +def test_store_multiindex(setup_path): + # validate multi-index names + # GH 5527 + with ensure_clean_store(setup_path) as store: + + def make_index(names=None): + dti = date_range("2013-12-01", "2013-12-02") + mi = MultiIndex.from_product([dti, range(2), range(3)], names=names) + return mi + + # no names + _maybe_remove(store, "df") + df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index()) + store.append("df", df) + tm.assert_frame_equal(store.select("df"), df) + + # partial names + _maybe_remove(store, "df") + df = DataFrame( + np.zeros((12, 2)), + columns=["a", "b"], + index=make_index(["date", None, None]), + ) + store.append("df", df) + tm.assert_frame_equal(store.select("df"), df) + + # series + _maybe_remove(store, "ser") + ser = Series(np.zeros(12), index=make_index(["date", None, None])) + store.append("ser", ser) + xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"])) + tm.assert_series_equal(store.select("ser"), xp) + + # dup with column + _maybe_remove(store, "df") + df = DataFrame( + np.zeros((12, 2)), + columns=["a", "b"], + index=make_index(["date", "a", "t"]), + ) + msg = "duplicate names/columns in the multi-index when storing as a table" + with pytest.raises(ValueError, match=msg): + store.append("df", df) + + # dup within level + _maybe_remove(store, "df") + df = 
DataFrame( + np.zeros((12, 2)), + columns=["a", "b"], + index=make_index(["date", "date", "date"]), + ) + with pytest.raises(ValueError, match=msg): + store.append("df", df) + + # fully names + _maybe_remove(store, "df") + df = DataFrame( + np.zeros((12, 2)), + columns=["a", "b"], + index=make_index(["date", "s", "t"]), + ) + store.append("df", df) + tm.assert_frame_equal(store.select("df"), df) + + +@pytest.mark.parametrize("format", ["fixed", "table"]) +def test_store_periodindex(tmp_path, setup_path, format): + # GH 7796 + # test of PeriodIndex in HDFStore + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 1)), + index=pd.period_range("20220101", freq="M", periods=5), + ) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", mode="w", format=format) + expected = pd.read_hdf(path, "df") + tm.assert_frame_equal(df, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py new file mode 100644 index 0000000000000000000000000000000000000000..8d9d3afc4ad6f04b070a96922bdba3b5208ba6a9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py @@ -0,0 +1,14 @@ +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm + + +@td.skip_if_installed("tables") +def test_pytables_raises(): + df = pd.DataFrame({"A": [1, 2]}) + with pytest.raises(ImportError, match="tables"): + with tm.ensure_clean("foo.h5") as path: + df.to_hdf(path, key="df") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py new file mode 100644 index 0000000000000000000000000000000000000000..4ba9787a5a6b9ec3dcfa60b64b0e43a8af1d1afc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py @@ -0,0 +1,578 @@ +import datetime +import re + +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp +from pandas.compat import is_platform_windows + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Series, + _testing as tm, + bdate_range, + date_range, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) +from pandas.util import _test_decorators as td + +pytestmark = pytest.mark.single_cpu + + +def test_conv_read_write(): + with tm.ensure_clean() as path: + + def roundtrip(key, obj, **kwargs): + obj.to_hdf(path, key=key, **kwargs) + return read_hdf(path, key) + + o = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + tm.assert_series_equal(o, roundtrip("series", o)) + + o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]) + tm.assert_series_equal(o, roundtrip("string_series", o)) + + o = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + tm.assert_frame_equal(o, roundtrip("frame", o)) + + # table + df = DataFrame({"A": range(5), "B": range(5)}) + df.to_hdf(path, key="table", append=True) + result = read_hdf(path, "table", where=["index>2"]) + tm.assert_frame_equal(df[df.index > 2], result) + + +def test_long_strings(setup_path): + # GH6166 + data = ["a" * 50] * 10 + df = DataFrame({"a": data}, index=data) + + with 
ensure_clean_store(setup_path) as store: + store.append("df", df, data_columns=["a"]) + + result = store.select("df") + tm.assert_frame_equal(df, result) + + +def test_api(tmp_path, setup_path): + # GH4584 + # API issue when to_hdf doesn't accept append AND format args + path = tmp_path / setup_path + + df = DataFrame(range(20)) + df.iloc[:10].to_hdf(path, key="df", append=True, format="table") + df.iloc[10:].to_hdf(path, key="df", append=True, format="table") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + # append to False + df.iloc[:10].to_hdf(path, key="df", append=False, format="table") + df.iloc[10:].to_hdf(path, key="df", append=True, format="table") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + +def test_api_append(tmp_path, setup_path): + path = tmp_path / setup_path + + df = DataFrame(range(20)) + df.iloc[:10].to_hdf(path, key="df", append=True) + df.iloc[10:].to_hdf(path, key="df", append=True, format="table") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + # append to False + df.iloc[:10].to_hdf(path, key="df", append=False, format="table") + df.iloc[10:].to_hdf(path, key="df", append=True) + tm.assert_frame_equal(read_hdf(path, "df"), df) + + +def test_api_2(tmp_path, setup_path): + path = tmp_path / setup_path + + df = DataFrame(range(20)) + df.to_hdf(path, key="df", append=False, format="fixed") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + df.to_hdf(path, key="df", append=False, format="f") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + df.to_hdf(path, key="df", append=False) + tm.assert_frame_equal(read_hdf(path, "df"), df) + + df.to_hdf(path, key="df") + tm.assert_frame_equal(read_hdf(path, "df"), df) + + with ensure_clean_store(setup_path) as store: + df = DataFrame(range(20)) + + _maybe_remove(store, "df") + store.append("df", df.iloc[:10], append=True, format="table") + store.append("df", df.iloc[10:], append=True, format="table") + tm.assert_frame_equal(store.select("df"), df) + + # append to False + _maybe_remove(store, "df") + store.append("df", df.iloc[:10], append=False, format="table") + store.append("df", df.iloc[10:], append=True, format="table") + tm.assert_frame_equal(store.select("df"), df) + + # formats + _maybe_remove(store, "df") + store.append("df", df.iloc[:10], append=False, format="table") + store.append("df", df.iloc[10:], append=True, format="table") + tm.assert_frame_equal(store.select("df"), df) + + _maybe_remove(store, "df") + store.append("df", df.iloc[:10], append=False, format="table") + store.append("df", df.iloc[10:], append=True, format=None) + tm.assert_frame_equal(store.select("df"), df) + + +def test_api_invalid(tmp_path, setup_path): + path = tmp_path / setup_path + # Invalid. 
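+ # Both spellings of the fixed format ("f" and "fixed") must reject + # append=True, and an unrecognized format string raises TypeError rather + # than ValueError, as checked below.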
+ df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + msg = "Can only append to Tables" + + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df", append=True, format="f") + + with pytest.raises(ValueError, match=msg): + df.to_hdf(path, key="df", append=True, format="fixed") + + msg = r"invalid HDFStore format specified \[foo\]" + + with pytest.raises(TypeError, match=msg): + df.to_hdf(path, key="df", append=True, format="foo") + + with pytest.raises(TypeError, match=msg): + df.to_hdf(path, key="df", append=False, format="foo") + + # File path doesn't exist + path = "" + msg = f"File {path} does not exist" + + with pytest.raises(FileNotFoundError, match=msg): + read_hdf(path, "df") + + +def test_get(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + left = store.get("a") + right = store["a"] + tm.assert_series_equal(left, right) + + left = store.get("/a") + right = store["/a"] + tm.assert_series_equal(left, right) + + with pytest.raises(KeyError, match="'No object named b in the file'"): + store.get("b") + + +def test_put_integer(setup_path): + # non-date, non-string index + df = DataFrame(np.random.default_rng(2).standard_normal((50, 100))) + _check_roundtrip(df, tm.assert_frame_equal, setup_path) + + +def test_table_values_dtypes_roundtrip(setup_path): + with ensure_clean_store(setup_path) as store: + df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8") + store.append("df_f8", df1) + tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes) + + df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8") + store.append("df_i8", df2) + tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes) + + # incompatible dtype + msg = re.escape( + "invalid combination of [values_axes] on appending data " + "[name->values_block_0,cname->values_block_0," + "dtype->float64,kind->float,shape->(1, 3)] vs " + "current table [name->values_block_0," + "cname->values_block_0,dtype->int64,kind->integer," + "shape->None]" + ) + with pytest.raises(ValueError, match=msg): + store.append("df_i8", df1) + + # check creation/storage/retrieval of float32 (a bit hacky to + # actually create them, though) + df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"]) + store.append("df_f4", df1) + tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes) + assert df1.dtypes.iloc[0] == "float32" + + # check with mixed dtypes + df1 = DataFrame( + { + c: Series(np.random.default_rng(2).integers(5), dtype=c) + for c in ["float32", "float64", "int32", "int64", "int16", "int8"] + } + ) + df1["string"] = "foo" + df1["float322"] = 1.0 + df1["float322"] = df1["float322"].astype("float32") + df1["bool"] = df1["float32"] > 0 + df1["time1"] = Timestamp("20130101") + df1["time2"] = Timestamp("20130102") + + store.append("df_mixed_dtypes1", df1) + result = store.select("df_mixed_dtypes1").dtypes.value_counts() + result.index = [str(i) for i in result.index] + expected = Series( + { + "float32": 2, + "float64": 1, + "int32": 1, + "bool": 1, + "int16": 1, + "int8": 1, + "int64": 1, + "object": 1, + "datetime64[ns]": 2, + }, + name="count", + ) + result = result.sort_index() + expected = expected.sort_index() + tm.assert_series_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") +def test_series(setup_path): + s = Series(range(10), dtype="float64",
index=[f"i_{i}" for i in range(10)]) + _check_roundtrip(s, tm.assert_series_equal, path=setup_path) + + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + _check_roundtrip(ts, tm.assert_series_equal, path=setup_path) + + ts2 = Series(ts.index, Index(ts.index, dtype=object)) + _check_roundtrip(ts2, tm.assert_series_equal, path=setup_path) + + ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object)) + _check_roundtrip( + ts3, tm.assert_series_equal, path=setup_path, check_index_type=False + ) + + +def test_float_index(setup_path): + # GH #454 + index = np.random.default_rng(2).standard_normal(10) + s = Series(np.random.default_rng(2).standard_normal(10), index=index) + _check_roundtrip(s, tm.assert_series_equal, path=setup_path) + + +def test_tuple_index(setup_path): + # GH #492 + col = np.arange(10) + idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)] + data = np.random.default_rng(2).standard_normal(30).reshape((3, 10)) + DF = DataFrame(data, index=idx, columns=col) + + with tm.assert_produces_warning(pd.errors.PerformanceWarning): + _check_roundtrip(DF, tm.assert_frame_equal, path=setup_path) + + +@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") +def test_index_types(setup_path): + values = np.random.default_rng(2).standard_normal(2) + + func = lambda lhs, rhs: tm.assert_series_equal(lhs, rhs, check_index_type=True) + + ser = Series(values, [0, "y"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [datetime.datetime.today(), 0]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, ["y", 0]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [datetime.date.today(), "a"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [0, "y"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [datetime.datetime.today(), 0]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, ["y", 0]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [datetime.date.today(), "a"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [1.23, "b"]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [1, 1.53]) + _check_roundtrip(ser, func, path=setup_path) + + ser = Series(values, [1, 5]) + _check_roundtrip(ser, func, path=setup_path) + + dti = DatetimeIndex(["2012-01-01", "2012-01-02"], dtype="M8[ns]") + ser = Series(values, index=dti) + _check_roundtrip(ser, func, path=setup_path) + + ser.index = ser.index.as_unit("s") + _check_roundtrip(ser, func, path=setup_path) + + +def test_timeseries_preepoch(setup_path, request): + dr = bdate_range("1/1/1940", "1/1/1960") + ts = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) + try: + _check_roundtrip(ts, tm.assert_series_equal, path=setup_path) + except OverflowError: + if is_platform_windows(): + request.applymarker( + pytest.mark.xfail("known failure on some windows platforms") + ) + raise + + +@pytest.mark.parametrize( + "compression", [False, pytest.param(True, marks=td.skip_if_windows)] +) +def test_frame(compression, setup_path): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + # put in some random NAs + df.iloc[0, 0] = np.nan + df.iloc[5, 3] = np.nan + + _check_roundtrip_table( + df, tm.assert_frame_equal, path=setup_path, compression=compression + ) + 
_check_roundtrip( + df, tm.assert_frame_equal, path=setup_path, compression=compression + ) + + tdf = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _check_roundtrip( + tdf, tm.assert_frame_equal, path=setup_path, compression=compression + ) + + with ensure_clean_store(setup_path) as store: + # not consolidated + df["foo"] = np.random.default_rng(2).standard_normal(len(df)) + store["df"] = df + recons = store["df"] + assert recons._mgr.is_consolidated() + + # empty + _check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path) + + +def test_empty_series_frame(setup_path): + s0 = Series(dtype=object) + s1 = Series(name="myseries", dtype=object) + df0 = DataFrame() + df1 = DataFrame(index=["a", "b", "c"]) + df2 = DataFrame(columns=["d", "e", "f"]) + + _check_roundtrip(s0, tm.assert_series_equal, path=setup_path) + _check_roundtrip(s1, tm.assert_series_equal, path=setup_path) + _check_roundtrip(df0, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) + + +@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"]) +def test_empty_series(dtype, setup_path): + s = Series(dtype=dtype) + _check_roundtrip(s, tm.assert_series_equal, path=setup_path) + + +def test_can_serialize_dates(setup_path): + rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")] + frame = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + + _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) + + +def test_store_hierarchical(setup_path, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path) + + # check that the names are stored + with ensure_clean_store(setup_path) as store: + store["frame"] = frame + recons = store["frame"] + tm.assert_frame_equal(recons, frame) + + +@pytest.mark.parametrize( + "compression", [False, pytest.param(True, marks=td.skip_if_windows)] +) +def test_store_mixed(compression, setup_path): + def _make_one(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["bool1"] = df["A"] > 0 + df["bool2"] = df["B"] > 0 + df["int1"] = 1 + df["int2"] = 2 + return df._consolidate() + + df1 = _make_one() + df2 = _make_one() + + _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) + _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) + + with ensure_clean_store(setup_path) as store: + store["obj"] = df1 + tm.assert_frame_equal(store["obj"], df1) + store["obj"] = df2 + tm.assert_frame_equal(store["obj"], df2) + + # check that can store Series of all of these types + _check_roundtrip( + df1["obj1"], + tm.assert_series_equal, + path=setup_path, + compression=compression, + ) + _check_roundtrip( + df1["bool1"], + tm.assert_series_equal, + path=setup_path, + compression=compression, + ) + _check_roundtrip( + df1["int1"], + tm.assert_series_equal, + path=setup_path, + compression=compression, + ) + + +def _check_roundtrip(obj, comparator, path, compression=False, **kwargs): + options = {} + if 
compression: + options["complib"] = "blosc" + + with ensure_clean_store(path, "w", **options) as store: + store["obj"] = obj + retrieved = store["obj"] + comparator(retrieved, obj, **kwargs) + + +def _check_roundtrip_table(obj, comparator, path, compression=False): + options = {} + if compression: + options["complib"] = "blosc" + + with ensure_clean_store(path, "w", **options) as store: + store.put("obj", obj, format="table") + retrieved = store["obj"] + + comparator(retrieved, obj) + + +def test_unicode_index(setup_path): + unicode_values = ["\u03c3", "\u03c3\u03c3"] + + s = Series( + np.random.default_rng(2).standard_normal(len(unicode_values)), + unicode_values, + ) + _check_roundtrip(s, tm.assert_series_equal, path=setup_path) + + +def test_unicode_longer_encoded(setup_path): + # GH 11234 + char = "\u0394" + df = DataFrame({"A": [char]}) + with ensure_clean_store(setup_path) as store: + store.put("df", df, format="table", encoding="utf-8") + result = store.get("df") + tm.assert_frame_equal(result, df) + + df = DataFrame({"A": ["a", char], "B": ["b", "b"]}) + with ensure_clean_store(setup_path) as store: + store.put("df", df, format="table", encoding="utf-8") + result = store.get("df") + tm.assert_frame_equal(result, df) + + +def test_store_datetime_mixed(setup_path): + df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]}) + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + df["d"] = ts.index[:3] + _check_roundtrip(df, tm.assert_frame_equal, path=setup_path) + + +def test_round_trip_equals(tmp_path, setup_path): + # GH 9330 + df = DataFrame({"B": [1, 2], "A": ["x", "y"]}) + + path = tmp_path / setup_path + df.to_hdf(path, key="df", format="table") + other = read_hdf(path, "df") + tm.assert_frame_equal(df, other) + assert df.equals(other) + assert other.equals(df) + + +def test_infer_string_columns(tmp_path, setup_path): + # GH# + pytest.importorskip("pyarrow") + path = tmp_path / setup_path + with pd.option_context("future.infer_string", True): + df = DataFrame(1, columns=list("ABCD"), index=list(range(10))).set_index( + ["A", "B"] + ) + expected = df.copy() + df.to_hdf(path, key="df", format="table") + + result = read_hdf(path, "df") + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py new file mode 100644 index 0000000000000000000000000000000000000000..0e303d1c890c5b6ea4fcfb8d526297981a14069b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py @@ -0,0 +1,1047 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import Timestamp + +import pandas as pd +from pandas import ( + DataFrame, + HDFStore, + Index, + MultiIndex, + Series, + _testing as tm, + bdate_range, + concat, + date_range, + isna, + read_hdf, +) +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + +from pandas.io.pytables import Term + +pytestmark = pytest.mark.single_cpu + + +def test_select_columns_in_where(setup_path): + # GH 6169 + # recreate multi-indexes when columns is passed + # in the `where` argument + index = MultiIndex( + levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], + codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=["foo_name", "bar_name"], + ) + + # With a DataFrame + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 
3)), + index=index, + columns=["A", "B", "C"], + ) + + with ensure_clean_store(setup_path) as store: + store.put("df", df, format="table") + expected = df[["A"]] + + tm.assert_frame_equal(store.select("df", columns=["A"]), expected) + + tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected) + + # With a Series + s = Series(np.random.default_rng(2).standard_normal(10), index=index, name="A") + with ensure_clean_store(setup_path) as store: + store.put("s", s, format="table") + tm.assert_series_equal(store.select("s", where="columns=['A']"), s) + + +def test_select_with_dups(setup_path): + # single dtypes + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "A", "B", "B"] + ) + df.index = date_range("20130101 9:30", periods=10, freq="min") + + with ensure_clean_store(setup_path) as store: + store.append("df", df) + + result = store.select("df") + expected = df + tm.assert_frame_equal(result, expected, by_blocks=True) + + result = store.select("df", columns=df.columns) + expected = df + tm.assert_frame_equal(result, expected, by_blocks=True) + + result = store.select("df", columns=["A"]) + expected = df.loc[:, ["A"]] + tm.assert_frame_equal(result, expected) + + # dups across dtypes + df = concat( + [ + DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "A", "B", "B"], + ), + DataFrame( + np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2), + columns=["A", "C"], + ), + ], + axis=1, + ) + df.index = date_range("20130101 9:30", periods=10, freq="min") + + with ensure_clean_store(setup_path) as store: + store.append("df", df) + + result = store.select("df") + expected = df + tm.assert_frame_equal(result, expected, by_blocks=True) + + result = store.select("df", columns=df.columns) + expected = df + tm.assert_frame_equal(result, expected, by_blocks=True) + + expected = df.loc[:, ["A"]] + result = store.select("df", columns=["A"]) + tm.assert_frame_equal(result, expected, by_blocks=True) + + expected = df.loc[:, ["B", "A"]] + result = store.select("df", columns=["B", "A"]) + tm.assert_frame_equal(result, expected, by_blocks=True) + + # duplicates on both index and columns + with ensure_clean_store(setup_path) as store: + store.append("df", df) + store.append("df", df) + + expected = df.loc[:, ["B", "A"]] + expected = concat([expected, expected]) + result = store.select("df", columns=["B", "A"]) + tm.assert_frame_equal(result, expected, by_blocks=True) + + +def test_select(setup_path): + with ensure_clean_store(setup_path) as store: + # select with columns= + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _maybe_remove(store, "df") + store.append("df", df) + result = store.select("df", columns=["A", "B"]) + expected = df.reindex(columns=["A", "B"]) + tm.assert_frame_equal(expected, result) + + # equivalently + result = store.select("df", [("columns=['A', 'B']")]) + expected = df.reindex(columns=["A", "B"]) + tm.assert_frame_equal(expected, result) + + # with a data column + _maybe_remove(store, "df") + store.append("df", df, data_columns=["A"]) + result = store.select("df", ["A > 0"], columns=["A", "B"]) + expected = df[df.A > 0].reindex(columns=["A", "B"]) + tm.assert_frame_equal(expected, result) + + # all a data columns + _maybe_remove(store, "df") + store.append("df", df, data_columns=True) + result = store.select("df", ["A > 0"], columns=["A", "B"]) + expected = df[df.A > 
0].reindex(columns=["A", "B"]) + tm.assert_frame_equal(expected, result) + + # with a data column, but different columns + _maybe_remove(store, "df") + store.append("df", df, data_columns=["A"]) + result = store.select("df", ["A > 0"], columns=["C", "D"]) + expected = df[df.A > 0].reindex(columns=["C", "D"]) + tm.assert_frame_equal(expected, result) + + +def test_select_dtypes(setup_path): + with ensure_clean_store(setup_path) as store: + # with a Timestamp data column (GH #2637) + df = DataFrame( + { + "ts": bdate_range("2012-01-01", periods=300), + "A": np.random.default_rng(2).standard_normal(300), + } + ) + _maybe_remove(store, "df") + store.append("df", df, data_columns=["ts", "A"]) + + result = store.select("df", "ts>=Timestamp('2012-02-01')") + expected = df[df.ts >= Timestamp("2012-02-01")] + tm.assert_frame_equal(expected, result) + + # bool columns (GH #2849) + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=["A", "B"] + ) + df["object"] = "foo" + df.loc[4:5, "object"] = "bar" + df["boolv"] = df["A"] > 0 + _maybe_remove(store, "df") + store.append("df", df, data_columns=True) + + expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa: E712 + for v in [True, "true", 1]: + result = store.select("df", f"boolv == {v}", columns=["A", "boolv"]) + tm.assert_frame_equal(expected, result) + + expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa: E712 + for v in [False, "false", 0]: + result = store.select("df", f"boolv == {v}", columns=["A", "boolv"]) + tm.assert_frame_equal(expected, result) + + # integer index + df = DataFrame( + { + "A": np.random.default_rng(2).random(20), + "B": np.random.default_rng(2).random(20), + } + ) + _maybe_remove(store, "df_int") + store.append("df_int", df) + result = store.select("df_int", "index<10 and columns=['A']") + expected = df.reindex(index=list(df.index)[0:10], columns=["A"]) + tm.assert_frame_equal(expected, result) + + # float index + df = DataFrame( + { + "A": np.random.default_rng(2).random(20), + "B": np.random.default_rng(2).random(20), + "index": np.arange(20, dtype="f8"), + } + ) + _maybe_remove(store, "df_float") + store.append("df_float", df) + result = store.select("df_float", "index<10.0 and columns=['A']") + expected = df.reindex(index=list(df.index)[0:10], columns=["A"]) + tm.assert_frame_equal(expected, result) + + with ensure_clean_store(setup_path) as store: + # floats w/o NaN + df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64") + df["cols"] = (df["cols"] + 10).apply(str) + + store.append("df1", df, data_columns=True) + result = store.select("df1", where="values>2.0") + expected = df[df["values"] > 2.0] + tm.assert_frame_equal(expected, result) + + # floats with NaN + df.iloc[0] = np.nan + expected = df[df["values"] > 2.0] + + store.append("df2", df, data_columns=True, index=False) + result = store.select("df2", where="values>2.0") + tm.assert_frame_equal(expected, result) + + # https://github.com/PyTables/PyTables/issues/282 + # bug in selection when 0th row has a np.nan and an index + # store.append('df3',df,data_columns=True) + # result = store.select( + # 'df3', where='values>2.0') + # tm.assert_frame_equal(expected, result) + + # not in first position float with NaN ok too + df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64") + df["cols"] = (df["cols"] + 10).apply(str) + + df.iloc[1] = np.nan + expected = df[df["values"] > 2.0] + + store.append("df4", df, data_columns=True) + result = store.select("df4", 
where="values>2.0") + tm.assert_frame_equal(expected, result) + + # test selection with comparison against numpy scalar + # GH 11283 + with ensure_clean_store(setup_path) as store: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + expected = df[df["A"] > 0] + + store.append("df", df, data_columns=True) + np_zero = np.float64(0) # noqa: F841 + result = store.select("df", where=["A>np_zero"]) + tm.assert_frame_equal(expected, result) + + +def test_select_with_many_inputs(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + { + "ts": bdate_range("2012-01-01", periods=300), + "A": np.random.default_rng(2).standard_normal(300), + "B": range(300), + "users": ["a"] * 50 + + ["b"] * 50 + + ["c"] * 100 + + [f"a{i:03d}" for i in range(100)], + } + ) + _maybe_remove(store, "df") + store.append("df", df, data_columns=["ts", "A", "B", "users"]) + + # regular select + result = store.select("df", "ts>=Timestamp('2012-02-01')") + expected = df[df.ts >= Timestamp("2012-02-01")] + tm.assert_frame_equal(expected, result) + + # small selector + result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']") + expected = df[ + (df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"]) + ] + tm.assert_frame_equal(expected, result) + + # big selector along the columns + selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)] + result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector") + expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)] + tm.assert_frame_equal(expected, result) + + selector = range(100, 200) + result = store.select("df", "B=selector") + expected = df[df.B.isin(selector)] + tm.assert_frame_equal(expected, result) + assert len(result) == 100 + + # big selector along the index + selector = Index(df.ts[0:100].values) + result = store.select("df", "ts=selector") + expected = df[df.ts.isin(selector.values)] + tm.assert_frame_equal(expected, result) + assert len(result) == 100 + + +def test_select_iterator(tmp_path, setup_path): + # single table + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _maybe_remove(store, "df") + store.append("df", df) + + expected = store.select("df") + + results = list(store.select("df", iterator=True)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + results = list(store.select("df", chunksize=2)) + assert len(results) == 5 + result = concat(results) + tm.assert_frame_equal(expected, result) + + results = list(store.select("df", chunksize=2)) + result = concat(results) + tm.assert_frame_equal(result, expected) + + path = tmp_path / setup_path + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.to_hdf(path, key="df_non_table") + + msg = "can only use an iterator or chunksize on a table" + with pytest.raises(TypeError, match=msg): + read_hdf(path, "df_non_table", chunksize=2) + + with pytest.raises(TypeError, match=msg): + read_hdf(path, "df_non_table", iterator=True) + + path = tmp_path / setup_path + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + 
index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.to_hdf(path, key="df", format="table") + + results = list(read_hdf(path, "df", chunksize=2)) + result = concat(results) + + assert len(results) == 5 + tm.assert_frame_equal(result, df) + tm.assert_frame_equal(result, read_hdf(path, "df")) + + # multiple + + with ensure_clean_store(setup_path) as store: + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + store.append("df1", df1, data_columns=True) + df2 = df1.copy().rename(columns="{}_2".format) + df2["foo"] = "bar" + store.append("df2", df2) + + df = concat([df1, df2], axis=1) + + # full selection + expected = store.select_as_multiple(["df1", "df2"], selector="df1") + results = list( + store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=2) + ) + result = concat(results) + tm.assert_frame_equal(expected, result) + + +def test_select_iterator_complete_8014(setup_path): + # GH 8014 + # using iterator and where clause + chunksize = 1e4 + + # no iterator + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + beg_dt = expected.index[0] + end_dt = expected.index[-1] + + # select w/o iteration and no where clause works + result = store.select("df") + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, single term, begin + # of range, works + where = f"index >= '{beg_dt}'" + result = store.select("df", where=where) + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, single term, end + # of range, works + where = f"index <= '{end_dt}'" + result = store.select("df", where=where) + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, inclusive range, + # works + where = f"index >= '{beg_dt}' & index <= '{end_dt}'" + result = store.select("df", where=where) + tm.assert_frame_equal(expected, result) + + # with iterator, full range + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + beg_dt = expected.index[0] + end_dt = expected.index[-1] + + # select w/iterator and no where clause works + results = list(store.select("df", chunksize=chunksize)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, single term, begin of range + where = f"index >= '{beg_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, single term, end of range + where = f"index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, inclusive range + where = f"index >= '{beg_dt}' & index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + tm.assert_frame_equal(expected, result) + + +def 
test_select_iterator_non_complete_8014(setup_path): + # GH 8014 + # using iterator and where clause + chunksize = 1e4 + + # with iterator, non complete range + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + beg_dt = expected.index[1] + end_dt = expected.index[-2] + + # select w/iterator and where clause, single term, begin of range + where = f"index >= '{beg_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + rexpected = expected[expected.index >= beg_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, single term, end of range + where = f"index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + rexpected = expected[expected.index <= end_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, inclusive range + where = f"index >= '{beg_dt}' & index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)] + tm.assert_frame_equal(rexpected, result) + + # with iterator, empty where + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + end_dt = expected.index[-1] + + # select w/iterator and where clause, single term, begin of range + where = f"index > '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + assert 0 == len(results) + + +def test_select_iterator_many_empty_frames(setup_path): + # GH 8014 + # using iterator and where clause can return many empty + # frames. 
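A minimal, self-contained sketch of the chunked-read pattern the GH 8014 tests here exercise, with a hypothetical demo_chunks.h5 path; it assumes pandas with the optional PyTables dependency installed:

import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.random.default_rng(0).standard_normal((100, 2)),
    columns=["A", "B"],
    index=pd.date_range("2000-01-01", periods=100, freq="s"),
)
with pd.HDFStore("demo_chunks.h5", mode="w") as store:  # hypothetical path
    store.append("df", df)
    # chunksize turns select() into an iterator of DataFrame chunks
    chunks = list(store.select("df", where=f"index >= '{df.index[50]}'", chunksize=25))
    assert len(pd.concat(chunks)) == 50
    # a where clause that matches nothing yields no chunks at all
    assert list(store.select("df", where=f"index > '{df.index[-1]}'", chunksize=25)) == []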
+ chunksize = 10_000 + + # with iterator, range limited to the first chunk + with ensure_clean_store(setup_path) as store: + expected = DataFrame( + np.random.default_rng(2).standard_normal((100064, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=100064, freq="s"), + ) + _maybe_remove(store, "df") + store.append("df", expected) + + beg_dt = expected.index[0] + end_dt = expected.index[chunksize - 1] + + # select w/iterator and where clause, single term, begin of range + where = f"index >= '{beg_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + result = concat(results) + rexpected = expected[expected.index >= beg_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, single term, end of range + where = f"index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + + assert len(results) == 1 + result = concat(results) + rexpected = expected[expected.index <= end_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, inclusive range + where = f"index >= '{beg_dt}' & index <= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + + # should be 1, is 10 + assert len(results) == 1 + result = concat(results) + rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause which selects + # *nothing*. + # + # To be consistent with Python idiom I suggest this should + # return [] e.g. `for e in []: print True` never prints + # True. + + where = f"index <= '{beg_dt}' & index >= '{end_dt}'" + results = list(store.select("df", where=where, chunksize=chunksize)) + + # should be [] + assert len(results) == 0 + + +def test_frame_select(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with ensure_clean_store(setup_path) as store: + store.put("frame", df, format="table") + date = df.index[len(df) // 2] + + crit1 = Term("index>=date") + assert crit1.env.scope["date"] == date + + crit2 = "columns=['A', 'D']" + crit3 = "columns=A" + + result = store.select("frame", [crit1, crit2]) + expected = df.loc[date:, ["A", "D"]] + tm.assert_frame_equal(result, expected) + + result = store.select("frame", [crit3]) + expected = df.loc[:, ["A"]] + tm.assert_frame_equal(result, expected) + + # invalid terms + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + store.append("df_time", df) + msg = "day is out of range for month: 0" + with pytest.raises(ValueError, match=msg): + store.select("df_time", "index>0") + + # can't select if not written as table + # store['frame'] = df + # with pytest.raises(ValueError): + # store.select('frame', [crit1, crit2]) + + +def test_frame_select_complex(setup_path): + # select via complex criteria + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df.loc[df.index[0:4], "string"] = "bar" + + with ensure_clean_store(setup_path) as store: + store.put("df", df, format="table", data_columns=["string"]) + + # empty + result = store.select("df", 'index>df.index[3] & string="bar"') + 
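The complex-criteria selects in this test combine comparisons with & and |, so a compact illustration of the query syntax may help; demo_where.h5 and the frame are hypothetical, and only columns passed as data_columns are queryable this way:

import numpy as np
import pandas as pd

df = pd.DataFrame(
    {"A": np.arange(10.0), "string": ["bar"] * 4 + ["foo"] * 6},
    index=pd.date_range("2000-01-01", periods=10),
)
with pd.HDFStore("demo_where.h5", mode="w") as store:  # hypothetical path
    store.put("df", df, format="table", data_columns=["string"])
    # conditions on the index and on data columns compose with & and |
    result = store.select("df", 'index > "2000-01-04" & string == "foo"')
    expected = df[(df.index > "2000-01-04") & (df.string == "foo")]
    pd.testing.assert_frame_equal(result, expected)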
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")] + tm.assert_frame_equal(result, expected) + + result = store.select("df", 'index>df.index[3] & string="foo"') + expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")] + tm.assert_frame_equal(result, expected) + + # or + result = store.select("df", 'index>df.index[3] | string="bar"') + expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")] + tm.assert_frame_equal(result, expected) + + result = store.select( + "df", '(index>df.index[3] & index<=df.index[6]) | string="bar"' + ) + expected = df.loc[ + ((df.index > df.index[3]) & (df.index <= df.index[6])) + | (df.string == "bar") + ] + tm.assert_frame_equal(result, expected) + + # invert + result = store.select("df", 'string!="bar"') + expected = df.loc[df.string != "bar"] + tm.assert_frame_equal(result, expected) + + # invert not implemented in numexpr :( + msg = "cannot use an invert condition when passing to numexpr" + with pytest.raises(NotImplementedError, match=msg): + store.select("df", '~(string="bar")') + + # invert ok for filters + result = store.select("df", "~(columns=['A','B'])") + expected = df.loc[:, df.columns.difference(["A", "B"])] + tm.assert_frame_equal(result, expected) + + # in + result = store.select("df", "index>df.index[3] & columns in ['A','B']") + expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +def test_frame_select_complex2(tmp_path): + pp = tmp_path / "params.hdf" + hh = tmp_path / "hist.hdf" + + # use non-trivial selection criteria + params = DataFrame({"A": [1, 1, 2, 2, 3]}) + params.to_hdf(pp, key="df", mode="w", format="table", data_columns=["A"]) + + selection = read_hdf(pp, "df", where="A=[2,3]") + hist = DataFrame( + np.random.default_rng(2).standard_normal((25, 1)), + columns=["data"], + index=MultiIndex.from_tuples( + [(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"] + ), + ) + + hist.to_hdf(hh, key="df", mode="w", format="table") + + expected = read_hdf(hh, "df", where="l1=[2, 3, 4]") + + # scope with list like + l0 = selection.index.tolist() # noqa: F841 + with HDFStore(hh) as store: + result = store.select("df", where="l1=l0") + tm.assert_frame_equal(result, expected) + + result = read_hdf(hh, "df", where="l1=l0") + tm.assert_frame_equal(result, expected) + + # index + index = selection.index # noqa: F841 + result = read_hdf(hh, "df", where="l1=index") + tm.assert_frame_equal(result, expected) + + result = read_hdf(hh, "df", where="l1=selection.index") + tm.assert_frame_equal(result, expected) + + result = read_hdf(hh, "df", where="l1=selection.index.tolist()") + tm.assert_frame_equal(result, expected) + + result = read_hdf(hh, "df", where="l1=list(selection.index)") + tm.assert_frame_equal(result, expected) + + # scope with index + with HDFStore(hh) as store: + result = store.select("df", where="l1=index") + tm.assert_frame_equal(result, expected) + + result = store.select("df", where="l1=selection.index") + tm.assert_frame_equal(result, expected) + + result = store.select("df", where="l1=selection.index.tolist()") + tm.assert_frame_equal(result, expected) + + result = store.select("df", where="l1=list(selection.index)") + tm.assert_frame_equal(result, expected) + + +def test_invalid_filtering(setup_path): + # can't use more than one filter (atm) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with 
ensure_clean_store(setup_path) as store: + store.put("df", df, format="table") + + msg = "unable to collapse Joint Filters" + # not implemented + with pytest.raises(NotImplementedError, match=msg): + store.select("df", "columns=['A'] | columns=['B']") + + # in theory we could deal with this + with pytest.raises(NotImplementedError, match=msg): + store.select("df", "columns=['A','B'] & columns=['C']") + + +def test_string_select(setup_path): + # GH 2973 + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + # test string ==/!= + df["x"] = "none" + df.loc[df.index[2:7], "x"] = "" + + store.append("df", df, data_columns=["x"]) + + result = store.select("df", "x=none") + expected = df[df.x == "none"] + tm.assert_frame_equal(result, expected) + + result = store.select("df", "x!=none") + expected = df[df.x != "none"] + tm.assert_frame_equal(result, expected) + + df2 = df.copy() + df2.loc[df2.x == "", "x"] = np.nan + + store.append("df2", df2, data_columns=["x"]) + result = store.select("df2", "x!=none") + expected = df2[isna(df2.x)] + tm.assert_frame_equal(result, expected) + + # int ==/!= + df["int"] = 1 + df.loc[df.index[2:7], "int"] = 2 + + store.append("df3", df, data_columns=["int"]) + + result = store.select("df3", "int=2") + expected = df[df.int == 2] + tm.assert_frame_equal(result, expected) + + result = store.select("df3", "int!=2") + expected = df[df.int != 2] + tm.assert_frame_equal(result, expected) + + +def test_select_as_multiple(setup_path): + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) + df2["foo"] = "bar" + + with ensure_clean_store(setup_path) as store: + msg = "keys must be a list/tuple" + # no tables stored + with pytest.raises(TypeError, match=msg): + store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1") + + store.append("df1", df1, data_columns=["A", "B"]) + store.append("df2", df2) + + # exceptions + with pytest.raises(TypeError, match=msg): + store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1") + + with pytest.raises(TypeError, match=msg): + store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1") + + msg = "'No object named df3 in the file'" + with pytest.raises(KeyError, match=msg): + store.select_as_multiple( + ["df1", "df3"], where=["A>0", "B>0"], selector="df1" + ) + + with pytest.raises(KeyError, match=msg): + store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1") + + with pytest.raises(KeyError, match="'No object named df4 in the file'"): + store.select_as_multiple( + ["df1", "df2"], where=["A>0", "B>0"], selector="df4" + ) + + # default select + result = store.select("df1", ["A>0", "B>0"]) + expected = store.select_as_multiple( + ["df1"], where=["A>0", "B>0"], selector="df1" + ) + tm.assert_frame_equal(result, expected) + expected = store.select_as_multiple("df1", where=["A>0", "B>0"], selector="df1") + tm.assert_frame_equal(result, expected) + + # multiple + result = store.select_as_multiple( + ["df1", "df2"], where=["A>0", "B>0"], selector="df1" + ) + expected = concat([df1, df2], axis=1) + expected = expected[(expected.A > 0) & (expected.B > 0)] + tm.assert_frame_equal(result, expected, check_freq=False) + # FIXME: 2021-01-20 this is failing with freq 
None vs 4B on some builds + + # multiple (diff selector) + result = store.select_as_multiple( + ["df1", "df2"], where="index>df2.index[4]", selector="df2" + ) + expected = concat([df1, df2], axis=1) + expected = expected[5:] + tm.assert_frame_equal(result, expected) + + # test exception for diff rows + df3 = df1.copy().head(2) + store.append("df3", df3) + msg = "all tables must have exactly the same nrows!" + with pytest.raises(ValueError, match=msg): + store.select_as_multiple( + ["df1", "df3"], where=["A>0", "B>0"], selector="df1" + ) + + +def test_nan_selection_bug_4858(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame({"cols": range(6), "values": range(6)}, dtype="float64") + df["cols"] = (df["cols"] + 10).apply(str) + df.iloc[0] = np.nan + + expected = DataFrame( + {"cols": ["13.0", "14.0", "15.0"], "values": [3.0, 4.0, 5.0]}, + index=[3, 4, 5], + ) + + # write w/o the index on that particular column + store.append("df", df, data_columns=True, index=["cols"]) + result = store.select("df", where="values>2.0") + tm.assert_frame_equal(result, expected) + + +def test_query_with_nested_special_character(setup_path): + df = DataFrame( + { + "a": ["a", "a", "c", "b", "test & test", "c", "b", "e"], + "b": [1, 2, 3, 4, 5, 6, 7, 8], + } + ) + expected = df[df.a == "test & test"] + with ensure_clean_store(setup_path) as store: + store.append("test", df, format="table", data_columns=True) + result = store.select("test", 'a = "test & test"') + tm.assert_frame_equal(expected, result) + + +def test_query_long_float_literal(setup_path): + # GH 14241 + df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]}) + + with ensure_clean_store(setup_path) as store: + store.append("test", df, format="table", data_columns=True) + + cutoff = 1000000000.0006 + result = store.select("test", f"A < {cutoff:.4f}") + assert result.empty + + cutoff = 1000000000.0010 + result = store.select("test", f"A > {cutoff:.4f}") + expected = df.loc[[1, 2], :] + tm.assert_frame_equal(expected, result) + + exact = 1000000000.0011 + result = store.select("test", f"A == {exact:.4f}") + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + +def test_query_compare_column_type(setup_path): + # GH 15492 + df = DataFrame( + { + "date": ["2014-01-01", "2014-01-02"], + "real_date": date_range("2014-01-01", periods=2), + "float": [1.1, 1.2], + "int": [1, 2], + }, + columns=["date", "real_date", "float", "int"], + ) + + with ensure_clean_store(setup_path) as store: + store.append("test", df, format="table", data_columns=True) + + ts = Timestamp("2014-01-01") # noqa: F841 + result = store.select("test", where="real_date > ts") + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + for op in ["<", ">", "=="]: + # non strings to string column always fail + for v in [2.1, True, Timestamp("2014-01-01"), pd.Timedelta(1, "s")]: + query = f"date {op} v" + msg = f"Cannot compare {v} of type {type(v)} to string column" + with pytest.raises(TypeError, match=msg): + store.select("test", where=query) + + # strings to other columns must be convertible to type + v = "a" + for col in ["int", "float", "real_date"]: + query = f"{col} {op} v" + if col == "real_date": + msg = 'Given date string "a" not likely a datetime' + else: + msg = "could not convert string to" + with pytest.raises(ValueError, match=msg): + store.select("test", where=query) + + for v, col in zip( + ["1", "1.1", "2014-01-01"], ["int", "float", "real_date"] + ): + query = f"{col} {op} v" + result = 
store.select("test", where=query) + + if op == "==": + expected = df.loc[[0], :] + elif op == ">": + expected = df.loc[[1], :] + else: + expected = df.loc[[], :] + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize("where", ["", (), (None,), [], [None]]) +def test_select_empty_where(tmp_path, where): + # GH26610 + + df = DataFrame([1, 2, 3]) + path = tmp_path / "empty_where.h5" + with HDFStore(path) as store: + store.put("df", df, "t") + result = read_hdf(store, "df", where=where) + tm.assert_frame_equal(result, df) + + +def test_select_large_integer(tmp_path): + path = tmp_path / "large_int.h5" + + df = DataFrame( + zip( + ["a", "b", "c", "d"], + [-9223372036854775801, -9223372036854775802, -9223372036854775803, 123], + ), + columns=["x", "y"], + ) + result = None + with HDFStore(path) as s: + s.append("data", df, data_columns=True, index=False) + result = s.select("data", where="y==-9223372036854775801").get("y").get(0) + expected = df["y"][0] + + assert expected == result diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py new file mode 100644 index 0000000000000000000000000000000000000000..82d3052e7f5d6738801e973a9f34307ea0dff2cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py @@ -0,0 +1,1119 @@ +import contextlib +import datetime as dt +import hashlib +import tempfile +import time + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, + concat, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.tests.io.pytables.common import ( + _maybe_remove, + ensure_clean_store, +) + +from pandas.io.pytables import ( + HDFStore, + read_hdf, +) + +pytestmark = pytest.mark.single_cpu + +tables = pytest.importorskip("tables") + + +def test_context(setup_path): + with tm.ensure_clean(setup_path) as path: + try: + with HDFStore(path) as tbl: + raise ValueError("blah") + except ValueError: + pass + with tm.ensure_clean(setup_path) as path: + with HDFStore(path) as tbl: + tbl["a"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + assert len(tbl) == 1 + assert type(tbl["a"]) == DataFrame + + +def test_no_track_times(tmp_path, setup_path): + # GH 32682 + # enables to set track_times (see `pytables` `create_table` documentation) + + def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128): + h = hash_factory() + with open(filename, "rb") as f: + for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""): + h.update(chunk) + return h.digest() + + def create_h5_and_return_checksum(tmp_path, track_times): + path = tmp_path / setup_path + df = DataFrame({"a": [1]}) + + with HDFStore(path, mode="w") as hdf: + hdf.put( + "table", + df, + format="table", + data_columns=True, + index=None, + track_times=track_times, + ) + + return checksum(path) + + checksum_0_tt_false = create_h5_and_return_checksum(tmp_path, track_times=False) + checksum_0_tt_true = create_h5_and_return_checksum(tmp_path, track_times=True) + + # sleep is necessary to create h5 with different creation time + time.sleep(1) + + checksum_1_tt_false = create_h5_and_return_checksum(tmp_path, track_times=False) + checksum_1_tt_true = create_h5_and_return_checksum(tmp_path, 
track_times=True) + + # checksums are the same if track_time = False + assert checksum_0_tt_false == checksum_1_tt_false + + # checksums are NOT same if track_time = True + assert checksum_0_tt_true != checksum_1_tt_true + + +def test_iter_empty(setup_path): + with ensure_clean_store(setup_path) as store: + # GH 12221 + assert list(store) == [] + + +def test_repr(setup_path): + with ensure_clean_store(setup_path) as store: + repr(store) + store.info() + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["b"] = Series( + range(10), dtype="float64", index=[f"i_{i}" for i in range(10)] + ) + store["c"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["bool1"] = df["A"] > 0 + df["bool2"] = df["B"] > 0 + df["bool3"] = True + df["int1"] = 1 + df["int2"] = 2 + df["timestamp1"] = Timestamp("20010102") + df["timestamp2"] = Timestamp("20010103") + df["datetime1"] = dt.datetime(2001, 1, 2, 0, 0) + df["datetime2"] = dt.datetime(2001, 1, 3, 0, 0) + df.loc[df.index[3:6], ["obj1"]] = np.nan + df = df._consolidate() + + with tm.assert_produces_warning(pd.errors.PerformanceWarning): + store["df"] = df + + # make a random group in hdf space + store._handle.create_group(store._handle.root, "bah") + + assert store.filename in repr(store) + assert store.filename in str(store) + store.info() + + # storers + with ensure_clean_store(setup_path) as store: + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + store.append("df", df) + + s = store.get_storer("df") + repr(s) + str(s) + + +def test_contains(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["b"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + store["foo/bar"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + assert "a" in store + assert "b" in store + assert "c" not in store + assert "foo/bar" in store + assert "/foo/bar" in store + assert "/foo/b" not in store + assert "bar" not in store + + # gh-2694: tables.NaturalNameWarning + with tm.assert_produces_warning( + tables.NaturalNameWarning, check_stacklevel=False + ): + store["node())"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + assert "node())" in store + + +def test_versioning(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["b"] = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 4)), + columns=Index(list("ABCD"), dtype=object), + 
index=date_range("2000-01-01", periods=20, freq="B"), + ) + _maybe_remove(store, "df1") + store.append("df1", df[:10]) + store.append("df1", df[10:]) + assert store.root.a._v_attrs.pandas_version == "0.15.2" + assert store.root.b._v_attrs.pandas_version == "0.15.2" + assert store.root.df1._v_attrs.pandas_version == "0.15.2" + + # write a file and wipe its versioning + _maybe_remove(store, "df2") + store.append("df2", df) + + # this is an error because its table_type is appendable, but no + # version info + store.get_node("df2")._v_attrs.pandas_version = None + + msg = "'NoneType' object has no attribute 'startswith'" + + with pytest.raises(Exception, match=msg): + store.select("df2") + + +@pytest.mark.parametrize( + "where, expected", + [ + ( + "/", + { + "": ({"first_group", "second_group"}, set()), + "/first_group": (set(), {"df1", "df2"}), + "/second_group": ({"third_group"}, {"df3", "s1"}), + "/second_group/third_group": (set(), {"df4"}), + }, + ), + ( + "/second_group", + { + "/second_group": ({"third_group"}, {"df3", "s1"}), + "/second_group/third_group": (set(), {"df4"}), + }, + ), + ], +) +def test_walk(where, expected): + # GH10143 + objs = { + "df1": DataFrame([1, 2, 3]), + "df2": DataFrame([4, 5, 6]), + "df3": DataFrame([6, 7, 8]), + "df4": DataFrame([9, 10, 11]), + "s1": Series([10, 9, 8]), + # Next 3 items aren't pandas objects and should be ignored + "a1": np.array([[1, 2, 3], [4, 5, 6]]), + "tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"), + "tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"), + } + + with ensure_clean_store("walk_groups.hdf", mode="w") as store: + store.put("/first_group/df1", objs["df1"]) + store.put("/first_group/df2", objs["df2"]) + store.put("/second_group/df3", objs["df3"]) + store.put("/second_group/s1", objs["s1"]) + store.put("/second_group/third_group/df4", objs["df4"]) + # Create non-pandas objects + store._handle.create_array("/first_group", "a1", objs["a1"]) + store._handle.create_table("/first_group", "tb1", obj=objs["tb1"]) + store._handle.create_table("/second_group", "tb2", obj=objs["tb2"]) + + assert len(list(store.walk(where=where))) == len(expected) + for path, groups, leaves in store.walk(where=where): + assert path in expected + expected_groups, expected_frames = expected[path] + assert expected_groups == set(groups) + assert expected_frames == set(leaves) + for leaf in leaves: + frame_path = "/".join([path, leaf]) + obj = store.get(frame_path) + if "df" in leaf: + tm.assert_frame_equal(obj, objs[leaf]) + else: + tm.assert_series_equal(obj, objs[leaf]) + + +def test_getattr(setup_path): + with ensure_clean_store(setup_path) as store: + s = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["a"] = s + + # test attribute access + result = store.a + tm.assert_series_equal(result, s) + result = getattr(store, "a") + tm.assert_series_equal(result, s) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + store["df"] = df + result = store.df + tm.assert_frame_equal(result, df) + + # errors + for x in ["d", "mode", "path", "handle", "complib"]: + msg = f"'HDFStore' object has no attribute '{x}'" + with pytest.raises(AttributeError, match=msg): + getattr(store, x) + + # not stores + for x in ["mode", "path", "handle", "complib"]: + getattr(store, f"_{x}") + + +def test_store_dropna(tmp_path, setup_path): + df_with_missing = DataFrame( + {"col1": [0.0, np.nan, 
2.0], "col2": [1.0, np.nan, np.nan]}, + index=list("abc"), + ) + df_without_missing = DataFrame( + {"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac") + ) + + # # Test to make sure defaults are to not drop. + # # Corresponding to Issue 9382 + path = tmp_path / setup_path + df_with_missing.to_hdf(path, key="df", format="table") + reloaded = read_hdf(path, "df") + tm.assert_frame_equal(df_with_missing, reloaded) + + path = tmp_path / setup_path + df_with_missing.to_hdf(path, key="df", format="table", dropna=False) + reloaded = read_hdf(path, "df") + tm.assert_frame_equal(df_with_missing, reloaded) + + path = tmp_path / setup_path + df_with_missing.to_hdf(path, key="df", format="table", dropna=True) + reloaded = read_hdf(path, "df") + tm.assert_frame_equal(df_without_missing, reloaded) + + +def test_keyword_deprecation(tmp_path, setup_path): + # GH 54229 + path = tmp_path / setup_path + + msg = ( + "Starting with pandas version 3.0 all arguments of to_hdf except for the " + "argument 'path_or_buf' will be keyword-only." + ) + df = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 1, "B": 2, "C": 3}]) + + with tm.assert_produces_warning(FutureWarning, match=msg): + df.to_hdf(path, "key") + + +def test_to_hdf_with_min_itemsize(tmp_path, setup_path): + path = tmp_path / setup_path + + # min_itemsize in index with to_hdf (GH 10381) + df = DataFrame( + { + "A": [0.0, 1.0, 2.0, 3.0, 4.0], + "B": [0.0, 1.0, 0.0, 1.0, 0.0], + "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object), + "D": date_range("20130101", periods=5), + } + ).set_index("C") + df.to_hdf(path, key="ss3", format="table", min_itemsize={"index": 6}) + # just make sure there is a longer string: + df2 = df.copy().reset_index().assign(C="longer").set_index("C") + df2.to_hdf(path, key="ss3", append=True, format="table") + tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2])) + + # same as above, with a Series + df["B"].to_hdf(path, key="ss4", format="table", min_itemsize={"index": 6}) + df2["B"].to_hdf(path, key="ss4", append=True, format="table") + tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]])) + + +@pytest.mark.parametrize("format", ["fixed", "table"]) +def test_to_hdf_errors(tmp_path, format, setup_path): + data = ["\ud800foo"] + ser = Series(data, index=Index(data)) + path = tmp_path / setup_path + # GH 20835 + ser.to_hdf(path, key="table", format=format, errors="surrogatepass") + + result = read_hdf(path, "table", errors="surrogatepass") + tm.assert_series_equal(result, ser) + + +def test_create_table_index(setup_path): + with ensure_clean_store(setup_path) as store: + + def col(t, column): + return getattr(store.get_storer(t).table.cols, column) + + # data columns + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df["string2"] = "bar" + store.append("f", df, data_columns=["string", "string2"]) + assert col("f", "index").is_indexed is True + assert col("f", "string").is_indexed is True + assert col("f", "string2").is_indexed is True + + # specify index=columns + store.append("f2", df, index=["string"], data_columns=["string", "string2"]) + assert col("f2", "index").is_indexed is False + assert col("f2", "string").is_indexed is True + assert col("f2", "string2").is_indexed is False + + # try to index a non-table + _maybe_remove(store, "f2") + store.put("f2", df) + msg = "cannot create table index on a Fixed format store" + with 
pytest.raises(TypeError, match=msg): + store.create_table_index("f2") + + +def test_create_table_index_data_columns_argument(setup_path): + # GH 28156 + + with ensure_clean_store(setup_path) as store: + + def col(t, column): + return getattr(store.get_storer(t).table.cols, column) + + # data columns + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df["string"] = "foo" + df["string2"] = "bar" + store.append("f", df, data_columns=["string"]) + assert col("f", "index").is_indexed is True + assert col("f", "string").is_indexed is True + + msg = "'Cols' object has no attribute 'string2'" + with pytest.raises(AttributeError, match=msg): + col("f", "string2").is_indexed + + # try to index a col which isn't a data_column + msg = ( + "column string2 is not a data_column.\n" + "In order to read column string2 you must reload the dataframe \n" + "into HDFStore and include string2 with the data_columns argument." + ) + with pytest.raises(AttributeError, match=msg): + store.create_table_index("f", columns=["string2"]) + + +def test_mi_data_columns(setup_path): + # GH 14435 + idx = MultiIndex.from_arrays( + [date_range("2000-01-01", periods=5), range(5)], names=["date", "id"] + ) + df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx) + + with ensure_clean_store(setup_path) as store: + store.append("df", df, data_columns=True) + + actual = store.select("df", where="id == 1") + expected = df.iloc[[1], :] + tm.assert_frame_equal(actual, expected) + + +def test_table_mixed_dtypes(setup_path): + # frame + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df["obj1"] = "foo" + df["obj2"] = "bar" + df["bool1"] = df["A"] > 0 + df["bool2"] = df["B"] > 0 + df["bool3"] = True + df["int1"] = 1 + df["int2"] = 2 + df["timestamp1"] = Timestamp("20010102").as_unit("ns") + df["timestamp2"] = Timestamp("20010103").as_unit("ns") + df["datetime1"] = Timestamp("20010102").as_unit("ns") + df["datetime2"] = Timestamp("20010103").as_unit("ns") + df.loc[df.index[3:6], ["obj1"]] = np.nan + df = df._consolidate() + + with ensure_clean_store(setup_path) as store: + store.append("df1_mixed", df) + tm.assert_frame_equal(store.select("df1_mixed"), df) + + +def test_calendar_roundtrip_issue(setup_path): + # 8591 + # doc example from tseries holiday section + weekmask_egypt = "Sun Mon Tue Wed Thu" + holidays = [ + "2012-05-01", + dt.datetime(2013, 5, 1), + np.datetime64("2014-05-01"), + ] + bday_egypt = pd.offsets.CustomBusinessDay( + holidays=holidays, weekmask=weekmask_egypt + ) + mydt = dt.datetime(2013, 4, 30) + dts = date_range(mydt, periods=5, freq=bday_egypt) + + s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split())) + + with ensure_clean_store(setup_path) as store: + store.put("fixed", s) + result = store.select("fixed") + tm.assert_series_equal(result, s) + + store.append("table", s) + result = store.select("table") + tm.assert_series_equal(result, s) + + +def test_remove(setup_path): + with ensure_clean_store(setup_path) as store: + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + store["a"] = ts + store["b"] = df + _maybe_remove(store, "a") 
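The removal semantics being checked in test_remove, reduced to a minimal sketch with a hypothetical demo_remove.h5 path (remove(), del, and len() are the public HDFStore surface involved):

import pandas as pd

with pd.HDFStore("demo_remove.h5", mode="w") as store:  # hypothetical path
    store["a"] = pd.Series(range(3), dtype="float64")
    store["b/foo"] = pd.DataFrame({"x": [1, 2]})  # keys may be paths
    assert len(store) == 2
    store.remove("a")   # drop one key
    del store["b/foo"]  # __delitem__ behaves the same
    assert len(store) == 0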
+ assert len(store) == 1 + tm.assert_frame_equal(df, store["b"]) + + _maybe_remove(store, "b") + assert len(store) == 0 + + # nonexistence + with pytest.raises( + KeyError, match="'No object named a_nonexistent_store in the file'" + ): + store.remove("a_nonexistent_store") + + # pathing + store["a"] = ts + store["b/foo"] = df + _maybe_remove(store, "foo") + _maybe_remove(store, "b/foo") + assert len(store) == 1 + + store["a"] = ts + store["b/foo"] = df + _maybe_remove(store, "b") + assert len(store) == 1 + + # __delitem__ + store["a"] = ts + store["b"] = df + del store["a"] + del store["b"] + assert len(store) == 0 + + +def test_same_name_scoping(setup_path): + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 2)), + index=date_range("20130101", periods=20), + ) + store.put("df", df, format="table") + expected = df[df.index > Timestamp("20130105")] + + result = store.select("df", "index>datetime.datetime(2013,1,5)") + tm.assert_frame_equal(result, expected) + + # changes what 'datetime' points to in the namespace where + # 'select' does the lookup + + # technically an error, but allow it + result = store.select("df", "index>datetime.datetime(2013,1,5)") + tm.assert_frame_equal(result, expected) + + result = store.select("df", "index>datetime(2013,1,5)") + tm.assert_frame_equal(result, expected) + + +def test_store_index_name(setup_path): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.index.name = "foo" + + with ensure_clean_store(setup_path) as store: + store["frame"] = df + recons = store["frame"] + tm.assert_frame_equal(recons, df) + + +@pytest.mark.parametrize("tz", [None, "US/Pacific"]) +@pytest.mark.parametrize("table_format", ["table", "fixed"]) +def test_store_index_name_numpy_str(tmp_path, table_format, setup_path, unit, tz): + # GH #13492 + idx = DatetimeIndex( + [dt.date(2000, 1, 1), dt.date(2000, 1, 2)], + name="cols\u05d2", + ).tz_localize(tz) + idx1 = ( + DatetimeIndex( + [dt.date(2010, 1, 1), dt.date(2010, 1, 2)], + name="rows\u05d0", + ) + .as_unit(unit) + .tz_localize(tz) + ) + df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1) + + # This used to fail, returning numpy strings instead of python strings. 
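The GH 13492 behavior referenced just above, reduced to its essence: axis names round-trip as Python str, not numpy.str_. A sketch under that assumption, with a hypothetical demo_names.h5 path and the default fixed format:

import pandas as pd

df = pd.DataFrame(
    [[1, 2], [3, 4]],
    index=pd.Index(["r1", "r2"], name="rows\u05d0"),
    columns=pd.Index(["c1", "c2"], name="cols\u05d2"),
)
df.to_hdf("demo_names.h5", key="df")  # hypothetical path, fixed format
out = pd.read_hdf("demo_names.h5", "df")
assert isinstance(out.index.name, str) and out.index.name == "rows\u05d0"
assert isinstance(out.columns.name, str) and out.columns.name == "cols\u05d2"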
+ path = tmp_path / setup_path + df.to_hdf(path, key="df", format=table_format) + df2 = read_hdf(path, "df") + + tm.assert_frame_equal(df, df2, check_names=True) + + assert isinstance(df2.index.name, str) + assert isinstance(df2.columns.name, str) + + +def test_store_series_name(setup_path): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + series = df["A"] + + with ensure_clean_store(setup_path) as store: + store["series"] = series + recons = store["series"] + tm.assert_series_equal(recons, series) + + +def test_overwrite_node(setup_path): + with ensure_clean_store(setup_path) as store: + store["a"] = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + store["a"] = ts + + tm.assert_series_equal(store["a"], ts) + + +def test_coordinates(setup_path): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + with ensure_clean_store(setup_path) as store: + _maybe_remove(store, "df") + store.append("df", df) + + # all + c = store.select_as_coordinates("df") + assert (c.values == np.arange(len(df.index))).all() + + # get coordinates back & test vs frame + _maybe_remove(store, "df") + + df = DataFrame({"A": range(5), "B": range(5)}) + store.append("df", df) + c = store.select_as_coordinates("df", ["index<3"]) + assert (c.values == np.arange(3)).all() + result = store.select("df", where=c) + expected = df.loc[0:2, :] + tm.assert_frame_equal(result, expected) + + c = store.select_as_coordinates("df", ["index>=3", "index<=4"]) + assert (c.values == np.arange(2) + 3).all() + result = store.select("df", where=c) + expected = df.loc[3:4, :] + tm.assert_frame_equal(result, expected) + assert isinstance(c, Index) + + # multiple tables + _maybe_remove(store, "df1") + _maybe_remove(store, "df2") + df1 = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df2 = df1.copy().rename(columns="{}_2".format) + store.append("df1", df1, data_columns=["A", "B"]) + store.append("df2", df2) + + c = store.select_as_coordinates("df1", ["A>0", "B>0"]) + df1_result = store.select("df1", c) + df2_result = store.select("df2", c) + result = concat([df1_result, df2_result], axis=1) + + expected = concat([df1, df2], axis=1) + expected = expected[(expected.A > 0) & (expected.B > 0)] + tm.assert_frame_equal(result, expected, check_freq=False) + # FIXME: 2021-01-18 on some (mostly windows) builds we get freq=None + # but expect freq="18B" + + # pass array/mask as the coordinates + with ensure_clean_store(setup_path) as store: + df = DataFrame( + np.random.default_rng(2).standard_normal((1000, 2)), + index=date_range("20000101", periods=1000), + ) + store.append("df", df) + c = store.select_column("df", "index") + where = c[DatetimeIndex(c).month == 5].index + expected = df.iloc[where] + + # locations + result = store.select("df", where=where) + tm.assert_frame_equal(result, expected) + + # boolean + result = store.select("df", where=where) + tm.assert_frame_equal(result, expected) + + # invalid + msg = ( + "where must be passed as a string, PyTablesExpr, " + "or 
list-like of PyTablesExpr" + ) + with pytest.raises(TypeError, match=msg): + store.select("df", where=np.arange(len(df), dtype="float64")) + + with pytest.raises(TypeError, match=msg): + store.select("df", where=np.arange(len(df) + 1)) + + with pytest.raises(TypeError, match=msg): + store.select("df", where=np.arange(len(df)), start=5) + + with pytest.raises(TypeError, match=msg): + store.select("df", where=np.arange(len(df)), start=5, stop=10) + + # selection with filter + selection = date_range("20000101", periods=500) + result = store.select("df", where="index in selection") + expected = df[df.index.isin(selection)] + tm.assert_frame_equal(result, expected) + + # list + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + store.append("df2", df) + result = store.select("df2", where=[0, 3, 5]) + expected = df.iloc[[0, 3, 5]] + tm.assert_frame_equal(result, expected) + + # boolean + where = [True] * 10 + where[-2] = False + result = store.select("df2", where=where) + expected = df.loc[where] + tm.assert_frame_equal(result, expected) + + # start/stop + result = store.select("df2", start=5, stop=10) + expected = df[5:10] + tm.assert_frame_equal(result, expected) + + +def test_start_stop_table(setup_path): + with ensure_clean_store(setup_path) as store: + # table + df = DataFrame( + { + "A": np.random.default_rng(2).random(20), + "B": np.random.default_rng(2).random(20), + } + ) + store.append("df", df) + + result = store.select("df", "columns=['A']", start=0, stop=5) + expected = df.loc[0:4, ["A"]] + tm.assert_frame_equal(result, expected) + + # out of range + result = store.select("df", "columns=['A']", start=30, stop=40) + assert len(result) == 0 + expected = df.loc[30:40, ["A"]] + tm.assert_frame_equal(result, expected) + + +def test_start_stop_multiple(setup_path): + # GH 16209 + with ensure_clean_store(setup_path) as store: + df = DataFrame({"foo": [1, 2], "bar": [1, 2]}) + + store.append_to_multiple( + {"selector": ["foo"], "data": None}, df, selector="selector" + ) + result = store.select_as_multiple( + ["selector", "data"], selector="selector", start=0, stop=1 + ) + expected = df.loc[[0], ["foo", "bar"]] + tm.assert_frame_equal(result, expected) + + +def test_start_stop_fixed(setup_path): + with ensure_clean_store(setup_path) as store: + # fixed, GH 8287 + df = DataFrame( + { + "A": np.random.default_rng(2).random(20), + "B": np.random.default_rng(2).random(20), + }, + index=date_range("20130101", periods=20), + ) + store.put("df", df) + + result = store.select("df", start=0, stop=5) + expected = df.iloc[0:5, :] + tm.assert_frame_equal(result, expected) + + result = store.select("df", start=5, stop=10) + expected = df.iloc[5:10, :] + tm.assert_frame_equal(result, expected) + + # out of range + result = store.select("df", start=30, stop=40) + expected = df.iloc[30:40, :] + tm.assert_frame_equal(result, expected) + + # series + s = df.A + store.put("s", s) + result = store.select("s", start=0, stop=5) + expected = s.iloc[0:5] + tm.assert_series_equal(result, expected) + + result = store.select("s", start=5, stop=10) + expected = s.iloc[5:10] + tm.assert_series_equal(result, expected) + + # sparse; not implemented + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + df.iloc[3:5, 1:3] = np.nan + df.iloc[8:10, -2] = np.nan + + +def test_select_filter_corner(setup_path): + df = DataFrame(np.random.default_rng(2).standard_normal((50, 100))) + df.index = 
[f"{c:3d}" for c in df.index] + df.columns = [f"{c:3d}" for c in df.columns] + + with ensure_clean_store(setup_path) as store: + store.put("frame", df, format="table") + + crit = "columns=df.columns[:75]" + result = store.select("frame", [crit]) + tm.assert_frame_equal(result, df.loc[:, df.columns[:75]]) + + crit = "columns=df.columns[:75:2]" + result = store.select("frame", [crit]) + tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]]) + + +def test_path_pathlib(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + result = tm.round_trip_pathlib( + lambda p: df.to_hdf(p, key="df"), lambda p: read_hdf(p, "df") + ) + tm.assert_frame_equal(df, result) + + +@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)]) +def test_contiguous_mixed_data_table(start, stop, setup_path): + # GH 17021 + df = DataFrame( + { + "a": Series([20111010, 20111011, 20111012]), + "b": Series(["ab", "cd", "ab"]), + } + ) + + with ensure_clean_store(setup_path) as store: + store.append("test_dataset", df) + + result = store.select("test_dataset", start=start, stop=stop) + tm.assert_frame_equal(df[start:stop], result) + + +def test_path_pathlib_hdfstore(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + def writer(path): + with HDFStore(path) as store: + df.to_hdf(store, key="df") + + def reader(path): + with HDFStore(path) as store: + return read_hdf(store, "df") + + result = tm.round_trip_pathlib(writer, reader) + tm.assert_frame_equal(df, result) + + +def test_pickle_path_localpath(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + result = tm.round_trip_pathlib( + lambda p: df.to_hdf(p, key="df"), lambda p: read_hdf(p, "df") + ) + tm.assert_frame_equal(df, result) + + +def test_path_localpath_hdfstore(): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + def writer(path): + with HDFStore(path) as store: + df.to_hdf(store, key="df") + + def reader(path): + with HDFStore(path) as store: + return read_hdf(store, "df") + + result = tm.round_trip_localpath(writer, reader) + tm.assert_frame_equal(df, result) + + +@pytest.mark.parametrize("propindexes", [True, False]) +def test_copy(propindexes): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + + with tm.ensure_clean() as path: + with HDFStore(path) as st: + st.append("df", df, data_columns=["A"]) + with tempfile.NamedTemporaryFile() as new_f: + with HDFStore(path) as store: + with contextlib.closing( + store.copy(new_f.name, keys=None, propindexes=propindexes) + ) as tstore: + # check keys + keys = store.keys() + assert set(keys) == set(tstore.keys()) + # check indices & nrows + for k in tstore.keys(): + if tstore.get_storer(k).is_table: + new_t = tstore.get_storer(k) + orig_t = store.get_storer(k) + + assert orig_t.nrows == new_t.nrows + + # check propindixes + if propindexes: + for a in orig_t.axes: + if a.is_indexed: + assert new_t[a.name].is_indexed + + +def test_duplicate_column_name(tmp_path, setup_path): + df = DataFrame(columns=["a", "a"], 
+
+    path = tmp_path / setup_path
+    msg = "Columns index has to be unique for fixed format"
+    with pytest.raises(ValueError, match=msg):
+        df.to_hdf(path, key="df", format="fixed")
+
+    df.to_hdf(path, key="df", format="table")
+    other = read_hdf(path, "df")
+
+    tm.assert_frame_equal(df, other)
+    assert df.equals(other)
+    assert other.equals(df)
+
+
+def test_preserve_timedeltaindex_type(setup_path):
+    # GH9635
+    df = DataFrame(np.random.default_rng(2).normal(size=(10, 5)))
+    df.index = timedelta_range(start="0s", periods=10, freq="1s", name="example")
+
+    with ensure_clean_store(setup_path) as store:
+        store["df"] = df
+        tm.assert_frame_equal(store["df"], df)
+
+
+def test_columns_multiindex_modified(tmp_path, setup_path):
+    # BUG: 7212
+
+    df = DataFrame(
+        np.random.default_rng(2).random((4, 5)),
+        index=list("abcd"),
+        columns=list("ABCDE"),
+    )
+    df.index.name = "letters"
+    df = df.set_index(keys="E", append=True)
+
+    data_columns = df.index.names + df.columns.tolist()
+    path = tmp_path / setup_path
+    df.to_hdf(
+        path,
+        key="df",
+        mode="a",
+        append=True,
+        data_columns=data_columns,
+        index=False,
+    )
+    cols2load = list("BCD")
+    cols2load_original = list(cols2load)
+    # GH#10055 make sure read_hdf call does not alter cols2load inplace
+    read_hdf(path, "df", columns=cols2load)
+    assert cols2load_original == cols2load
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+@pytest.mark.parametrize(
+    "columns",
+    [
+        Index([0, 1], dtype=np.int64),
+        Index([0.0, 1.0], dtype=np.float64),
+        date_range("2020-01-01", periods=2),
+        timedelta_range("1 day", periods=2),
+        period_range("2020-01-01", periods=2, freq="D"),
+    ],
+)
+def test_to_hdf_with_object_column_names_should_fail(tmp_path, setup_path, columns):
+    # GH9057
+    df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), columns=columns)
+    path = tmp_path / setup_path
+    msg = "cannot have non-object label DataIndexableCol"
+    with pytest.raises(ValueError, match=msg):
+        df.to_hdf(path, key="df", format="table", data_columns=True)
+
+
+@pytest.mark.parametrize("dtype", [None, "category"])
+def test_to_hdf_with_object_column_names_should_run(tmp_path, setup_path, dtype):
+    # GH9057
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((10, 2)),
+        columns=Index(["a", "b"], dtype=dtype),
+    )
+    path = tmp_path / setup_path
+    df.to_hdf(path, key="df", format="table", data_columns=True)
+    result = read_hdf(path, "df", where=f"index = [{df.index[0]}]")
+    assert len(result)
+
+
+def test_hdfstore_strides(setup_path):
+    # GH22073
+    df = DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
+    with ensure_clean_store(setup_path) as store:
+        store.put("df", df)
+        assert df["a"].values.strides == store["df"]["a"].values.strides
+
+
+def test_store_bool_index(tmp_path, setup_path):
+    # GH#48667
+    df = DataFrame([[1]], columns=[True], index=Index([False], dtype="bool"))
+    expected = df.copy()
+
+    # Test to make sure defaults are to not drop.
+    # Corresponding to Issue 9382
+    path = tmp_path / setup_path
+    df.to_hdf(path, key="a")
+    result = read_hdf(path, "a")
+    tm.assert_frame_equal(expected, result)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py
new file mode 100644
index 0000000000000000000000000000000000000000..726dd0d42034756b205c427c60546afb2901be49
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py
@@ -0,0 +1,72 @@
+import datetime
+
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    DatetimeIndex,
+    Series,
+    _testing as tm,
+    date_range,
+    period_range,
+)
+from pandas.tests.io.pytables.common import ensure_clean_store
+
+pytestmark = pytest.mark.single_cpu
+
+
+@pytest.mark.parametrize("unit", ["us", "ns"])
+def test_store_datetime_fractional_secs(setup_path, unit):
+    dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
+    dti = DatetimeIndex([dt], dtype=f"M8[{unit}]")
+    series = Series([0], index=dti)
+    with ensure_clean_store(setup_path) as store:
+        store["a"] = series
+        assert store["a"].index[0] == dt
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+def test_tseries_indices_series(setup_path):
+    with ensure_clean_store(setup_path) as store:
+        idx = date_range("2020-01-01", periods=10)
+        ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
+        store["a"] = ser
+        result = store["a"]
+
+        tm.assert_series_equal(result, ser)
+        assert result.index.freq == ser.index.freq
+        tm.assert_class_equal(result.index, ser.index, obj="series index")
+
+        idx = period_range("2020-01-01", periods=10, freq="D")
+        ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
+        store["a"] = ser
+        result = store["a"]
+
+        tm.assert_series_equal(result, ser)
+        assert result.index.freq == ser.index.freq
+        tm.assert_class_equal(result.index, ser.index, obj="series index")
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+def test_tseries_indices_frame(setup_path):
+    with ensure_clean_store(setup_path) as store:
+        idx = date_range("2020-01-01", periods=10)
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx
+        )
+        store["a"] = df
+        result = store["a"]
+
+        tm.assert_frame_equal(result, df)
+        assert result.index.freq == df.index.freq
+        tm.assert_class_equal(result.index, df.index, obj="dataframe index")
+
+        idx = period_range("2020-01-01", periods=10, freq="D")
+        df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 3)), idx)
+        store["a"] = df
+        result = store["a"]
+
+        tm.assert_frame_equal(result, df)
+        assert result.index.freq == df.index.freq
+        tm.assert_class_equal(result.index, df.index, obj="dataframe index")
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5613daf62207319d9dc1e10beea669cd7248f38
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py
@@ -0,0 +1,378 @@
+from datetime import (
+    date,
+    timedelta,
+)
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs.timezones import maybe_get_tz
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    DatetimeIndex,
+    Series,
+    Timestamp,
+    date_range,
+)
+import pandas._testing as tm
+from pandas.tests.io.pytables.common import (
+    _maybe_remove,
+    ensure_clean_store,
+)
+
+
+def _compare_with_tz(a, b):
+    tm.assert_frame_equal(a, b)
+
+    # compare the zones on each element
+    for c in a.columns:
+        for i in a.index:
+            a_e = a.loc[i, c]
+            b_e = b.loc[i, c]
+            if not (a_e == b_e and a_e.tz == b_e.tz):
+                raise AssertionError(f"invalid tz comparison [{a_e}] [{b_e}]")
+
+
+# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows
+# filename issues.
+gettz_dateutil = lambda x: maybe_get_tz("dateutil/" + x)
+gettz_pytz = lambda x: x
+
+
+@pytest.mark.parametrize("gettz", [gettz_dateutil, gettz_pytz])
+def test_append_with_timezones(setup_path, gettz):
+    # as columns
+
+    # Single-tzinfo, no DST transition
+    df_est = DataFrame(
+        {
+            "A": [
+                Timestamp("20130102 2:00:00", tz=gettz("US/Eastern")).as_unit("ns")
+                + timedelta(hours=1) * i
+                for i in range(5)
+            ]
+        }
+    )
+
+    # frame with all columns having same tzinfo, but different sides
+    # of DST transition
+    df_crosses_dst = DataFrame(
+        {
+            "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"),
+            "B": Timestamp("20130603", tz=gettz("US/Eastern")).as_unit("ns"),
+        },
+        index=range(5),
+    )
+
+    df_mixed_tz = DataFrame(
+        {
+            "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"),
+            "B": Timestamp("20130102", tz=gettz("EET")).as_unit("ns"),
+        },
+        index=range(5),
+    )
+
+    df_different_tz = DataFrame(
+        {
+            "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"),
+            "B": Timestamp("20130102", tz=gettz("CET")).as_unit("ns"),
+        },
+        index=range(5),
+    )
+
+    with ensure_clean_store(setup_path) as store:
+        _maybe_remove(store, "df_tz")
+        store.append("df_tz", df_est, data_columns=["A"])
+        result = store["df_tz"]
+        _compare_with_tz(result, df_est)
+        tm.assert_frame_equal(result, df_est)
+
+        # select with tz aware
+        expected = df_est[df_est.A >= df_est.A[3]]
+        result = store.select("df_tz", where="A>=df_est.A[3]")
+        _compare_with_tz(result, expected)
+
+        # ensure we include dates in DST and STD time here.
+        _maybe_remove(store, "df_tz")
+        store.append("df_tz", df_crosses_dst)
+        result = store["df_tz"]
+        _compare_with_tz(result, df_crosses_dst)
+        tm.assert_frame_equal(result, df_crosses_dst)
+
+        msg = (
+            r"invalid info for \[values_block_1\] for \[tz\], "
+            r"existing_value \[(dateutil/.*)?(US/Eastern|America/New_York)\] "
+            r"conflicts with new value \[(dateutil/.*)?EET\]"
+        )
+        with pytest.raises(ValueError, match=msg):
+            store.append("df_tz", df_mixed_tz)
+
+        # this is ok
+        _maybe_remove(store, "df_tz")
+        store.append("df_tz", df_mixed_tz, data_columns=["A", "B"])
+        result = store["df_tz"]
+        _compare_with_tz(result, df_mixed_tz)
+        tm.assert_frame_equal(result, df_mixed_tz)
+
+        # can't append with diff timezone
+        msg = (
+            r"invalid info for \[B\] for \[tz\], "
+            r"existing_value \[(dateutil/.*)?EET\] "
+            r"conflicts with new value \[(dateutil/.*)?CET\]"
+        )
+        with pytest.raises(ValueError, match=msg):
+            store.append("df_tz", df_different_tz)
+
+
+@pytest.mark.parametrize("gettz", [gettz_dateutil, gettz_pytz])
+def test_append_with_timezones_as_index(setup_path, gettz):
+    # GH#4098 example
+
+    dti = date_range("2000-1-1", periods=3, freq="h", tz=gettz("US/Eastern"))
+    dti = dti._with_freq(None)  # freq doesn't round-trip
+
+    df = DataFrame({"A": Series(range(3), index=dti)})
+
+    with ensure_clean_store(setup_path) as store:
+        _maybe_remove(store, "df")
+        store.put("df", df)
+        result = store.select("df")
+        tm.assert_frame_equal(result, df)
+
+        _maybe_remove(store, "df")
+        store.append("df", df)
+        result = store.select("df")
+        tm.assert_frame_equal(result, df)
+
+
+def test_roundtrip_tz_aware_index(setup_path, unit):
+    # GH 17618
+    ts = Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
+    dti = DatetimeIndex([ts]).as_unit(unit)
+    df = DataFrame(data=[0], index=dti)
+
+    with ensure_clean_store(setup_path) as store:
+        store.put("frame", df, format="fixed")
+        recons = store["frame"]
+        tm.assert_frame_equal(recons, df)
+
+        value = recons.index[0]._value
+        denom = {"ns": 1, "us": 1000, "ms": 10**6, "s": 10**9}[unit]
+        assert value == 946706400000000000 // denom
+
+
+def test_store_index_name_with_tz(setup_path):
+    # GH 13884
+    df = DataFrame({"A": [1, 2]})
+    df.index = DatetimeIndex([1234567890123456787, 1234567890123456788])
+    df.index = df.index.tz_localize("UTC")
+    df.index.name = "foo"
+
+    with ensure_clean_store(setup_path) as store:
+        store.put("frame", df, format="table")
+        recons = store["frame"]
+        tm.assert_frame_equal(recons, df)
+
+
+def test_tseries_select_index_column(setup_path):
+    # GH7777
+    # selecting a UTC datetimeindex column did
+    # not preserve UTC tzinfo set before storing
+
+    # check that no tz still works
+    rng = date_range("1/1/2000", "1/30/2000")
+    frame = DataFrame(
+        np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+    )
+
+    with ensure_clean_store(setup_path) as store:
+        store.append("frame", frame)
+        result = store.select_column("frame", "index")
+        assert rng.tz == DatetimeIndex(result.values).tz
+
+    # check utc
+    rng = date_range("1/1/2000", "1/30/2000", tz="UTC")
+    frame = DataFrame(
+        np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+    )
+
+    with ensure_clean_store(setup_path) as store:
+        store.append("frame", frame)
+        result = store.select_column("frame", "index")
+        assert rng.tz == result.dt.tz
+
+    # double check non-utc
+    rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern")
+    frame = DataFrame(
+        np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+    )
+
+    with ensure_clean_store(setup_path) as store:
store.append("frame", frame) + result = store.select_column("frame", "index") + assert rng.tz == result.dt.tz + + +def test_timezones_fixed_format_frame_non_empty(setup_path): + with ensure_clean_store(setup_path) as store: + # index + rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern") + rng = rng._with_freq(None) # freq doesn't round-trip + df = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + store["df"] = df + result = store["df"] + tm.assert_frame_equal(result, df) + + # as data + # GH11411 + _maybe_remove(store, "df") + df = DataFrame( + { + "A": rng, + "B": rng.tz_convert("UTC").tz_localize(None), + "C": rng.tz_convert("CET"), + "D": range(len(rng)), + }, + index=rng, + ) + store["df"] = df + result = store["df"] + tm.assert_frame_equal(result, df) + + +def test_timezones_fixed_format_empty(setup_path, tz_aware_fixture, frame_or_series): + # GH 20594 + + dtype = pd.DatetimeTZDtype(tz=tz_aware_fixture) + + obj = Series(dtype=dtype, name="A") + if frame_or_series is DataFrame: + obj = obj.to_frame() + + with ensure_clean_store(setup_path) as store: + store["obj"] = obj + result = store["obj"] + tm.assert_equal(result, obj) + + +def test_timezones_fixed_format_series_nonempty(setup_path, tz_aware_fixture): + # GH 20594 + + dtype = pd.DatetimeTZDtype(tz=tz_aware_fixture) + + with ensure_clean_store(setup_path) as store: + s = Series([0], dtype=dtype) + store["s"] = s + result = store["s"] + tm.assert_series_equal(result, s) + + +def test_fixed_offset_tz(setup_path): + rng = date_range("1/1/2000 00:00:00-07:00", "1/30/2000 00:00:00-07:00") + frame = DataFrame( + np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng + ) + + with ensure_clean_store(setup_path) as store: + store["frame"] = frame + recons = store["frame"] + tm.assert_index_equal(recons.index, rng) + assert rng.tz == recons.index.tz + + +@td.skip_if_windows +def test_store_timezone(setup_path): + # GH2852 + # issue storing datetime.date with a timezone as it resets when read + # back in a new timezone + + # original method + with ensure_clean_store(setup_path) as store: + today = date(2013, 9, 10) + df = DataFrame([1, 2, 3], index=[today, today, today]) + store["obj1"] = df + result = store["obj1"] + tm.assert_frame_equal(result, df) + + # with tz setting + with ensure_clean_store(setup_path) as store: + with tm.set_timezone("EST5EDT"): + today = date(2013, 9, 10) + df = DataFrame([1, 2, 3], index=[today, today, today]) + store["obj1"] = df + + with tm.set_timezone("CST6CDT"): + result = store["obj1"] + + tm.assert_frame_equal(result, df) + + +def test_legacy_datetimetz_object(datapath): + # legacy from < 0.17.0 + # 8260 + expected = DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern").as_unit("ns"), + "B": Timestamp("20130603", tz="CET").as_unit("ns"), + }, + index=range(5), + ) + with ensure_clean_store( + datapath("io", "data", "legacy_hdf", "datetimetz_object.h5"), mode="r" + ) as store: + result = store["df"] + tm.assert_frame_equal(result, expected) + + +def test_dst_transitions(setup_path): + # make sure we are not failing on transitions + with ensure_clean_store(setup_path) as store: + times = date_range( + "2013-10-26 23:00", + "2013-10-27 01:00", + tz="Europe/London", + freq="h", + ambiguous="infer", + ) + times = times._with_freq(None) # freq doesn't round-trip + + for i in [times, times + pd.Timedelta("10min")]: + _maybe_remove(store, "df") + df = DataFrame({"A": range(len(i)), "B": i}, index=i) + store.append("df", df) + result = store.select("df") 
+            tm.assert_frame_equal(result, df)
+
+
+def test_read_with_where_tz_aware_index(tmp_path, setup_path):
+    # GH 11926
+    periods = 10
+    dts = date_range("20151201", periods=periods, freq="D", tz="UTC")
+    mi = pd.MultiIndex.from_arrays([dts, range(periods)], names=["DATE", "NO"])
+    expected = DataFrame({"MYCOL": 0}, index=mi)
+
+    key = "mykey"
+    path = tmp_path / setup_path
+    with pd.HDFStore(path) as store:
+        store.append(key, expected, format="table", append=True)
+    result = pd.read_hdf(path, key, where="DATE > 20151130")
+    tm.assert_frame_equal(result, expected)
+
+
+def test_py2_created_with_datetimez(datapath):
+    # The test HDF5 file was created in Python 2, but previously could not
+    # be read in Python 3.
+    #
+    # GH26443
+    index = DatetimeIndex(["2019-01-01T18:00"], dtype="M8[ns, America/New_York]")
+    expected = DataFrame({"data": 123}, index=index)
+    with ensure_clean_store(
+        datapath("io", "data", "legacy_hdf", "gh26443.h5"), mode="r"
+    ) as store:
+        result = store["key"]
+        tm.assert_frame_equal(result, expected)