diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_79_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_79_mp_rank_02_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..218ad1d2846a225bc516ece35e1796e788219352 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_79_mp_rank_02_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c621cdfe648640f959f7941fbd6731fa14500a48341eb294777ec47cb77b297 +size 41830394 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_91_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_91_mp_rank_03_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..cd29751e0ad6bbe7329ce4a16692dee2db91194a --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_91_mp_rank_03_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:992a571497b11e66e56a3bc164e5cae459140c44d281ec6fa6bec406d41e186a +size 41830330 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8191925b824bb2270b8d4352de04659dff2cac4f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1fe74343b34c5dc0c9454aa7dd36804ca48161c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8c157e9f89a7e0c2c8d697b3dee9b924d8a92e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e015c31a5fc1fdf5e8453d1507a7592f30a779d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d26ce6bf2804aed497238258661395d7a2e7f323 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b54b3d13969e83164ce139162f7d468c136bf52 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dfd067a82fbeb577d31ccc3b94d87aa090b15c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1afe0255d493c136f6c6664f1c77db70856024e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10498fb06c14d948ac5dc2cf165f5ff51d6fc517 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py new file mode 100644 index 0000000000000000000000000000000000000000..b5bb9b27258d86cda6e44aeae17a4cdba4157a43 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py @@ -0,0 +1,77 @@ +import functools + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +import pandas._testing as tm + +pytest.importorskip("odf") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture(autouse=True) +def cd_and_set_engine(monkeypatch, datapath): + func = functools.partial(pd.read_excel, engine="odf") + monkeypatch.setattr(pd, "read_excel", func) + monkeypatch.chdir(datapath("io", "data", "excel")) + + +def test_read_invalid_types_raises(): + # the invalid_value_type.ods required manually editing + # of the included content.xml file + with pytest.raises(ValueError, match="Unrecognized type awesome_new_type"): + pd.read_excel("invalid_value_type.ods") + + +def test_read_writer_table(): + # Also test reading tables from an text OpenDocument file + # (.odt) + index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header") + expected = pd.DataFrame( + [[1, np.nan, 7], [2, np.nan, 8], [3, np.nan, 9]], + index=index, + columns=["Column 1", "Unnamed: 2", "Column 3"], + ) + + result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0) + + tm.assert_frame_equal(result, expected) + + +def test_read_newlines_between_xml_elements_table(): + # GH#45598 + expected = 
pd.DataFrame( + [[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]], + columns=["Column 1", "Column 2", "Column 3"], + ) + + result = pd.read_excel("test_newlines.ods") + + tm.assert_frame_equal(result, expected) + + +def test_read_unempty_cells(): + expected = pd.DataFrame( + [1, np.nan, 3, np.nan, 5], + columns=["Column 1"], + ) + + result = pd.read_excel("test_unempty_cells.ods") + + tm.assert_frame_equal(result, expected) + + +def test_read_cell_annotation(): + expected = pd.DataFrame( + ["test", np.nan, "test 3"], + columns=["Column 1"], + ) + + result = pd.read_excel("test_cell_annotation.ods") + + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py new file mode 100644 index 0000000000000000000000000000000000000000..1c728ad801bc139c1ca1cd2e902884a5a2c91ffc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py @@ -0,0 +1,106 @@ +from datetime import ( + date, + datetime, +) +import re + +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +import pandas._testing as tm + +from pandas.io.excel import ExcelWriter + +odf = pytest.importorskip("odf") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture +def ext(): + return ".ods" + + +def test_write_append_mode_raises(ext): + msg = "Append mode is not supported with odf!" + + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=msg): + ExcelWriter(f, engine="odf", mode="a") + + +@pytest.mark.parametrize("engine_kwargs", [None, {"kwarg": 1}]) +def test_engine_kwargs(ext, engine_kwargs): + # GH 42286 + # GH 43445 + # test for error: OpenDocumentSpreadsheet does not accept any arguments + with tm.ensure_clean(ext) as f: + if engine_kwargs is not None: + error = re.escape( + "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'" + ) + with pytest.raises( + TypeError, + match=error, + ): + ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) + else: + with ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) as _: + pass + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f) as writer: + assert writer.sheets == {} + table = odf.table.Table(name="test_name") + writer.book.spreadsheet.addElement(table) + assert writer.sheets == {"test_name": table} + + +@pytest.mark.parametrize( + ["value", "cell_value_type", "cell_value_attribute", "cell_value"], + argvalues=[ + (True, "boolean", "boolean-value", "true"), + ("test string", "string", "string-value", "test string"), + (1, "float", "value", "1"), + (1.5, "float", "value", "1.5"), + ( + datetime(2010, 10, 10, 10, 10, 10), + "date", + "date-value", + "2010-10-10T10:10:10", + ), + (date(2010, 10, 10), "date", "date-value", "2010-10-10"), + ], +) +def test_cell_value_type(ext, value, cell_value_type, cell_value_attribute, cell_value): + # GH#54994 ODS: cell attributes should follow specification + # http://docs.oasis-open.org/office/v1.2/os/OpenDocument-v1.2-os-part1.html#refTable13 + from odf.namespaces import OFFICENS + from odf.table import ( + TableCell, + TableRow, + ) + + table_cell_name = TableCell().qname + + with tm.ensure_clean(ext) as f: + pd.DataFrame([[value]]).to_excel(f, header=False, index=False) + + with pd.ExcelFile(f) as wb: + sheet = wb._reader.get_sheet_by_index(0) + 
sheet_rows = sheet.getElementsByType(TableRow) + sheet_cells = [ + x + for x in sheet_rows[0].childNodes + if hasattr(x, "qname") and x.qname == table_cell_name + ] + + cell = sheet_cells[0] + assert cell.attributes.get((OFFICENS, "value-type")) == cell_value_type + assert cell.attributes.get((OFFICENS, cell_value_attribute)) == cell_value diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py new file mode 100644 index 0000000000000000000000000000000000000000..e53b5830ec6a4b315165f4896aed27bdaadfbda6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py @@ -0,0 +1,432 @@ +import contextlib +from pathlib import Path +import re + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.excel import ( + ExcelWriter, + _OpenpyxlWriter, +) +from pandas.io.excel._openpyxl import OpenpyxlReader + +openpyxl = pytest.importorskip("openpyxl") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture +def ext(): + return ".xlsx" + + +def test_to_excel_styleconverter(): + from openpyxl import styles + + hstyle = { + "font": {"color": "00FF0000", "bold": True}, + "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"}, + "alignment": {"horizontal": "center", "vertical": "top"}, + "fill": {"patternType": "solid", "fgColor": {"rgb": "006666FF", "tint": 0.3}}, + "number_format": {"format_code": "0.00"}, + "protection": {"locked": True, "hidden": False}, + } + + font_color = styles.Color("00FF0000") + font = styles.Font(bold=True, color=font_color) + side = styles.Side(style=styles.borders.BORDER_THIN) + border = styles.Border(top=side, right=side, bottom=side, left=side) + alignment = styles.Alignment(horizontal="center", vertical="top") + fill_color = styles.Color(rgb="006666FF", tint=0.3) + fill = styles.PatternFill(patternType="solid", fgColor=fill_color) + + number_format = "0.00" + + protection = styles.Protection(locked=True, hidden=False) + + kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle) + assert kw["font"] == font + assert kw["border"] == border + assert kw["alignment"] == alignment + assert kw["fill"] == fill + assert kw["number_format"] == number_format + assert kw["protection"] == protection + + +def test_write_cells_merge_styled(ext): + from pandas.io.formats.excel import ExcelCell + + sheet_name = "merge_styled" + + sty_b1 = {"font": {"color": "00FF0000"}} + sty_a2 = {"font": {"color": "0000FF00"}} + + initial_cells = [ + ExcelCell(col=1, row=0, val=42, style=sty_b1), + ExcelCell(col=0, row=1, val=99, style=sty_a2), + ] + + sty_merged = {"font": {"color": "000000FF", "bold": True}} + sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged) + openpyxl_sty_merged = sty_kwargs["font"] + merge_cells = [ + ExcelCell( + col=0, row=0, val="pandas", mergestart=1, mergeend=1, style=sty_merged + ) + ] + + with tm.ensure_clean(ext) as path: + with _OpenpyxlWriter(path) as writer: + writer._write_cells(initial_cells, sheet_name=sheet_name) + writer._write_cells(merge_cells, sheet_name=sheet_name) + + wks = writer.sheets[sheet_name] + xcell_b1 = wks["B1"] + xcell_a2 = wks["A2"] + assert xcell_b1.font == openpyxl_sty_merged + assert xcell_a2.font == openpyxl_sty_merged + + +@pytest.mark.parametrize("iso_dates", [True, False]) +def test_engine_kwargs_write(ext, 
iso_dates): + # GH 42286 GH 43445 + engine_kwargs = {"iso_dates": iso_dates} + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer: + assert writer.book.iso_dates == iso_dates + # ExcelWriter won't allow us to close without writing something + DataFrame().to_excel(writer) + + +def test_engine_kwargs_append_invalid(ext): + # GH 43445 + # test whether an invalid engine kwargs actually raises + with tm.ensure_clean(ext) as f: + DataFrame(["hello", "world"]).to_excel(f) + with pytest.raises( + TypeError, + match=re.escape( + "load_workbook() got an unexpected keyword argument 'apple_banana'" + ), + ): + with ExcelWriter( + f, engine="openpyxl", mode="a", engine_kwargs={"apple_banana": "fruit"} + ) as writer: + # ExcelWriter needs us to write something to close properly + DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2") + + +@pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")]) +def test_engine_kwargs_append_data_only(ext, data_only, expected): + # GH 43445 + # tests whether the data_only engine_kwarg actually works well for + # openpyxl's load_workbook + with tm.ensure_clean(ext) as f: + DataFrame(["=1+1"]).to_excel(f) + with ExcelWriter( + f, engine="openpyxl", mode="a", engine_kwargs={"data_only": data_only} + ) as writer: + assert writer.sheets["Sheet1"]["B2"].value == expected + # ExcelWriter needs us to writer something to close properly? + DataFrame().to_excel(writer, sheet_name="Sheet2") + + # ensure that data_only also works for reading + # and that formulas/values roundtrip + assert ( + pd.read_excel( + f, + sheet_name="Sheet1", + engine="openpyxl", + engine_kwargs={"data_only": data_only}, + ).iloc[0, 1] + == expected + ) + + +@pytest.mark.parametrize("kwarg_name", ["read_only", "data_only"]) +@pytest.mark.parametrize("kwarg_value", [True, False]) +def test_engine_kwargs_append_reader(datapath, ext, kwarg_name, kwarg_value): + # GH 55027 + # test that `read_only` and `data_only` can be passed to + # `openpyxl.reader.excel.load_workbook` via `engine_kwargs` + filename = datapath("io", "data", "excel", "test1" + ext) + with contextlib.closing( + OpenpyxlReader(filename, engine_kwargs={kwarg_name: kwarg_value}) + ) as reader: + assert getattr(reader.book, kwarg_name) == kwarg_value + + +@pytest.mark.parametrize( + "mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])] +) +def test_write_append_mode(ext, mode, expected): + df = DataFrame([1], columns=["baz"]) + + with tm.ensure_clean(ext) as f: + wb = openpyxl.Workbook() + wb.worksheets[0].title = "foo" + wb.worksheets[0]["A1"].value = "foo" + wb.create_sheet("bar") + wb.worksheets[1]["A1"].value = "bar" + wb.save(f) + + with ExcelWriter(f, engine="openpyxl", mode=mode) as writer: + df.to_excel(writer, sheet_name="baz", index=False) + + with contextlib.closing(openpyxl.load_workbook(f)) as wb2: + result = [sheet.title for sheet in wb2.worksheets] + assert result == expected + + for index, cell_value in enumerate(expected): + assert wb2.worksheets[index]["A1"].value == cell_value + + +@pytest.mark.parametrize( + "if_sheet_exists,num_sheets,expected", + [ + ("new", 2, ["apple", "banana"]), + ("replace", 1, ["pear"]), + ("overlay", 1, ["pear", "banana"]), + ], +) +def test_if_sheet_exists_append_modes(ext, if_sheet_exists, num_sheets, expected): + # GH 40230 + df1 = DataFrame({"fruit": ["apple", "banana"]}) + df2 = DataFrame({"fruit": ["pear"]}) + + with tm.ensure_clean(ext) as f: + df1.to_excel(f, engine="openpyxl", sheet_name="foo", 
index=False) + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists + ) as writer: + df2.to_excel(writer, sheet_name="foo", index=False) + + with contextlib.closing(openpyxl.load_workbook(f)) as wb: + assert len(wb.sheetnames) == num_sheets + assert wb.sheetnames[0] == "foo" + result = pd.read_excel(wb, "foo", engine="openpyxl") + assert list(result["fruit"]) == expected + if len(wb.sheetnames) == 2: + result = pd.read_excel(wb, wb.sheetnames[1], engine="openpyxl") + tm.assert_frame_equal(result, df2) + + +@pytest.mark.parametrize( + "startrow, startcol, greeting, goodbye", + [ + (0, 0, ["poop", "world"], ["goodbye", "people"]), + (0, 1, ["hello", "world"], ["poop", "people"]), + (1, 0, ["hello", "poop"], ["goodbye", "people"]), + (1, 1, ["hello", "world"], ["goodbye", "poop"]), + ], +) +def test_append_overlay_startrow_startcol(ext, startrow, startcol, greeting, goodbye): + df1 = DataFrame({"greeting": ["hello", "world"], "goodbye": ["goodbye", "people"]}) + df2 = DataFrame(["poop"]) + + with tm.ensure_clean(ext) as f: + df1.to_excel(f, engine="openpyxl", sheet_name="poo", index=False) + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists="overlay" + ) as writer: + # use startrow+1 because we don't have a header + df2.to_excel( + writer, + index=False, + header=False, + startrow=startrow + 1, + startcol=startcol, + sheet_name="poo", + ) + + result = pd.read_excel(f, sheet_name="poo", engine="openpyxl") + expected = DataFrame({"greeting": greeting, "goodbye": goodbye}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "if_sheet_exists,msg", + [ + ( + "invalid", + "'invalid' is not valid for if_sheet_exists. Valid options " + "are 'error', 'new', 'replace' and 'overlay'.", + ), + ( + "error", + "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.", + ), + ( + None, + "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.", + ), + ], +) +def test_if_sheet_exists_raises(ext, if_sheet_exists, msg): + # GH 40230 + df = DataFrame({"fruit": ["pear"]}) + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=re.escape(msg)): + df.to_excel(f, sheet_name="foo", engine="openpyxl") + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists + ) as writer: + df.to_excel(writer, sheet_name="foo") + + +def test_to_excel_with_openpyxl_engine(ext): + # GH 29854 + with tm.ensure_clean(ext) as filename: + df1 = DataFrame({"A": np.linspace(1, 10, 10)}) + df2 = DataFrame({"B": np.linspace(1, 20, 10)}) + df = pd.concat([df1, df2], axis=1) + styled = df.style.map( + lambda val: f"color: {'red' if val < 0 else 'black'}" + ).highlight_max() + + styled.to_excel(filename, engine="openpyxl") + + +@pytest.mark.parametrize("read_only", [True, False]) +def test_read_workbook(datapath, ext, read_only): + # GH 39528 + filename = datapath("io", "data", "excel", "test1" + ext) + with contextlib.closing( + openpyxl.load_workbook(filename, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = pd.read_excel(filename) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "header, expected_data", + [ + ( + 0, + { + "Title": [np.nan, "A", 1, 2, 3], + "Unnamed: 1": [np.nan, "B", 4, 5, 6], + "Unnamed: 2": [np.nan, "C", 7, 8, 9], + }, + ), + (2, {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}), + ], +) +@pytest.mark.parametrize( + "filename", ["dimension_missing", "dimension_small", "dimension_large"] +) +# When read_only is None, use 
read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_with_bad_dimension( + datapath, ext, header, expected_data, filename, read_only +): + # GH 38956, 39001 - no/incorrect dimension information + path = datapath("io", "data", "excel", f"{filename}{ext}") + if read_only is None: + result = pd.read_excel(path, header=header) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl", header=header) + expected = DataFrame(expected_data) + tm.assert_frame_equal(result, expected) + + +def test_append_mode_file(ext): + # GH 39576 + df = DataFrame() + + with tm.ensure_clean(ext) as f: + df.to_excel(f, engine="openpyxl") + + with ExcelWriter( + f, mode="a", engine="openpyxl", if_sheet_exists="new" + ) as writer: + df.to_excel(writer) + + # make sure that zip files are not concatenated by making sure that + # "docProps/app.xml" only occurs twice in the file + data = Path(f).read_bytes() + first = data.find(b"docProps/app.xml") + second = data.find(b"docProps/app.xml", first + 1) + third = data.find(b"docProps/app.xml", second + 1) + assert second != -1 and third == -1 + + +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_with_empty_trailing_rows(datapath, ext, read_only): + # GH 39181 + path = datapath("io", "data", "excel", f"empty_trailing_rows{ext}") + if read_only is None: + result = pd.read_excel(path) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = DataFrame( + { + "Title": [np.nan, "A", 1, 2, 3], + "Unnamed: 1": [np.nan, "B", 4, 5, 6], + "Unnamed: 2": [np.nan, "C", 7, 8, 9], + } + ) + tm.assert_frame_equal(result, expected) + + +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_empty_with_blank_row(datapath, ext, read_only): + # GH 39547 - empty excel file with a row that has no data + path = datapath("io", "data", "excel", f"empty_with_blank_row{ext}") + if read_only is None: + result = pd.read_excel(path) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="openpyxl") as writer: + assert writer.sheets == {} + sheet = writer.book.create_sheet("test_name", 0) + assert writer.sheets == {"test_name": sheet} + + +def test_ints_spelled_with_decimals(datapath, ext): + # GH 46988 - openpyxl returns this sheet with floats + path = datapath("io", "data", "excel", f"ints_spelled_with_decimals{ext}") + result = pd.read_excel(path) + expected = DataFrame(range(2, 12), columns=[1]) + tm.assert_frame_equal(result, expected) + + +def test_read_multiindex_header_no_index_names(datapath, ext): + # GH#47487 + path = datapath("io", "data", "excel", f"multiindex_no_index_names{ext}") + result = pd.read_excel(path, index_col=[0, 1, 2], header=[0, 1, 2]) + expected = DataFrame( + [[np.nan, "x", "x", "x"], ["x", np.nan, np.nan, np.nan]], + columns=pd.MultiIndex.from_tuples( + [("X", "Y", "A1"), ("X", "Y", "A2"), ("XX", "YY", "B1"), ("XX", "YY", "B2")] + ), + 
index=pd.MultiIndex.from_tuples([("A", "AA", "AAA"), ("A", "BB", "BBB")]), + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py new file mode 100644 index 0000000000000000000000000000000000000000..8da8535952dcf98481716a2b00863dbfe6354af5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py @@ -0,0 +1,1751 @@ +from __future__ import annotations + +from datetime import ( + datetime, + time, +) +from functools import partial +from io import BytesIO +import os +from pathlib import Path +import platform +import re +from urllib.error import URLError +from zipfile import BadZipFile + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas.compat import is_platform_windows +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + read_csv, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + +read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"] +engine_params = [ + # Add any engines to test here + # When defusedxml is installed it triggers deprecation warnings for + # xlrd and openpyxl, so catch those here + pytest.param( + "xlrd", + marks=[ + td.skip_if_no("xlrd"), + ], + ), + pytest.param( + "openpyxl", + marks=[ + td.skip_if_no("openpyxl"), + ], + ), + pytest.param( + None, + marks=[ + td.skip_if_no("xlrd"), + ], + ), + pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")), + pytest.param("odf", marks=td.skip_if_no("odf")), + pytest.param("calamine", marks=td.skip_if_no("python_calamine")), +] + + +def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool: + """ + Filter out invalid (engine, ext) pairs instead of skipping, as that + produces 500+ pytest.skips. + """ + engine = engine.values[0] + if engine == "openpyxl" and read_ext == ".xls": + return False + if engine == "odf" and read_ext != ".ods": + return False + if read_ext == ".ods" and engine not in {"odf", "calamine"}: + return False + if engine == "pyxlsb" and read_ext != ".xlsb": + return False + if read_ext == ".xlsb" and engine not in {"pyxlsb", "calamine"}: + return False + if engine == "xlrd" and read_ext != ".xls": + return False + return True + + +def _transfer_marks(engine, read_ext): + """ + engine gives us a pytest.param object with some marks, read_ext is just + a string. We need to generate a new pytest.param inheriting the marks. + """ + values = engine.values + (read_ext,) + new_param = pytest.param(values, marks=engine.marks) + return new_param + + +@pytest.fixture( + params=[ + _transfer_marks(eng, ext) + for eng in engine_params + for ext in read_ext_params + if _is_valid_engine_ext_pair(eng, ext) + ], + ids=str, +) +def engine_and_read_ext(request): + """ + Fixture for Excel reader engine and read_ext, only including valid pairs. + """ + return request.param + + +@pytest.fixture +def engine(engine_and_read_ext): + engine, read_ext = engine_and_read_ext + return engine + + +@pytest.fixture +def read_ext(engine_and_read_ext): + engine, read_ext = engine_and_read_ext + return read_ext + + +@pytest.fixture +def df_ref(datapath): + """ + Obtain the reference data from read_csv with the Python engine. 
+ """ + filepath = datapath("io", "data", "csv", "test1.csv") + df_ref = read_csv(filepath, index_col=0, parse_dates=True, engine="python") + return df_ref + + +def get_exp_unit(read_ext: str, engine: str | None) -> str: + return "ns" + + +def adjust_expected(expected: DataFrame, read_ext: str, engine: str) -> None: + expected.index.name = None + unit = get_exp_unit(read_ext, engine) + # error: "Index" has no attribute "as_unit" + expected.index = expected.index.as_unit(unit) # type: ignore[attr-defined] + + +def xfail_datetimes_with_pyxlsb(engine, request): + if engine == "pyxlsb": + request.applymarker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + +class TestReaders: + @pytest.fixture(autouse=True) + def cd_and_set_engine(self, engine, datapath, monkeypatch): + """ + Change directory and set engine for read_excel calls. + """ + func = partial(pd.read_excel, engine=engine) + monkeypatch.chdir(datapath("io", "data", "excel")) + monkeypatch.setattr(pd, "read_excel", func) + + def test_engine_used(self, read_ext, engine, monkeypatch): + # GH 38884 + def parser(self, *args, **kwargs): + return self.engine + + monkeypatch.setattr(pd.ExcelFile, "parse", parser) + + expected_defaults = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "xls": "xlrd", + "ods": "odf", + } + + with open("test1" + read_ext, "rb") as f: + result = pd.read_excel(f) + + if engine is not None: + expected = engine + else: + expected = expected_defaults[read_ext[1:]] + assert result == expected + + def test_engine_kwargs(self, read_ext, engine): + # GH#52214 + expected_defaults = { + "xlsx": {"foo": "abcd"}, + "xlsm": {"foo": 123}, + "xlsb": {"foo": "True"}, + "xls": {"foo": True}, + "ods": {"foo": "abcd"}, + } + + if engine in {"xlrd", "pyxlsb"}: + msg = re.escape(r"open_workbook() got an unexpected keyword argument 'foo'") + elif engine == "odf": + msg = re.escape(r"load() got an unexpected keyword argument 'foo'") + else: + msg = re.escape(r"load_workbook() got an unexpected keyword argument 'foo'") + + if engine is not None: + with pytest.raises(TypeError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet1", + index_col=0, + engine_kwargs=expected_defaults[read_ext[1:]], + ) + + def test_usecols_int(self, read_ext): + # usecols as int + msg = "Passing an integer for `usecols`" + with pytest.raises(ValueError, match=msg): + pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3 + ) + + # usecols as int + with pytest.raises(ValueError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=3, + ) + + def test_usecols_list(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref[["B", "C"]] + adjust_expected(expected, read_ext, engine) + + df1 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=[0, 2, 3] + ) + df2 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=[0, 2, 3], + ) + + # TODO add index to xls file) + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + def test_usecols_str(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref[["A", "B", "C"]] + adjust_expected(expected, read_ext, engine) + + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A:D" + ) + df3 = pd.read_excel( + "test1" + 
read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A:D", + ) + + # TODO add index to xls, read xls ignores index name ? + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df3, expected) + + expected = df_ref[["B", "C"]] + adjust_expected(expected, read_ext, engine) + + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C,D" + ) + df3 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A,C,D", + ) + # TODO add index to xls file + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df3, expected) + + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C:D" + ) + df3 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A,C:D", + ) + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df3, expected) + + @pytest.mark.parametrize( + "usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]] + ) + def test_usecols_diff_positional_int_columns_order( + self, request, engine, read_ext, usecols, df_ref + ): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref[["A", "C"]] + adjust_expected(expected, read_ext, engine) + + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=usecols + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]]) + def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref): + expected = df_ref[["B", "D"]] + expected.index = range(len(expected)) + + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols=usecols) + tm.assert_frame_equal(result, expected) + + def test_read_excel_without_slicing(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref + adjust_expected(expected, read_ext, engine) + + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) + tm.assert_frame_equal(result, expected) + + def test_usecols_excel_range_str(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref[["C", "D"]] + adjust_expected(expected, read_ext, engine) + + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,D:E" + ) + tm.assert_frame_equal(result, expected) + + def test_usecols_excel_range_str_invalid(self, read_ext): + msg = "Invalid column name: E1" + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols="D:E1") + + def test_index_col_label_error(self, read_ext): + msg = "list indices must be integers.*, not str" + + with pytest.raises(TypeError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet1", + index_col=["A"], + usecols=["A", "C"], + ) + + def test_index_col_str(self, read_ext): + # see gh-52716 + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet3", index_col="A") + expected = DataFrame( + columns=["B", "C", "D", "E", "F"], index=Index([], name="A") + ) + tm.assert_frame_equal(result, expected) + + def test_index_col_empty(self, read_ext): + # see gh-9208 + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet3", index_col=["A", "B", "C"] + ) + expected = DataFrame( + columns=["D", "E", "F"], + index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]), + ) + tm.assert_frame_equal(result, expected) + + 
@pytest.mark.parametrize("index_col", [None, 2]) + def test_index_col_with_unnamed(self, read_ext, index_col): + # see gh-18792 + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet4", index_col=index_col + ) + expected = DataFrame( + [["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"] + ) + if index_col: + expected = expected.set_index(expected.columns[index_col]) + + tm.assert_frame_equal(result, expected) + + def test_usecols_pass_non_existent_column(self, read_ext): + msg = ( + "Usecols do not match columns, " + "columns expected but not found: " + r"\['E'\]" + ) + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, usecols=["E"]) + + def test_usecols_wrong_type(self, read_ext): + msg = ( + "'usecols' must either be list-like of " + "all strings, all unicode, all integers or a callable." + ) + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, usecols=["E1", 0]) + + def test_excel_stop_iterator(self, read_ext): + parsed = pd.read_excel("test2" + read_ext, sheet_name="Sheet1") + expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_cell_error_na(self, request, engine, read_ext): + xfail_datetimes_with_pyxlsb(engine, request) + + # https://github.com/tafia/calamine/issues/355 + if engine == "calamine" and read_ext == ".ods": + request.applymarker( + pytest.mark.xfail(reason="Calamine can't extract error from ods files") + ) + + parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1") + expected = DataFrame([[np.nan]], columns=["Test"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_table(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref + adjust_expected(expected, read_ext, engine) + + df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet2", skiprows=[1], index_col=0 + ) + # TODO add index to file + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + df3 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, skipfooter=1 + ) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + def test_reader_special_dtypes(self, request, engine, read_ext): + xfail_datetimes_with_pyxlsb(engine, request) + + unit = get_exp_unit(read_ext, engine) + expected = DataFrame.from_dict( + { + "IntCol": [1, 2, -3, 4, 0], + "FloatCol": [1.25, 2.25, 1.83, 1.92, 0.0000000005], + "BoolCol": [True, False, True, True, False], + "StrCol": [1, 2, 3, 4, 5], + "Str2Col": ["a", 3, "c", "d", "e"], + "DateCol": Index( + [ + datetime(2013, 10, 30), + datetime(2013, 10, 31), + datetime(1905, 1, 1), + datetime(2013, 12, 14), + datetime(2015, 3, 14), + ], + dtype=f"M8[{unit}]", + ), + }, + ) + basename = "test_types" + + # should read in correctly and infer types + actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + # if not coercing number, then int comes in as float + float_expected = expected.copy() + float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0 + actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, float_expected) + + # check setting Index (assuming xls and xlsx are the same here) + for icol, name in enumerate(expected.columns): + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", index_col=icol + ) + exp = 
expected.set_index(name) + tm.assert_frame_equal(actual, exp) + + expected["StrCol"] = expected["StrCol"].apply(str) + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", converters={"StrCol": str} + ) + tm.assert_frame_equal(actual, expected) + + # GH8212 - support for converters and missing values + def test_reader_converters(self, read_ext): + basename = "test_converters" + + expected = DataFrame.from_dict( + { + "IntCol": [1, 2, -3, -1000, 0], + "FloatCol": [12.5, np.nan, 18.3, 19.2, 0.000000005], + "BoolCol": ["Found", "Found", "Found", "Not found", "Found"], + "StrCol": ["1", np.nan, "3", "4", "5"], + } + ) + + converters = { + "IntCol": lambda x: int(x) if x != "" else -1000, + "FloatCol": lambda x: 10 * x if x else np.nan, + 2: lambda x: "Found" if x != "" else "Not found", + 3: lambda x: str(x) if x else "", + } + + # should read in correctly and set types of single cells (not array + # dtypes) + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", converters=converters + ) + tm.assert_frame_equal(actual, expected) + + def test_reader_dtype(self, read_ext): + # GH 8212 + basename = "testdtype" + actual = pd.read_excel(basename + read_ext) + + expected = DataFrame( + { + "a": [1, 2, 3, 4], + "b": [2.5, 3.5, 4.5, 5.5], + "c": [1, 2, 3, 4], + "d": [1.0, 2.0, np.nan, 4.0], + } + ) + + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str} + ) + + expected["a"] = expected["a"].astype("float64") + expected["b"] = expected["b"].astype("float32") + expected["c"] = Series(["001", "002", "003", "004"], dtype=object) + tm.assert_frame_equal(actual, expected) + + msg = "Unable to convert column d to type int64" + with pytest.raises(ValueError, match=msg): + pd.read_excel(basename + read_ext, dtype={"d": "int64"}) + + @pytest.mark.parametrize( + "dtype,expected", + [ + ( + None, + DataFrame( + { + "a": [1, 2, 3, 4], + "b": [2.5, 3.5, 4.5, 5.5], + "c": [1, 2, 3, 4], + "d": [1.0, 2.0, np.nan, 4.0], + } + ), + ), + ( + {"a": "float64", "b": "float32", "c": str, "d": str}, + DataFrame( + { + "a": Series([1, 2, 3, 4], dtype="float64"), + "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"), + "c": Series(["001", "002", "003", "004"], dtype=object), + "d": Series(["1", "2", np.nan, "4"], dtype=object), + } + ), + ), + ], + ) + def test_reader_dtype_str(self, read_ext, dtype, expected): + # see gh-20377 + basename = "testdtype" + + actual = pd.read_excel(basename + read_ext, dtype=dtype) + tm.assert_frame_equal(actual, expected) + + def test_dtype_backend(self, read_ext, dtype_backend, engine): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + df = DataFrame( + { + "a": Series([1, 3], dtype="Int64"), + "b": Series([2.5, 4.5], dtype="Float64"), + "c": Series([True, False], dtype="boolean"), + "d": Series(["a", "b"], dtype="string"), + "e": Series([pd.NA, 6], dtype="Int64"), + "f": Series([pd.NA, 7.5], dtype="Float64"), + "g": Series([pd.NA, True], dtype="boolean"), + "h": Series([pd.NA, "a"], dtype="string"), + "i": Series([pd.Timestamp("2019-12-31")] * 2), + "j": Series([pd.NA, pd.NA], dtype="Int64"), + } + ) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, sheet_name="test", index=False) + result = pd.read_excel( + file_path, sheet_name="test", dtype_backend=dtype_backend + ) + if dtype_backend == "pyarrow": + import pyarrow as pa + + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + 
{ + col: ArrowExtensionArray(pa.array(df[col], from_pandas=True)) + for col in df.columns + } + ) + # pyarrow by default infers timestamp resolution as us, not ns + expected["i"] = ArrowExtensionArray( + expected["i"].array._pa_array.cast(pa.timestamp(unit="us")) + ) + # pyarrow supports a null type, so don't have to default to Int64 + expected["j"] = ArrowExtensionArray(pa.array([None, None])) + else: + expected = df + unit = get_exp_unit(read_ext, engine) + expected["i"] = expected["i"].astype(f"M8[{unit}]") + + tm.assert_frame_equal(result, expected) + + def test_dtype_backend_and_dtype(self, read_ext): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + df = DataFrame({"a": [np.nan, 1.0], "b": [2.5, np.nan]}) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, sheet_name="test", index=False) + result = pd.read_excel( + file_path, + sheet_name="test", + dtype_backend="numpy_nullable", + dtype="float64", + ) + tm.assert_frame_equal(result, df) + + @pytest.mark.xfail( + using_pyarrow_string_dtype(), reason="infer_string takes precedence" + ) + def test_dtype_backend_string(self, read_ext, string_storage): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + pa = pytest.importorskip("pyarrow") + + with pd.option_context("mode.string_storage", string_storage): + df = DataFrame( + { + "a": np.array(["a", "b"], dtype=np.object_), + "b": np.array(["x", pd.NA], dtype=np.object_), + } + ) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, sheet_name="test", index=False) + result = pd.read_excel( + file_path, sheet_name="test", dtype_backend="numpy_nullable" + ) + + if string_storage == "python": + expected = DataFrame( + { + "a": StringArray(np.array(["a", "b"], dtype=np.object_)), + "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)), + } + ) + else: + expected = DataFrame( + { + "a": ArrowStringArray(pa.array(["a", "b"])), + "b": ArrowStringArray(pa.array(["x", None])), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtypes, exp_value", [({}, 1), ({"a.1": "int64"}, 1)]) + def test_dtype_mangle_dup_cols(self, read_ext, dtypes, exp_value): + # GH#35211 + basename = "df_mangle_dup_col_dtypes" + dtype_dict = {"a": object, **dtypes} + dtype_dict_copy = dtype_dict.copy() + # GH#42462 + result = pd.read_excel(basename + read_ext, dtype=dtype_dict) + expected = DataFrame( + { + "a": Series([1], dtype=object), + "a.1": Series([exp_value], dtype=object if not dtypes else None), + } + ) + assert dtype_dict == dtype_dict_copy, "dtype dict changed" + tm.assert_frame_equal(result, expected) + + def test_reader_spaces(self, read_ext): + # see gh-32207 + basename = "test_spaces" + + actual = pd.read_excel(basename + read_ext) + expected = DataFrame( + { + "testcol": [ + "this is great", + "4 spaces", + "1 trailing ", + " 1 leading", + "2 spaces multiple times", + ] + } + ) + tm.assert_frame_equal(actual, expected) + + # gh-36122, gh-35802 + @pytest.mark.parametrize( + "basename,expected", + [ + ("gh-35802", DataFrame({"COLUMN": ["Test (1)"]})), + ("gh-36122", DataFrame(columns=["got 2nd sa"])), + ], + ) + def test_read_excel_ods_nested_xml(self, engine, read_ext, basename, expected): + # see gh-35802 + if engine != "odf": + pytest.skip(f"Skipped for engine: {engine}") + + actual = pd.read_excel(basename + read_ext) + tm.assert_frame_equal(actual, expected) + + def test_reading_all_sheets(self, read_ext): + # Test reading 
all sheet names by setting sheet_name to None, + # Ensure a dict is returned. + # See PR #9450 + basename = "test_multisheet" + dfs = pd.read_excel(basename + read_ext, sheet_name=None) + # ensure this is not alphabetical to test order preservation + expected_keys = ["Charlie", "Alpha", "Beta"] + tm.assert_contains_all(expected_keys, dfs.keys()) + # Issue 9930 + # Ensure sheet order is preserved + assert expected_keys == list(dfs.keys()) + + def test_reading_multiple_specific_sheets(self, read_ext): + # Test reading specific sheet names by specifying a mixed list + # of integers and strings, and confirm that duplicated sheet + # references (positions/names) are removed properly. + # Ensure a dict is returned + # See PR #9450 + basename = "test_multisheet" + # Explicitly request duplicates. Only the set should be returned. + expected_keys = [2, "Charlie", "Charlie"] + dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys) + expected_keys = list(set(expected_keys)) + tm.assert_contains_all(expected_keys, dfs.keys()) + assert len(expected_keys) == len(dfs.keys()) + + def test_reading_all_sheets_with_blank(self, read_ext): + # Test reading all sheet names by setting sheet_name to None, + # In the case where some sheets are blank. + # Issue #11711 + basename = "blank_with_header" + dfs = pd.read_excel(basename + read_ext, sheet_name=None) + expected_keys = ["Sheet1", "Sheet2", "Sheet3"] + tm.assert_contains_all(expected_keys, dfs.keys()) + + # GH6403 + def test_read_excel_blank(self, read_ext): + actual = pd.read_excel("blank" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, DataFrame()) + + def test_read_excel_blank_with_header(self, read_ext): + expected = DataFrame(columns=["col_1", "col_2"]) + actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + def test_exception_message_includes_sheet_name(self, read_ext): + # GH 48706 + with pytest.raises(ValueError, match=r" \(sheet: Sheet1\)$"): + pd.read_excel("blank_with_header" + read_ext, header=[1], sheet_name=None) + with pytest.raises(ZeroDivisionError, match=r" \(sheet: Sheet1\)$"): + pd.read_excel("test1" + read_ext, usecols=lambda x: 1 / 0, sheet_name=None) + + @pytest.mark.filterwarnings("ignore:Cell A4 is marked:UserWarning:openpyxl") + def test_date_conversion_overflow(self, request, engine, read_ext): + # GH 10001 : pandas.ExcelFile ignore parse_dates=False + xfail_datetimes_with_pyxlsb(engine, request) + + expected = DataFrame( + [ + [pd.Timestamp("2016-03-12"), "Marc Johnson"], + [pd.Timestamp("2016-03-16"), "Jack Black"], + [1e20, "Timothy Brown"], + ], + columns=["DateColWithBigInt", "StringCol"], + ) + + if engine == "openpyxl": + request.applymarker( + pytest.mark.xfail(reason="Maybe not supported by openpyxl") + ) + + if engine is None and read_ext in (".xlsx", ".xlsm"): + # GH 35029 + request.applymarker( + pytest.mark.xfail(reason="Defaults to openpyxl, maybe not supported") + ) + + result = pd.read_excel("testdateoverflow" + read_ext) + tm.assert_frame_equal(result, expected) + + def test_sheet_name(self, request, read_ext, engine, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + filename = "test1" + sheet_name = "Sheet1" + + expected = df_ref + adjust_expected(expected, read_ext, engine) + + df1 = pd.read_excel( + filename + read_ext, sheet_name=sheet_name, index_col=0 + ) # doc + df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name) + + tm.assert_frame_equal(df1, expected) + 
tm.assert_frame_equal(df2, expected) + + def test_excel_read_buffer(self, read_ext): + pth = "test1" + read_ext + expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0) + with open(pth, "rb") as f: + actual = pd.read_excel(f, sheet_name="Sheet1", index_col=0) + tm.assert_frame_equal(expected, actual) + + def test_bad_engine_raises(self): + bad_engine = "foo" + with pytest.raises(ValueError, match="Unknown engine: foo"): + pd.read_excel("", engine=bad_engine) + + @pytest.mark.parametrize( + "sheet_name", + [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]], + ) + def test_bad_sheetname_raises(self, read_ext, sheet_name): + # GH 39250 + msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found" + with pytest.raises(ValueError, match=msg): + pd.read_excel("blank" + read_ext, sheet_name=sheet_name) + + def test_missing_file_raises(self, read_ext): + bad_file = f"foo{read_ext}" + # CI tests with other languages, translates to "No such file or directory" + match = "|".join( + [ + "(No such file or directory", + "没有那个文件或目录", + "File o directory non esistente)", + ] + ) + with pytest.raises(FileNotFoundError, match=match): + pd.read_excel(bad_file) + + def test_corrupt_bytes_raises(self, engine): + bad_stream = b"foo" + if engine is None: + error = ValueError + msg = ( + "Excel file format cannot be determined, you must " + "specify an engine manually." + ) + elif engine == "xlrd": + from xlrd import XLRDError + + error = XLRDError + msg = ( + "Unsupported format, or corrupt file: Expected BOF " + "record; found b'foo'" + ) + elif engine == "calamine": + from python_calamine import CalamineError + + error = CalamineError + msg = "Cannot detect file format" + else: + error = BadZipFile + msg = "File is not a zip file" + with pytest.raises(error, match=msg): + pd.read_excel(BytesIO(bad_stream)) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_read_from_http_url(self, httpserver, read_ext): + with open("test1" + read_ext, "rb") as f: + httpserver.serve_content(content=f.read()) + url_table = pd.read_excel(httpserver.url) + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @td.skip_if_not_us_locale + @pytest.mark.single_cpu + def test_read_from_s3_url(self, read_ext, s3_public_bucket, s3so): + # Bucket created in tests/io/conftest.py + with open("test1" + read_ext, "rb") as f: + s3_public_bucket.put_object(Key="test1" + read_ext, Body=f) + + url = f"s3://{s3_public_bucket.name}/test1" + read_ext + + url_table = pd.read_excel(url, storage_options=s3so) + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @pytest.mark.single_cpu + def test_read_from_s3_object(self, read_ext, s3_public_bucket, s3so): + # GH 38788 + # Bucket created in tests/io/conftest.py + with open("test1" + read_ext, "rb") as f: + s3_public_bucket.put_object(Key="test1" + read_ext, Body=f) + + import s3fs + + s3 = s3fs.S3FileSystem(**s3so) + + with s3.open(f"s3://{s3_public_bucket.name}/test1" + read_ext) as f: + url_table = pd.read_excel(f) + + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @pytest.mark.slow + def test_read_from_file_url(self, read_ext, datapath): + # FILE + localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext) + local_table = pd.read_excel(localtable) + + try: + url_table = pd.read_excel("file://localhost/" + localtable) + except URLError: + # fails on some systems + platform_info = " 
".join(platform.uname()).strip() + pytest.skip(f"failing on {platform_info}") + + tm.assert_frame_equal(url_table, local_table) + + def test_read_from_pathlib_path(self, read_ext): + # GH12655 + str_path = "test1" + read_ext + expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0) + + path_obj = Path("test1" + read_ext) + actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + @td.skip_if_no("py.path") + def test_read_from_py_localpath(self, read_ext): + # GH12655 + from py.path import local as LocalPath + + str_path = os.path.join("test1" + read_ext) + expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0) + + path_obj = LocalPath().join("test1" + read_ext) + actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + def test_close_from_py_localpath(self, read_ext): + # GH31467 + str_path = os.path.join("test1" + read_ext) + with open(str_path, "rb") as f: + x = pd.read_excel(f, sheet_name="Sheet1", index_col=0) + del x + # should not throw an exception because the passed file was closed + f.read() + + def test_reader_seconds(self, request, engine, read_ext): + xfail_datetimes_with_pyxlsb(engine, request) + + # GH 55045 + if engine == "calamine" and read_ext == ".ods": + request.applymarker( + pytest.mark.xfail( + reason="ODS file contains bad datetime (seconds as text)" + ) + ) + + # Test reading times with and without milliseconds. GH5945. + expected = DataFrame.from_dict( + { + "Time": [ + time(1, 2, 3), + time(2, 45, 56, 100000), + time(4, 29, 49, 200000), + time(6, 13, 42, 300000), + time(7, 57, 35, 400000), + time(9, 41, 28, 500000), + time(11, 25, 21, 600000), + time(13, 9, 14, 700000), + time(14, 53, 7, 800000), + time(16, 37, 0, 900000), + time(18, 20, 54), + ] + } + ) + + actual = pd.read_excel("times_1900" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + def test_read_excel_multiindex(self, request, engine, read_ext): + # see gh-4679 + xfail_datetimes_with_pyxlsb(engine, request) + + unit = get_exp_unit(read_ext, engine) + + mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]]) + mi_file = "testmultiindex" + read_ext + + # "mi_column" sheet + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=mi, + ) + expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]") + + actual = pd.read_excel( + mi_file, sheet_name="mi_column", header=[0, 1], index_col=0 + ) + tm.assert_frame_equal(actual, expected) + + # "mi_index" sheet + expected.index = mi + expected.columns = ["a", "b", "c", "d"] + + actual = pd.read_excel(mi_file, sheet_name="mi_index", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # "both" sheet + expected.columns = mi + + actual = pd.read_excel( + mi_file, sheet_name="both", index_col=[0, 1], header=[0, 1] + ) + tm.assert_frame_equal(actual, expected) + + # "mi_index_name" sheet + expected.columns = ["a", "b", "c", "d"] + expected.index = mi.set_names(["ilvl1", "ilvl2"]) + + actual = pd.read_excel(mi_file, sheet_name="mi_index_name", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # "mi_column_name" sheet + expected.index = list(range(4)) + expected.columns = mi.set_names(["c1", "c2"]) 
+ actual = pd.read_excel( + mi_file, sheet_name="mi_column_name", header=[0, 1], index_col=0 + ) + tm.assert_frame_equal(actual, expected) + + # see gh-11317 + # "name_with_int" sheet + expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"]) + + actual = pd.read_excel( + mi_file, sheet_name="name_with_int", index_col=0, header=[0, 1] + ) + tm.assert_frame_equal(actual, expected) + + # "both_name" sheet + expected.columns = mi.set_names(["c1", "c2"]) + expected.index = mi.set_names(["ilvl1", "ilvl2"]) + + actual = pd.read_excel( + mi_file, sheet_name="both_name", index_col=[0, 1], header=[0, 1] + ) + tm.assert_frame_equal(actual, expected) + + # "both_skiprows" sheet + actual = pd.read_excel( + mi_file, + sheet_name="both_name_skiprows", + index_col=[0, 1], + header=[0, 1], + skiprows=2, + ) + tm.assert_frame_equal(actual, expected) + + @pytest.mark.parametrize( + "sheet_name,idx_lvl2", + [ + ("both_name_blank_after_mi_name", [np.nan, "b", "a", "b"]), + ("both_name_multiple_blanks", [np.nan] * 4), + ], + ) + def test_read_excel_multiindex_blank_after_name( + self, request, engine, read_ext, sheet_name, idx_lvl2 + ): + # GH34673 + xfail_datetimes_with_pyxlsb(engine, request) + + mi_file = "testmultiindex" + read_ext + mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]], names=["c1", "c2"]) + + unit = get_exp_unit(read_ext, engine) + + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=mi, + index=MultiIndex.from_arrays( + (["foo", "foo", "bar", "bar"], idx_lvl2), + names=["ilvl1", "ilvl2"], + ), + ) + expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]") + result = pd.read_excel( + mi_file, + sheet_name=sheet_name, + index_col=[0, 1], + header=[0, 1], + ) + tm.assert_frame_equal(result, expected) + + def test_read_excel_multiindex_header_only(self, read_ext): + # see gh-11733. + # + # Don't try to parse a header name if there isn't one. + mi_file = "testmultiindex" + read_ext + result = pd.read_excel(mi_file, sheet_name="index_col_none", header=[0, 1]) + + exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")]) + expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns) + tm.assert_frame_equal(result, expected) + + def test_excel_old_index_format(self, read_ext): + # see gh-4679 + filename = "test_index_name_pre17" + read_ext + + # We detect headers to determine if index names exist, so + # that "index" name in the "names" version of the data will + # now be interpreted as rows that include null data. 
+ data = np.array( + [ + [np.nan, np.nan, np.nan, np.nan, np.nan], + ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], + ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], + ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], + ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], + ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"], + ], + dtype=object, + ) + columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] + mi = MultiIndex( + levels=[ + ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], + ["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"], + ], + codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], + names=[None, None], + ) + si = Index( + ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None + ) + + expected = DataFrame(data, index=si, columns=columns) + + actual = pd.read_excel(filename, sheet_name="single_names", index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + + actual = pd.read_excel(filename, sheet_name="multi_names", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # The analogous versions of the "names" version data + # where there are explicitly no names for the indices. + data = np.array( + [ + ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], + ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], + ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], + ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], + ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"], + ] + ) + columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] + mi = MultiIndex( + levels=[ + ["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], + ["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"], + ], + codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], + names=[None, None], + ) + si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None) + + expected = DataFrame(data, index=si, columns=columns) + + actual = pd.read_excel(filename, sheet_name="single_no_names", index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + + actual = pd.read_excel(filename, sheet_name="multi_no_names", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_bool_header_arg(self, read_ext): + # GH 6114 + msg = "Passing a bool to header is invalid" + for arg in [True, False]: + with pytest.raises(TypeError, match=msg): + pd.read_excel("test1" + read_ext, header=arg) + + def test_read_excel_skiprows(self, request, engine, read_ext): + # GH 4903 + xfail_datetimes_with_pyxlsb(engine, request) + + unit = get_exp_unit(read_ext, engine) + + actual = pd.read_excel( + "testskiprows" + read_ext, sheet_name="skiprows_list", skiprows=[0, 2] + ) + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + expected["c"] = expected["c"].astype(f"M8[{unit}]") + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=np.array([0, 2]), + ) + tm.assert_frame_equal(actual, expected) + + # GH36435 + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=lambda x: x in [0, 2], + ) + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=3, + names=["a", "b", "c", "d"], + ) + expected = DataFrame( + [ + # [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), 
False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + expected["c"] = expected["c"].astype(f"M8[{unit}]") + tm.assert_frame_equal(actual, expected) + + def test_read_excel_skiprows_callable_not_in(self, request, engine, read_ext): + # GH 4903 + xfail_datetimes_with_pyxlsb(engine, request) + unit = get_exp_unit(read_ext, engine) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=lambda x: x not in [1, 3, 5], + ) + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + # [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + # [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + expected["c"] = expected["c"].astype(f"M8[{unit}]") + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows(self, read_ext): + # GH 16645 + num_rows_to_pull = 5 + actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull) + expected = pd.read_excel("test1" + read_ext) + expected = expected[:num_rows_to_pull] + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows_greater_than_nrows_in_file(self, read_ext): + # GH 16645 + expected = pd.read_excel("test1" + read_ext) + num_records_in_file = len(expected) + num_rows_to_pull = num_records_in_file + 10 + actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows_non_integer_parameter(self, read_ext): + # GH 16645 + msg = "'nrows' must be an integer >=0" + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, nrows="5") + + @pytest.mark.parametrize( + "filename,sheet_name,header,index_col,skiprows", + [ + ("testmultiindex", "mi_column", [0, 1], 0, None), + ("testmultiindex", "mi_index", None, [0, 1], None), + ("testmultiindex", "both", [0, 1], [0, 1], None), + ("testmultiindex", "mi_column_name", [0, 1], 0, None), + ("testskiprows", "skiprows_list", None, None, [0, 2]), + ("testskiprows", "skiprows_list", None, None, lambda x: x in (0, 2)), + ], + ) + def test_read_excel_nrows_params( + self, read_ext, filename, sheet_name, header, index_col, skiprows + ): + """ + For various parameters, we should get the same result whether we + limit the rows during load (nrows=3) or after (df.iloc[:3]). 
+ """ + # GH 46894 + expected = pd.read_excel( + filename + read_ext, + sheet_name=sheet_name, + header=header, + index_col=index_col, + skiprows=skiprows, + ).iloc[:3] + actual = pd.read_excel( + filename + read_ext, + sheet_name=sheet_name, + header=header, + index_col=index_col, + skiprows=skiprows, + nrows=3, + ) + tm.assert_frame_equal(actual, expected) + + def test_deprecated_kwargs(self, read_ext): + with pytest.raises(TypeError, match="but 3 positional arguments"): + pd.read_excel("test1" + read_ext, "Sheet1", 0) + + def test_no_header_with_list_index_col(self, read_ext): + # GH 31783 + file_name = "testmultiindex" + read_ext + data = [("B", "B"), ("key", "val"), (3, 4), (3, 4)] + idx = MultiIndex.from_tuples( + [("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1) + ) + expected = DataFrame(data, index=idx, columns=(2, 3)) + result = pd.read_excel( + file_name, sheet_name="index_col_none", index_col=[0, 1], header=None + ) + tm.assert_frame_equal(expected, result) + + def test_one_col_noskip_blank_line(self, read_ext): + # GH 39808 + file_name = "one_col_blank_line" + read_ext + data = [0.5, np.nan, 1, 2] + expected = DataFrame(data, columns=["numbers"]) + result = pd.read_excel(file_name) + tm.assert_frame_equal(result, expected) + + def test_multiheader_two_blank_lines(self, read_ext): + # GH 40442 + file_name = "testmultiindex" + read_ext + columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")]) + data = [[np.nan, np.nan], [np.nan, np.nan], [1, 3], [2, 4]] + expected = DataFrame(data, columns=columns) + result = pd.read_excel( + file_name, sheet_name="mi_column_empty_rows", header=[0, 1] + ) + tm.assert_frame_equal(result, expected) + + def test_trailing_blanks(self, read_ext): + """ + Sheets can contain blank cells with no data. 
Some of our readers + were including those cells, creating many empty rows and columns + """ + file_name = "trailing_blanks" + read_ext + result = pd.read_excel(file_name) + assert result.shape == (3, 3) + + def test_ignore_chartsheets_by_str(self, request, engine, read_ext): + # GH 41448 + if read_ext == ".ods": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.applymarker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises(ValueError, match="Worksheet named 'Chart1' not found"): + pd.read_excel("chartsheet" + read_ext, sheet_name="Chart1") + + def test_ignore_chartsheets_by_int(self, request, engine, read_ext): + # GH 41448 + if read_ext == ".ods": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.applymarker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises( + ValueError, match="Worksheet index 1 is invalid, 1 worksheets found" + ): + pd.read_excel("chartsheet" + read_ext, sheet_name=1) + + def test_euro_decimal_format(self, read_ext): + # copied from read_csv + result = pd.read_excel("test_decimal" + read_ext, decimal=",", skiprows=1) + expected = DataFrame( + [ + [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], + [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], + [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], + ], + columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], + ) + tm.assert_frame_equal(result, expected) + + +class TestExcelFileRead: + def test_deprecate_bytes_input(self, engine, read_ext): + # GH 53830 + msg = ( + "Passing bytes to 'read_excel' is deprecated and " + "will be removed in a future version. To read from a " + "byte string, wrap it in a `BytesIO` object." + ) + + with tm.assert_produces_warning( + FutureWarning, match=msg, raise_on_extra_warnings=False + ): + with open("test1" + read_ext, "rb") as f: + pd.read_excel(f.read(), engine=engine) + + @pytest.fixture(autouse=True) + def cd_and_set_engine(self, engine, datapath, monkeypatch): + """ + Change directory and set engine for ExcelFile objects. 
+ """ + func = partial(pd.ExcelFile, engine=engine) + monkeypatch.chdir(datapath("io", "data", "excel")) + monkeypatch.setattr(pd, "ExcelFile", func) + + def test_engine_used(self, read_ext, engine): + expected_defaults = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "xls": "xlrd", + "ods": "odf", + } + + with pd.ExcelFile("test1" + read_ext) as excel: + result = excel.engine + + if engine is not None: + expected = engine + else: + expected = expected_defaults[read_ext[1:]] + assert result == expected + + def test_excel_passes_na(self, read_ext): + with pd.ExcelFile("test4" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"] + ) + expected = DataFrame( + [["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + with pd.ExcelFile("test4" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"] + ) + expected = DataFrame( + [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + # 13967 + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"] + ) + expected = DataFrame( + [["1.#QNAN"], [1], ["nan"], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"] + ) + expected = DataFrame( + [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + @pytest.mark.parametrize("na_filter", [None, True, False]) + def test_excel_passes_na_filter(self, read_ext, na_filter): + # gh-25453 + kwargs = {} + + if na_filter is not None: + kwargs["na_filter"] = na_filter + + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, + sheet_name="Sheet1", + keep_default_na=True, + na_values=["apple"], + **kwargs, + ) + + if na_filter is False: + expected = [["1.#QNAN"], [1], ["nan"], ["apple"], ["rabbit"]] + else: + expected = [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]] + + expected = DataFrame(expected, columns=["Test"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_table_sheet_by_index(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref + adjust_expected(expected, read_ext, engine) + + with pd.ExcelFile("test1" + read_ext) as excel: + df1 = pd.read_excel(excel, sheet_name=0, index_col=0) + df2 = pd.read_excel(excel, sheet_name=1, skiprows=[1], index_col=0) + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + with pd.ExcelFile("test1" + read_ext) as excel: + df1 = excel.parse(0, index_col=0) + df2 = excel.parse(1, skiprows=[1], index_col=0) + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + with pd.ExcelFile("test1" + read_ext) as excel: + df3 = pd.read_excel(excel, sheet_name=0, index_col=0, skipfooter=1) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + with pd.ExcelFile("test1" + read_ext) as excel: + df3 = excel.parse(0, index_col=0, skipfooter=1) + + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + def test_sheet_name(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref + adjust_expected(expected, read_ext, 
engine) + + filename = "test1" + sheet_name = "Sheet1" + + with pd.ExcelFile(filename + read_ext) as excel: + df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc + + with pd.ExcelFile(filename + read_ext) as excel: + df2_parse = excel.parse(index_col=0, sheet_name=sheet_name) + + tm.assert_frame_equal(df1_parse, expected) + tm.assert_frame_equal(df2_parse, expected) + + @pytest.mark.parametrize( + "sheet_name", + [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]], + ) + def test_bad_sheetname_raises(self, read_ext, sheet_name): + # GH 39250 + msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found" + with pytest.raises(ValueError, match=msg): + with pd.ExcelFile("blank" + read_ext) as excel: + excel.parse(sheet_name=sheet_name) + + def test_excel_read_buffer(self, engine, read_ext): + pth = "test1" + read_ext + expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0, engine=engine) + + with open(pth, "rb") as f: + with pd.ExcelFile(f) as xls: + actual = pd.read_excel(xls, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + def test_reader_closes_file(self, engine, read_ext): + with open("test1" + read_ext, "rb") as f: + with pd.ExcelFile(f) as xlsx: + # parses okay + pd.read_excel(xlsx, sheet_name="Sheet1", index_col=0, engine=engine) + + assert f.closed + + def test_conflicting_excel_engines(self, read_ext): + # GH 26566 + msg = "Engine should not be specified when passing an ExcelFile" + + with pd.ExcelFile("test1" + read_ext) as xl: + with pytest.raises(ValueError, match=msg): + pd.read_excel(xl, engine="foo") + + def test_excel_read_binary(self, engine, read_ext): + # GH 15914 + expected = pd.read_excel("test1" + read_ext, engine=engine) + + with open("test1" + read_ext, "rb") as f: + data = f.read() + + actual = pd.read_excel(BytesIO(data), engine=engine) + tm.assert_frame_equal(expected, actual) + + def test_excel_read_binary_via_read_excel(self, read_ext, engine): + # GH 38424 + with open("test1" + read_ext, "rb") as f: + result = pd.read_excel(f, engine=engine) + expected = pd.read_excel("test1" + read_ext, engine=engine) + tm.assert_frame_equal(result, expected) + + def test_read_excel_header_index_out_of_range(self, engine): + # GH#43143 + with open("df_header_oob.xlsx", "rb") as f: + with pytest.raises(ValueError, match="exceeds maximum"): + pd.read_excel(f, header=[0, 1]) + + @pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"]) + def test_header_with_index_col(self, filename): + # GH 33476 + idx = Index(["Z"], name="I2") + cols = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"]) + expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64") + result = pd.read_excel( + filename, sheet_name="Sheet1", index_col=0, header=[0, 1] + ) + tm.assert_frame_equal(expected, result) + + def test_read_datetime_multiindex(self, request, engine, read_ext): + # GH 34748 + xfail_datetimes_with_pyxlsb(engine, request) + + f = "test_datetime_mi" + read_ext + with pd.ExcelFile(f) as excel: + actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine) + + unit = get_exp_unit(read_ext, engine) + dti = pd.DatetimeIndex(["2020-02-29", "2020-03-01"], dtype=f"M8[{unit}]") + expected_column_index = MultiIndex.from_arrays( + [dti[:1], dti[1:]], + names=[ + dti[0].to_pydatetime(), + dti[1].to_pydatetime(), + ], + ) + expected = DataFrame([], index=[], columns=expected_column_index) + + tm.assert_frame_equal(expected, actual) + + def 
test_engine_invalid_option(self, read_ext):
+        # read_ext includes the '.' hence the weird formatting
+        with pytest.raises(ValueError, match="Value must be one of *"):
+            with pd.option_context(f"io.excel{read_ext}.reader", "abc"):
+                pass
+
+    def test_ignore_chartsheets(self, request, engine, read_ext):
+        # GH 41448
+        if read_ext == ".ods":
+            pytest.skip("chartsheets do not exist in the ODF format")
+        if engine == "pyxlsb":
+            request.applymarker(
+                pytest.mark.xfail(
+                    reason="pyxlsb can't distinguish chartsheets from worksheets"
+                )
+            )
+        with pd.ExcelFile("chartsheet" + read_ext) as excel:
+            assert excel.sheet_names == ["Sheet1"]
+
+    def test_corrupt_files_closed(self, engine, read_ext):
+        # GH41778
+        errors = (BadZipFile,)
+        if engine is None:
+            pytest.skip(f"Invalid test for engine={engine}")
+        elif engine == "xlrd":
+            import xlrd
+
+            errors = (BadZipFile, xlrd.biffh.XLRDError)
+        elif engine == "calamine":
+            from python_calamine import CalamineError
+
+            errors = (CalamineError,)
+
+        with tm.ensure_clean(f"corrupt{read_ext}") as file:
+            Path(file).write_text("corrupt", encoding="utf-8")
+            with tm.assert_produces_warning(False):
+                try:
+                    pd.ExcelFile(file, engine=engine)
+                except errors:
+                    pass
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..89615172688d7b56fbb070dbcd4750365d7d612d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py
@@ -0,0 +1,298 @@
+import contextlib
+import time
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_windows
+import pandas.util._test_decorators as td
+
+from pandas import (
+    DataFrame,
+    read_excel,
+)
+import pandas._testing as tm
+
+from pandas.io.excel import ExcelWriter
+from pandas.io.formats.excel import ExcelFormatter
+
+pytest.importorskip("jinja2")
+# jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel
+# could compute styles and render to excel without jinja2, since there is no
+# 'template' file, but this needs the import error to be delayed until render time.
+ +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +def assert_equal_cell_styles(cell1, cell2): + # TODO: should find a better way to check equality + assert cell1.alignment.__dict__ == cell2.alignment.__dict__ + assert cell1.border.__dict__ == cell2.border.__dict__ + assert cell1.fill.__dict__ == cell2.fill.__dict__ + assert cell1.font.__dict__ == cell2.font.__dict__ + assert cell1.number_format == cell2.number_format + assert cell1.protection.__dict__ == cell2.protection.__dict__ + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +def test_styler_to_excel_unstyled(engine): + # compare DataFrame.to_excel and Styler.to_excel when no styles applied + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((2, 2))) + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + df.style.to_excel(writer, sheet_name="unstyled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns): + assert len(col1) == len(col2) + for cell1, cell2 in zip(col1, col2): + assert cell1.value == cell2.value + assert_equal_cell_styles(cell1, cell2) + + +shared_style_params = [ + ( + "background-color: #111222", + ["fill", "fgColor", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ( + "color: #111222", + ["font", "color", "value"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("font-family: Arial;", ["font", "name"], "arial"), + ("font-weight: bold;", ["font", "b"], True), + ("font-style: italic;", ["font", "i"], True), + ("text-decoration: underline;", ["font", "u"], "single"), + ("number-format: $??,???.00;", ["number_format"], "$??,???.00"), + ("text-align: left;", ["alignment", "horizontal"], "left"), + ( + "vertical-align: bottom;", + ["alignment", "vertical"], + {"xlsxwriter": None, "openpyxl": "bottom"}, # xlsxwriter Fails + ), + ("vertical-align: middle;", ["alignment", "vertical"], "center"), + # Border widths + ("border-left: 2pt solid red", ["border", "left", "style"], "medium"), + ("border-left: 1pt dotted red", ["border", "left", "style"], "dotted"), + ("border-left: 2pt dotted red", ["border", "left", "style"], "mediumDashDotDot"), + ("border-left: 1pt dashed red", ["border", "left", "style"], "dashed"), + ("border-left: 2pt dashed red", ["border", "left", "style"], "mediumDashed"), + ("border-left: 1pt solid red", ["border", "left", "style"], "thin"), + ("border-left: 3pt solid red", ["border", "left", "style"], "thick"), + # Border expansion + ( + "border-left: 2pt solid #111222", + ["border", "left", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "top", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "top", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "right", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "right", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "bottom", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "bottom", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "left", "style"], "thin"), + ( + "border: 1pt solid 
#111222", + ["border", "left", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + # Border styles + ( + "border-left-style: hair; border-left-color: black", + ["border", "left", "style"], + "hair", + ), +] + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +@pytest.mark.parametrize("css, attrs, expected", shared_style_params) +def test_styler_to_excel_basic(engine, css, attrs, expected): + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: css) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test unstyled data cell does not have expected styles + # test styled cell has expected styles + u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2) + for attr in attrs: + u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr) + + if isinstance(expected, dict): + assert u_cell is None or u_cell != expected[engine] + assert s_cell == expected[engine] + else: + assert u_cell is None or u_cell != expected + assert s_cell == expected + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +@pytest.mark.parametrize("css, attrs, expected", shared_style_params) +def test_styler_to_excel_basic_indexes(engine, css, attrs, expected): + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + + styler = df.style + styler.map_index(lambda x: css, axis=0) + styler.map_index(lambda x: css, axis=1) + + null_styler = df.style + null_styler.map(lambda x: "null: css;") + null_styler.map_index(lambda x: "null: css;", axis=0) + null_styler.map_index(lambda x: "null: css;", axis=1) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + null_styler.to_excel(writer, sheet_name="null_styled") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test null styled index cells does not have expected styles + # test styled cell has expected styles + ui_cell, si_cell = wb["null_styled"].cell(2, 1), wb["styled"].cell(2, 1) + uc_cell, sc_cell = wb["null_styled"].cell(1, 2), wb["styled"].cell(1, 2) + for attr in attrs: + ui_cell, si_cell = getattr(ui_cell, attr, None), getattr(si_cell, attr) + uc_cell, sc_cell = getattr(uc_cell, attr, None), getattr(sc_cell, attr) + + if isinstance(expected, dict): + assert ui_cell is None or ui_cell != expected[engine] + assert si_cell == expected[engine] + assert uc_cell is None or uc_cell != expected[engine] + assert sc_cell == expected[engine] + else: + assert ui_cell is None or ui_cell != expected + assert si_cell == expected + assert uc_cell is None or uc_cell != expected + assert sc_cell == expected + + +# From https://openpyxl.readthedocs.io/en/stable/api/openpyxl.styles.borders.html +# Note: Leaving behavior of "width"-type styles undefined; user should use border-width +# instead +excel_border_styles = [ + # "thin", + "dashed", + "mediumDashDot", + "dashDotDot", + "hair", + "dotted", + "mediumDashDotDot", + # "medium", + "double", + "dashDot", + "slantDashDot", + # "thick", + "mediumDashed", +] + + 
+@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +@pytest.mark.parametrize("border_style", excel_border_styles) +def test_styler_to_excel_border_style(engine, border_style): + css = f"border-left: {border_style} black thin" + attrs = ["border", "left", "style"] + expected = border_style + + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: css) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test unstyled data cell does not have expected styles + # test styled cell has expected styles + u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2) + for attr in attrs: + u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr) + + if isinstance(expected, dict): + assert u_cell is None or u_cell != expected[engine] + assert s_cell == expected[engine] + else: + assert u_cell is None or u_cell != expected + assert s_cell == expected + + +def test_styler_custom_converter(): + openpyxl = pytest.importorskip("openpyxl") + + def custom_converter(css): + return {"font": {"color": {"rgb": "111222"}}} + + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: "color: #888999") + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine="openpyxl") as writer: + ExcelFormatter(styler, style_converter=custom_converter).write( + writer, sheet_name="custom" + ) + + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + assert wb["custom"].cell(2, 2).font.color.value == "00111222" + + +@pytest.mark.single_cpu +@td.skip_if_not_us_locale +def test_styler_to_s3(s3_public_bucket, s3so): + # GH#46381 + + mock_bucket_name, target_file = s3_public_bucket.name, "test.xlsx" + df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) + styler = df.style.set_sticky(axis="index") + styler.to_excel(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so) + timeout = 5 + while True: + if target_file in (obj.key for obj in s3_public_bucket.objects.all()): + break + time.sleep(0.1) + timeout -= 0.1 + assert timeout > 0, "Timed out waiting for file to appear on moto" + result = read_excel( + f"s3://{mock_bucket_name}/{target_file}", index_col=0, storage_options=s3so + ) + tm.assert_frame_equal(result, df) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py new file mode 100644 index 0000000000000000000000000000000000000000..292eab2d881526e6816d85f1fcd38aaa35255243 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py @@ -0,0 +1,1511 @@ +from datetime import ( + date, + datetime, + timedelta, +) +from functools import partial +from io import BytesIO +import os +import re + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows +from pandas.compat._constants import PY310 +from pandas.compat._optional import import_optional_dependency +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + date_range, + option_context, +) +import pandas._testing as tm + +from pandas.io.excel import ( + ExcelFile, + ExcelWriter, + 
_OpenpyxlWriter, + _XlsxWriter, + register_writer, +) +from pandas.io.excel._util import _writers + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +def get_exp_unit(path: str) -> str: + return "ns" + + +@pytest.fixture +def frame(float_frame): + """ + Returns the first ten items in fixture "float_frame". + """ + return float_frame[:10] + + +@pytest.fixture(params=[True, False]) +def merge_cells(request): + return request.param + + +@pytest.fixture +def path(ext): + """ + Fixture to open file for use in each test case. + """ + with tm.ensure_clean(ext) as file_path: + yield file_path + + +@pytest.fixture +def set_engine(engine, ext): + """ + Fixture to set engine for use in each test case. + + Rather than requiring `engine=...` to be provided explicitly as an + argument in each test, this fixture sets a global option to dictate + which engine should be used to write Excel files. After executing + the test it rolls back said change to the global option. + """ + option_name = f"io.excel.{ext.strip('.')}.writer" + with option_context(option_name, engine): + yield + + +@pytest.mark.parametrize( + "ext", + [ + pytest.param(".xlsx", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]), + pytest.param(".xlsm", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]), + pytest.param( + ".xlsx", marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")] + ), + pytest.param(".ods", marks=td.skip_if_no("odf")), + ], +) +class TestRoundTrip: + @pytest.mark.parametrize( + "header,expected", + [(None, DataFrame([np.nan] * 4)), (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))], + ) + def test_read_one_empty_col_no_header(self, ext, header, expected): + # xref gh-12292 + filename = "no_header" + df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) + + with tm.ensure_clean(ext) as path: + df.to_excel(path, sheet_name=filename, index=False, header=False) + result = pd.read_excel( + path, sheet_name=filename, usecols=[0], header=header + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "header,expected", + [(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))], + ) + def test_read_one_empty_col_with_header(self, ext, header, expected): + filename = "with_header" + df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) + + with tm.ensure_clean(ext) as path: + df.to_excel(path, sheet_name="with_header", index=False, header=True) + result = pd.read_excel( + path, sheet_name=filename, usecols=[0], header=header + ) + + tm.assert_frame_equal(result, expected) + + def test_set_column_names_in_parameter(self, ext): + # GH 12870 : pass down column names associated with + # keyword argument names + refdf = DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"]) + + with tm.ensure_clean(ext) as pth: + with ExcelWriter(pth) as writer: + refdf.to_excel( + writer, sheet_name="Data_no_head", header=False, index=False + ) + refdf.to_excel(writer, sheet_name="Data_with_head", index=False) + + refdf.columns = ["A", "B"] + + with ExcelFile(pth) as reader: + xlsdf_no_head = pd.read_excel( + reader, sheet_name="Data_no_head", header=None, names=["A", "B"] + ) + xlsdf_with_head = pd.read_excel( + reader, + sheet_name="Data_with_head", + index_col=None, + names=["A", "B"], + ) + + tm.assert_frame_equal(xlsdf_no_head, refdf) + tm.assert_frame_equal(xlsdf_with_head, refdf) + + def test_creating_and_reading_multiple_sheets(self, ext): + # see gh-9450 + # + # Test reading multiple sheets, from a runtime + # created 
Excel file with multiple sheets. + def tdf(col_sheet_name): + d, i = [11, 22, 33], [1, 2, 3] + return DataFrame(d, i, columns=[col_sheet_name]) + + sheets = ["AAA", "BBB", "CCC"] + + dfs = [tdf(s) for s in sheets] + dfs = dict(zip(sheets, dfs)) + + with tm.ensure_clean(ext) as pth: + with ExcelWriter(pth) as ew: + for sheetname, df in dfs.items(): + df.to_excel(ew, sheet_name=sheetname) + + dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0) + + for s in sheets: + tm.assert_frame_equal(dfs[s], dfs_returned[s]) + + def test_read_excel_multiindex_empty_level(self, ext): + # see gh-12453 + with tm.ensure_clean(ext) as path: + df = DataFrame( + { + ("One", "x"): {0: 1}, + ("Two", "X"): {0: 3}, + ("Two", "Y"): {0: 7}, + ("Zero", ""): {0: 0}, + } + ) + + expected = DataFrame( + { + ("One", "x"): {0: 1}, + ("Two", "X"): {0: 3}, + ("Two", "Y"): {0: 7}, + ("Zero", "Unnamed: 4_level_1"): {0: 0}, + } + ) + + df.to_excel(path) + actual = pd.read_excel(path, header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + df = DataFrame( + { + ("Beg", ""): {0: 0}, + ("Middle", "x"): {0: 1}, + ("Tail", "X"): {0: 3}, + ("Tail", "Y"): {0: 7}, + } + ) + + expected = DataFrame( + { + ("Beg", "Unnamed: 1_level_1"): {0: 0}, + ("Middle", "x"): {0: 1}, + ("Tail", "X"): {0: 3}, + ("Tail", "Y"): {0: 7}, + } + ) + + df.to_excel(path) + actual = pd.read_excel(path, header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + @pytest.mark.parametrize("c_idx_names", ["a", None]) + @pytest.mark.parametrize("r_idx_names", ["b", None]) + @pytest.mark.parametrize("c_idx_levels", [1, 3]) + @pytest.mark.parametrize("r_idx_levels", [1, 3]) + def test_excel_multindex_roundtrip( + self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels, request + ): + # see gh-4679 + with tm.ensure_clean(ext) as pth: + # Empty name case current read in as + # unnamed levels, not Nones. 
+ check_names = bool(r_idx_names) or r_idx_levels <= 1 + + if c_idx_levels == 1: + columns = Index(list("abcde")) + else: + columns = MultiIndex.from_arrays( + [range(5) for _ in range(c_idx_levels)], + names=[f"{c_idx_names}-{i}" for i in range(c_idx_levels)], + ) + if r_idx_levels == 1: + index = Index(list("ghijk")) + else: + index = MultiIndex.from_arrays( + [range(5) for _ in range(r_idx_levels)], + names=[f"{r_idx_names}-{i}" for i in range(r_idx_levels)], + ) + df = DataFrame( + 1.1 * np.ones((5, 5)), + columns=columns, + index=index, + ) + df.to_excel(pth) + + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[0, :] = np.nan + df.to_excel(pth) + + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[-1, :] = np.nan + df.to_excel(pth) + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + def test_read_excel_parse_dates(self, ext): + # see gh-11544, gh-12051 + df = DataFrame( + {"col": [1, 2, 3], "date_strings": date_range("2012-01-01", periods=3)} + ) + df2 = df.copy() + df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y") + + with tm.ensure_clean(ext) as pth: + df2.to_excel(pth) + + res = pd.read_excel(pth, index_col=0) + tm.assert_frame_equal(df2, res) + + res = pd.read_excel(pth, parse_dates=["date_strings"], index_col=0) + tm.assert_frame_equal(df, res) + + date_parser = lambda x: datetime.strptime(x, "%m/%d/%Y") + with tm.assert_produces_warning( + FutureWarning, + match="use 'date_format' instead", + raise_on_extra_warnings=False, + ): + res = pd.read_excel( + pth, + parse_dates=["date_strings"], + date_parser=date_parser, + index_col=0, + ) + tm.assert_frame_equal(df, res) + res = pd.read_excel( + pth, parse_dates=["date_strings"], date_format="%m/%d/%Y", index_col=0 + ) + tm.assert_frame_equal(df, res) + + def test_multiindex_interval_datetimes(self, ext): + # GH 30986 + midx = MultiIndex.from_arrays( + [ + range(4), + pd.interval_range( + start=pd.Timestamp("2020-01-01"), periods=4, freq="6ME" + ), + ] + ) + df = DataFrame(range(4), index=midx) + with tm.ensure_clean(ext) as pth: + df.to_excel(pth) + result = pd.read_excel(pth, index_col=[0, 1]) + expected = DataFrame( + range(4), + MultiIndex.from_arrays( + [ + range(4), + [ + "(2020-01-31 00:00:00, 2020-07-31 00:00:00]", + "(2020-07-31 00:00:00, 2021-01-31 00:00:00]", + "(2021-01-31 00:00:00, 2021-07-31 00:00:00]", + "(2021-07-31 00:00:00, 2022-01-31 00:00:00]", + ], + ] + ), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "engine,ext", + [ + pytest.param( + "openpyxl", + ".xlsx", + marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")], + ), + pytest.param( + "openpyxl", + ".xlsm", + marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")], + ), + pytest.param( + "xlsxwriter", + ".xlsx", + marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")], + ), + pytest.param("odf", ".ods", marks=td.skip_if_no("odf")), + ], +) +@pytest.mark.usefixtures("set_engine") +class TestExcelWriter: + def test_excel_sheet_size(self, path): + # GH 26080 + breaking_row_count = 2**20 + 1 + breaking_col_count = 2**14 + 1 + # purposely using two arrays to prevent memory issues while testing + row_arr = np.zeros(shape=(breaking_row_count, 
1)) + col_arr = np.zeros(shape=(1, breaking_col_count)) + row_df = DataFrame(row_arr) + col_df = DataFrame(col_arr) + + msg = "sheet is too large" + with pytest.raises(ValueError, match=msg): + row_df.to_excel(path) + + with pytest.raises(ValueError, match=msg): + col_df.to_excel(path) + + def test_excel_sheet_by_name_raise(self, path): + gt = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + gt.to_excel(path) + + with ExcelFile(path) as xl: + df = pd.read_excel(xl, sheet_name=0, index_col=0) + + tm.assert_frame_equal(gt, df) + + msg = "Worksheet named '0' not found" + with pytest.raises(ValueError, match=msg): + pd.read_excel(xl, "0") + + def test_excel_writer_context_manager(self, frame, path): + with ExcelWriter(path) as writer: + frame.to_excel(writer, sheet_name="Data1") + frame2 = frame.copy() + frame2.columns = frame.columns[::-1] + frame2.to_excel(writer, sheet_name="Data2") + + with ExcelFile(path) as reader: + found_df = pd.read_excel(reader, sheet_name="Data1", index_col=0) + found_df2 = pd.read_excel(reader, sheet_name="Data2", index_col=0) + + tm.assert_frame_equal(found_df, frame) + tm.assert_frame_equal(found_df2, frame2) + + def test_roundtrip(self, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + # test roundtrip + frame.to_excel(path, sheet_name="test1") + recons = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, sheet_name="test1", index=False) + recons = pd.read_excel(path, sheet_name="test1", index_col=None) + recons.index = frame.index + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, sheet_name="test1", na_rep="NA") + recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["NA"]) + tm.assert_frame_equal(frame, recons) + + # GH 3611 + frame.to_excel(path, sheet_name="test1", na_rep="88") + recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["88"]) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, sheet_name="test1", na_rep="88") + recons = pd.read_excel( + path, sheet_name="test1", index_col=0, na_values=[88, 88.0] + ) + tm.assert_frame_equal(frame, recons) + + # GH 6573 + frame.to_excel(path, sheet_name="Sheet1") + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, sheet_name="0") + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(frame, recons) + + # GH 8825 Pandas Series should provide to_excel method + s = frame["A"] + s.to_excel(path) + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(s.to_frame(), recons) + + def test_mixed(self, frame, path): + mixed_frame = frame.copy() + mixed_frame["foo"] = "bar" + + mixed_frame.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(mixed_frame, recons) + + def test_ts_frame(self, path): + unit = get_exp_unit(path) + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + + # freq doesn't round-trip + index = pd.DatetimeIndex(np.asarray(df.index), freq=None) + df.index = index + + expected = df[:] + expected.index = 
expected.index.as_unit(unit) + + df.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_basics_with_nan(self, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + @pytest.mark.parametrize("np_type", [np.int8, np.int16, np.int32, np.int64]) + def test_int_types(self, np_type, path): + # Test np.int values read come back as int + # (rather than float which is Excel's format). + df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(10, 2)), dtype=np_type + ) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + + int_frame = df.astype(np.int64) + tm.assert_frame_equal(int_frame, recons) + + recons2 = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(int_frame, recons2) + + @pytest.mark.parametrize("np_type", [np.float16, np.float32, np.float64]) + def test_float_types(self, np_type, path): + # Test np.float values read come back as float. + df = DataFrame(np.random.default_rng(2).random(10), dtype=np_type) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np_type + ) + + tm.assert_frame_equal(df, recons) + + def test_bool_types(self, path): + # Test np.bool_ values read come back as float. + df = DataFrame([1, 0, True, False], dtype=np.bool_) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.bool_ + ) + + tm.assert_frame_equal(df, recons) + + def test_inf_roundtrip(self, path): + df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)]) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + + tm.assert_frame_equal(df, recons) + + def test_sheets(self, frame, path): + # freq doesn't round-trip + unit = get_exp_unit(path) + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None) + tsframe.index = index + + expected = tsframe[:] + expected.index = expected.index.as_unit(unit) + + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + # Test writing to separate sheets + with ExcelWriter(path) as writer: + frame.to_excel(writer, sheet_name="test1") + tsframe.to_excel(writer, sheet_name="test2") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(frame, recons) + recons = pd.read_excel(reader, sheet_name="test2", index_col=0) + tm.assert_frame_equal(expected, recons) + assert 2 == len(reader.sheet_names) + assert "test1" == reader.sheet_names[0] + assert "test2" == reader.sheet_names[1] + + def test_colaliases(self, 
frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + # column aliases + col_aliases = Index(["AA", "X", "Y", "Z"]) + frame.to_excel(path, sheet_name="test1", header=col_aliases) + with ExcelFile(path) as reader: + rs = pd.read_excel(reader, sheet_name="test1", index_col=0) + xp = frame.copy() + xp.columns = col_aliases + tm.assert_frame_equal(xp, rs) + + def test_roundtrip_indexlabels(self, merge_cells, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + # test index_label + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel( + path, sheet_name="test1", index_label=["test"], merge_cells=merge_cells + ) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + assert df.index.names == recons.index.names + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel( + path, + sheet_name="test1", + index_label=["test", "dummy", "dummy2"], + merge_cells=merge_cells, + ) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + assert df.index.names == recons.index.names + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel( + path, sheet_name="test1", index_label="test", merge_cells=merge_cells + ) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + tm.assert_frame_equal(df, recons.astype(bool)) + + frame.to_excel( + path, + sheet_name="test1", + columns=["A", "B", "C", "D"], + index=False, + merge_cells=merge_cells, + ) + # take 'A' and 'B' as indexes (same row as cols 'C', 'D') + df = frame.copy() + df = df.set_index(["A", "B"]) + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + tm.assert_frame_equal(df, recons) + + def test_excel_roundtrip_indexname(self, merge_cells, path): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + df.index.name = "foo" + + df.to_excel(path, merge_cells=merge_cells) + + with ExcelFile(path) as xf: + result = pd.read_excel(xf, sheet_name=xf.sheet_names[0], index_col=0) + + tm.assert_frame_equal(result, df) + assert result.index.name == "foo" + + def test_excel_roundtrip_datetime(self, merge_cells, path): + # datetime.date, not sure what to test here exactly + unit = get_exp_unit(path) + + # freq does not round-trip + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None) + tsframe.index = index + + tsf = tsframe.copy() + + tsf.index = [x.date() for x in tsframe.index] + tsf.to_excel(path, sheet_name="test1", merge_cells=merge_cells) + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", 
index_col=0) + + expected = tsframe[:] + expected.index = expected.index.as_unit(unit) + tm.assert_frame_equal(expected, recons) + + def test_excel_date_datetime_format(self, ext, path): + # see gh-4133 + # + # Excel output format strings + unit = get_exp_unit(path) + + df = DataFrame( + [ + [date(2014, 1, 31), date(1999, 9, 24)], + [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)], + ], + index=["DATE", "DATETIME"], + columns=["X", "Y"], + ) + df_expected = DataFrame( + [ + [datetime(2014, 1, 31), datetime(1999, 9, 24)], + [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)], + ], + index=["DATE", "DATETIME"], + columns=["X", "Y"], + ) + df_expected = df_expected.astype(f"M8[{unit}]") + + with tm.ensure_clean(ext) as filename2: + with ExcelWriter(path) as writer1: + df.to_excel(writer1, sheet_name="test1") + + with ExcelWriter( + filename2, + date_format="DD.MM.YYYY", + datetime_format="DD.MM.YYYY HH-MM-SS", + ) as writer2: + df.to_excel(writer2, sheet_name="test1") + + with ExcelFile(path) as reader1: + rs1 = pd.read_excel(reader1, sheet_name="test1", index_col=0) + + with ExcelFile(filename2) as reader2: + rs2 = pd.read_excel(reader2, sheet_name="test1", index_col=0) + + tm.assert_frame_equal(rs1, rs2) + + # Since the reader returns a datetime object for dates, + # we need to use df_expected to check the result. + tm.assert_frame_equal(rs2, df_expected) + + def test_to_excel_interval_no_labels(self, path, using_infer_string): + # see gh-19242 + # + # Test writing Interval without labels. + df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64 + ) + expected = df.copy() + + df["new"] = pd.cut(df[0], 10) + expected["new"] = pd.cut(expected[0], 10).astype( + str if not using_infer_string else "string[pyarrow_numpy]" + ) + + df.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_interval_labels(self, path): + # see gh-19242 + # + # Test writing Interval with labels. + df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64 + ) + expected = df.copy() + intervals = pd.cut( + df[0], 10, labels=["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"] + ) + df["new"] = intervals + expected["new"] = pd.Series(list(intervals)) + + df.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_timedelta(self, path): + # see gh-19242, gh-9155 + # + # Test writing timedelta to xls. 
+ df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), + columns=["A"], + dtype=np.int64, + ) + expected = df.copy() + + df["new"] = df["A"].apply(lambda x: timedelta(seconds=x)) + expected["new"] = expected["A"].apply( + lambda x: timedelta(seconds=x).total_seconds() / 86400 + ) + + df.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_periodindex(self, path): + # xp has a PeriodIndex + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + xp = df.resample("ME").mean().to_period("M") + + xp.to_excel(path, sheet_name="sht1") + + with ExcelFile(path) as reader: + rs = pd.read_excel(reader, sheet_name="sht1", index_col=0) + tm.assert_frame_equal(xp, rs.to_period("M")) + + def test_to_excel_multiindex(self, merge_cells, frame, path): + arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1) + new_index = MultiIndex.from_arrays(arrays, names=["first", "second"]) + frame.index = new_index + + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + + # round trip + frame.to_excel(path, sheet_name="test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + df = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + tm.assert_frame_equal(frame, df) + + # GH13511 + def test_to_excel_multiindex_nan_label(self, merge_cells, path): + df = DataFrame( + { + "A": [None, 2, 3], + "B": [10, 20, 30], + "C": np.random.default_rng(2).random(3), + } + ) + df = df.set_index(["A", "B"]) + + df.to_excel(path, merge_cells=merge_cells) + df1 = pd.read_excel(path, index_col=[0, 1]) + tm.assert_frame_equal(df, df1) + + # Test for Issue 11328. 
If column indices are integers, make + # sure they are handled correctly for either setting of + # merge_cells + def test_to_excel_multiindex_cols(self, merge_cells, frame, path): + arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1) + new_index = MultiIndex.from_arrays(arrays, names=["first", "second"]) + frame.index = new_index + + new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)]) + frame.columns = new_cols_index + header = [0, 1] + if not merge_cells: + header = 0 + + # round trip + frame.to_excel(path, sheet_name="test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + df = pd.read_excel( + reader, sheet_name="test1", header=header, index_col=[0, 1] + ) + if not merge_cells: + fm = frame.columns._format_multi(sparsify=False, include_names=False) + frame.columns = [".".join(map(str, q)) for q in zip(*fm)] + tm.assert_frame_equal(frame, df) + + def test_to_excel_multiindex_dates(self, merge_cells, path): + # try multiindex with dates + unit = get_exp_unit(path) + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + tsframe.index = MultiIndex.from_arrays( + [ + tsframe.index.as_unit(unit), + np.arange(len(tsframe.index), dtype=np.int64), + ], + names=["time", "foo"], + ) + + tsframe.to_excel(path, sheet_name="test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + + tm.assert_frame_equal(tsframe, recons) + assert recons.index.names == ("time", "foo") + + def test_to_excel_multiindex_no_write_index(self, path): + # Test writing and re-reading a MI without the index. GH 5616. + + # Initial non-MI frame. + frame1 = DataFrame({"a": [10, 20], "b": [30, 40], "c": [50, 60]}) + + # Add a MI. + frame2 = frame1.copy() + multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)]) + frame2.index = multi_index + + # Write out to Excel without the index. + frame2.to_excel(path, sheet_name="test1", index=False) + + # Read it back in. + with ExcelFile(path) as reader: + frame3 = pd.read_excel(reader, sheet_name="test1") + + # Test that it is the same as the initial frame. + tm.assert_frame_equal(frame1, frame3) + + def test_to_excel_empty_multiindex(self, path): + # GH 19543. + expected = DataFrame([], columns=[0, 1, 2]) + + df = DataFrame([], index=MultiIndex.from_tuples([], names=[0, 1]), columns=[2]) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + result = pd.read_excel(reader, sheet_name="test1") + tm.assert_frame_equal( + result, expected, check_index_type=False, check_dtype=False + ) + + def test_to_excel_float_format(self, path): + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.to_excel(path, sheet_name="test1", float_format="%.2f") + + with ExcelFile(path) as reader: + result = pd.read_excel(reader, sheet_name="test1", index_col=0) + + expected = DataFrame( + [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + tm.assert_frame_equal(result, expected) + + def test_to_excel_output_encoding(self, ext): + # Avoid mixed inferred_type. + df = DataFrame( + [["\u0192", "\u0193", "\u0194"], ["\u0195", "\u0196", "\u0197"]], + index=["A\u0192", "B"], + columns=["X\u0193", "Y", "Z"], + ) + + with tm.ensure_clean("__tmp_to_excel_float_format__." 
+ ext) as filename: + df.to_excel(filename, sheet_name="TestSheet") + result = pd.read_excel(filename, sheet_name="TestSheet", index_col=0) + tm.assert_frame_equal(result, df) + + def test_to_excel_unicode_filename(self, ext): + with tm.ensure_clean("\u0192u." + ext) as filename: + try: + with open(filename, "wb"): + pass + except UnicodeEncodeError: + pytest.skip("No unicode file names on this system") + + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.to_excel(filename, sheet_name="test1", float_format="%.2f") + + with ExcelFile(filename) as reader: + result = pd.read_excel(reader, sheet_name="test1", index_col=0) + + expected = DataFrame( + [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("use_headers", [True, False]) + @pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3]) + @pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3]) + def test_excel_010_hemstring( + self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path + ): + def roundtrip(data, header=True, parser_hdr=0, index=True): + data.to_excel(path, header=header, merge_cells=merge_cells, index=index) + + with ExcelFile(path) as xf: + return pd.read_excel( + xf, sheet_name=xf.sheet_names[0], header=parser_hdr + ) + + # Basic test. + parser_header = 0 if use_headers else None + res = roundtrip(DataFrame([0]), use_headers, parser_header) + + assert res.shape == (1, 2) + assert res.iloc[0, 0] is not np.nan + + # More complex tests with multi-index. + nrows = 5 + ncols = 3 + + # ensure limited functionality in 0.10 + # override of gh-2370 until sorted out in 0.11 + + if c_idx_nlevels == 1: + columns = Index([f"a-{i}" for i in range(ncols)], dtype=object) + else: + columns = MultiIndex.from_arrays( + [range(ncols) for _ in range(c_idx_nlevels)], + names=[f"i-{i}" for i in range(c_idx_nlevels)], + ) + if r_idx_nlevels == 1: + index = Index([f"b-{i}" for i in range(nrows)], dtype=object) + else: + index = MultiIndex.from_arrays( + [range(nrows) for _ in range(r_idx_nlevels)], + names=[f"j-{i}" for i in range(r_idx_nlevels)], + ) + + df = DataFrame( + np.ones((nrows, ncols)), + columns=columns, + index=index, + ) + + # This if will be removed once multi-column Excel writing + # is implemented. For now fixing gh-9794. + if c_idx_nlevels > 1: + msg = ( + "Writing to Excel with MultiIndex columns and no index " + "\\('index'=False\\) is not yet implemented." + ) + with pytest.raises(NotImplementedError, match=msg): + roundtrip(df, use_headers, index=False) + else: + res = roundtrip(df, use_headers) + + if use_headers: + assert res.shape == (nrows, ncols + r_idx_nlevels) + else: + # First row taken as columns. + assert res.shape == (nrows - 1, ncols + r_idx_nlevels) + + # No NaNs. + for r in range(len(res.index)): + for c in range(len(res.columns)): + assert res.iloc[r, c] is not np.nan + + def test_duplicated_columns(self, path): + # see gh-5235 + df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B"]) + df.to_excel(path, sheet_name="test1") + expected = DataFrame( + [[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B.1"] + ) + + # By default, we mangle. 
+ result = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(result, expected) + + # see gh-11007, gh-10970 + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"]) + df.to_excel(path, sheet_name="test1") + + result = pd.read_excel(path, sheet_name="test1", index_col=0) + expected = DataFrame( + [[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A.1", "B.1"] + ) + tm.assert_frame_equal(result, expected) + + # see gh-10982 + df.to_excel(path, sheet_name="test1", index=False, header=False) + result = pd.read_excel(path, sheet_name="test1", header=None) + + expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + tm.assert_frame_equal(result, expected) + + def test_swapped_columns(self, path): + # Test for issue #5427. + write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]}) + write_frame.to_excel(path, sheet_name="test1", columns=["B", "A"]) + + read_frame = pd.read_excel(path, sheet_name="test1", header=0) + + tm.assert_series_equal(write_frame["A"], read_frame["A"]) + tm.assert_series_equal(write_frame["B"], read_frame["B"]) + + def test_invalid_columns(self, path): + # see gh-10982 + write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]}) + + with pytest.raises(KeyError, match="Not all names specified"): + write_frame.to_excel(path, sheet_name="test1", columns=["B", "C"]) + + with pytest.raises( + KeyError, match="'passes columns are not ALL present dataframe'" + ): + write_frame.to_excel(path, sheet_name="test1", columns=["C", "D"]) + + @pytest.mark.parametrize( + "to_excel_index,read_excel_index_col", + [ + (True, 0), # Include index in write to file + (False, None), # Dont include index in write to file + ], + ) + def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col): + # GH 31677 + write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]}) + write_frame.to_excel( + path, sheet_name="col_subset_bug", columns=["A", "B"], index=to_excel_index + ) + + expected = write_frame[["A", "B"]] + read_frame = pd.read_excel( + path, sheet_name="col_subset_bug", index_col=read_excel_index_col + ) + + tm.assert_frame_equal(expected, read_frame) + + def test_comment_arg(self, path): + # see gh-18735 + # + # Test the comment argument functionality to pd.read_excel. + + # Create file to read in. + df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]}) + df.to_excel(path, sheet_name="test_c") + + # Read file without comment arg. + result1 = pd.read_excel(path, sheet_name="test_c", index_col=0) + + result1.iloc[1, 0] = None + result1.iloc[1, 1] = None + result1.iloc[2, 1] = None + + result2 = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0) + tm.assert_frame_equal(result1, result2) + + def test_comment_default(self, path): + # Re issue #18735 + # Test the comment argument default to pd.read_excel + + # Create file to read in + df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]}) + df.to_excel(path, sheet_name="test_c") + + # Read file with default and explicit comment=None + result1 = pd.read_excel(path, sheet_name="test_c") + result2 = pd.read_excel(path, sheet_name="test_c", comment=None) + tm.assert_frame_equal(result1, result2) + + def test_comment_used(self, path): + # see gh-18735 + # + # Test the comment argument is working as expected when used. + + # Create file to read in. 
+ df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]}) + df.to_excel(path, sheet_name="test_c") + + # Test read_frame_comment against manually produced expected output. + expected = DataFrame({"A": ["one", None, "one"], "B": ["two", None, None]}) + result = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0) + tm.assert_frame_equal(result, expected) + + def test_comment_empty_line(self, path): + # Re issue #18735 + # Test that pd.read_excel ignores commented lines at the end of file + + df = DataFrame({"a": ["1", "#2"], "b": ["2", "3"]}) + df.to_excel(path, index=False) + + # Test that all-comment lines at EoF are ignored + expected = DataFrame({"a": [1], "b": [2]}) + result = pd.read_excel(path, comment="#") + tm.assert_frame_equal(result, expected) + + def test_datetimes(self, path): + # Test writing and reading datetimes. For issue #9139. (xref #9185) + unit = get_exp_unit(path) + datetimes = [ + datetime(2013, 1, 13, 1, 2, 3), + datetime(2013, 1, 13, 2, 45, 56), + datetime(2013, 1, 13, 4, 29, 49), + datetime(2013, 1, 13, 6, 13, 42), + datetime(2013, 1, 13, 7, 57, 35), + datetime(2013, 1, 13, 9, 41, 28), + datetime(2013, 1, 13, 11, 25, 21), + datetime(2013, 1, 13, 13, 9, 14), + datetime(2013, 1, 13, 14, 53, 7), + datetime(2013, 1, 13, 16, 37, 0), + datetime(2013, 1, 13, 18, 20, 52), + ] + + write_frame = DataFrame({"A": datetimes}) + write_frame.to_excel(path, sheet_name="Sheet1") + read_frame = pd.read_excel(path, sheet_name="Sheet1", header=0) + + expected = write_frame.astype(f"M8[{unit}]") + tm.assert_series_equal(expected["A"], read_frame["A"]) + + def test_bytes_io(self, engine): + # see gh-7074 + with BytesIO() as bio: + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + + # Pass engine explicitly, as there is no file path to infer from. + with ExcelWriter(bio, engine=engine) as writer: + df.to_excel(writer) + + bio.seek(0) + reread_df = pd.read_excel(bio, index_col=0) + tm.assert_frame_equal(df, reread_df) + + def test_engine_kwargs(self, engine, path): + # GH#52368 + df = DataFrame([{"A": 1, "B": 2}, {"A": 3, "B": 4}]) + + msgs = { + "odf": r"OpenDocumentSpreadsheet() got an unexpected keyword " + r"argument 'foo'", + "openpyxl": r"__init__() got an unexpected keyword argument 'foo'", + "xlsxwriter": r"__init__() got an unexpected keyword argument 'foo'", + } + + if PY310: + msgs[ + "openpyxl" + ] = "Workbook.__init__() got an unexpected keyword argument 'foo'" + msgs[ + "xlsxwriter" + ] = "Workbook.__init__() got an unexpected keyword argument 'foo'" + + # Handle change in error message for openpyxl (write and append mode) + if engine == "openpyxl" and not os.path.exists(path): + msgs[ + "openpyxl" + ] = r"load_workbook() got an unexpected keyword argument 'foo'" + + with pytest.raises(TypeError, match=re.escape(msgs[engine])): + df.to_excel( + path, + engine=engine, + engine_kwargs={"foo": "bar"}, + ) + + def test_write_lists_dict(self, path): + # see gh-8188. 
+ df = DataFrame( + { + "mixed": ["a", ["b", "c"], {"d": "e", "f": 2}], + "numeric": [1, 2, 3.0], + "str": ["apple", "banana", "cherry"], + } + ) + df.to_excel(path, sheet_name="Sheet1") + read = pd.read_excel(path, sheet_name="Sheet1", header=0, index_col=0) + + expected = df.copy() + expected.mixed = expected.mixed.apply(str) + expected.numeric = expected.numeric.astype("int64") + + tm.assert_frame_equal(read, expected) + + def test_render_as_column_name(self, path): + # see gh-34331 + df = DataFrame({"render": [1, 2], "data": [3, 4]}) + df.to_excel(path, sheet_name="Sheet1") + read = pd.read_excel(path, "Sheet1", index_col=0) + expected = df + tm.assert_frame_equal(read, expected) + + def test_true_and_false_value_options(self, path): + # see gh-13347 + df = DataFrame([["foo", "bar"]], columns=["col1", "col2"], dtype=object) + with option_context("future.no_silent_downcasting", True): + expected = df.replace({"foo": True, "bar": False}).astype("bool") + + df.to_excel(path) + read_frame = pd.read_excel( + path, true_values=["foo"], false_values=["bar"], index_col=0 + ) + tm.assert_frame_equal(read_frame, expected) + + def test_freeze_panes(self, path): + # see gh-15160 + expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"]) + expected.to_excel(path, sheet_name="Sheet1", freeze_panes=(1, 1)) + + result = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(result, expected) + + def test_path_path_lib(self, engine, ext): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD")), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + writer = partial(df.to_excel, engine=engine) + + reader = partial(pd.read_excel, index_col=0) + result = tm.round_trip_pathlib(writer, reader, path=f"foo{ext}") + tm.assert_frame_equal(result, df) + + def test_path_local_path(self, engine, ext): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD")), + index=Index([f"i-{i}" for i in range(30)]), + ) + writer = partial(df.to_excel, engine=engine) + + reader = partial(pd.read_excel, index_col=0) + result = tm.round_trip_localpath(writer, reader, path=f"foo{ext}") + tm.assert_frame_equal(result, df) + + def test_merged_cell_custom_objects(self, path): + # see GH-27006 + mi = MultiIndex.from_tuples( + [ + (pd.Period("2018"), pd.Period("2018Q1")), + (pd.Period("2018"), pd.Period("2018Q2")), + ] + ) + expected = DataFrame(np.ones((2, 2), dtype="int64"), columns=mi) + expected.to_excel(path) + result = pd.read_excel(path, header=[0, 1], index_col=0) + # need to convert PeriodIndexes to standard Indexes for assert equal + expected.columns = expected.columns.set_levels( + [[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]], + level=[0, 1], + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path): + # GH 27008, GH 7056 + tz = tz_aware_fixture + data = pd.Timestamp("2019", tz=tz) + df = DataFrame([data], dtype=dtype) + with pytest.raises(ValueError, match="Excel does not support"): + df.to_excel(path) + + data = data.to_pydatetime() + df = DataFrame([data], dtype=dtype) + with pytest.raises(ValueError, match="Excel does not support"): + df.to_excel(path) + + def test_excel_duplicate_columns_with_names(self, path): + # GH#39695 + df = DataFrame({"A": [0, 1], "B": [10, 11]}) + df.to_excel(path, columns=["A", "B", "A"], index=False) + + result = pd.read_excel(path) + expected = DataFrame([[0, 10, 0], [1, 
11, 1]], columns=["A", "B", "A.1"]) + tm.assert_frame_equal(result, expected) + + def test_if_sheet_exists_raises(self, ext): + # GH 40230 + msg = "if_sheet_exists is only valid in append mode (mode='a')" + + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=re.escape(msg)): + ExcelWriter(f, if_sheet_exists="replace") + + def test_excel_writer_empty_frame(self, engine, ext): + # GH#45793 + with tm.ensure_clean(ext) as path: + with ExcelWriter(path, engine=engine) as writer: + DataFrame().to_excel(writer) + result = pd.read_excel(path) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + def test_to_excel_empty_frame(self, engine, ext): + # GH#45793 + with tm.ensure_clean(ext) as path: + DataFrame().to_excel(path, engine=engine) + result = pd.read_excel(path) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + +class TestExcelWriterEngineTests: + @pytest.mark.parametrize( + "klass,ext", + [ + pytest.param(_XlsxWriter, ".xlsx", marks=td.skip_if_no("xlsxwriter")), + pytest.param(_OpenpyxlWriter, ".xlsx", marks=td.skip_if_no("openpyxl")), + ], + ) + def test_ExcelWriter_dispatch(self, klass, ext): + with tm.ensure_clean(ext) as path: + with ExcelWriter(path) as writer: + if ext == ".xlsx" and bool( + import_optional_dependency("xlsxwriter", errors="ignore") + ): + # xlsxwriter has preference over openpyxl if both installed + assert isinstance(writer, _XlsxWriter) + else: + assert isinstance(writer, klass) + + def test_ExcelWriter_dispatch_raises(self): + with pytest.raises(ValueError, match="No engine"): + ExcelWriter("nothing") + + def test_register_writer(self): + class DummyClass(ExcelWriter): + called_save = False + called_write_cells = False + called_sheets = False + _supported_extensions = ("xlsx", "xls") + _engine = "dummy" + + def book(self): + pass + + def _save(self): + type(self).called_save = True + + def _write_cells(self, *args, **kwargs): + type(self).called_write_cells = True + + @property + def sheets(self): + type(self).called_sheets = True + + @classmethod + def assert_called_and_reset(cls): + assert cls.called_save + assert cls.called_write_cells + assert not cls.called_sheets + cls.called_save = False + cls.called_write_cells = False + + register_writer(DummyClass) + + with option_context("io.excel.xlsx.writer", "dummy"): + path = "something.xlsx" + with tm.ensure_clean(path) as filepath: + with ExcelWriter(filepath) as writer: + assert isinstance(writer, DummyClass) + df = DataFrame( + ["a"], + columns=Index(["b"], name="foo"), + index=Index(["c"], name="bar"), + ) + df.to_excel(filepath) + DummyClass.assert_called_and_reset() + + with tm.ensure_clean("something.xls") as filepath: + df.to_excel(filepath, engine="dummy") + DummyClass.assert_called_and_reset() + + +@td.skip_if_no("xlrd") +@td.skip_if_no("openpyxl") +class TestFSPath: + def test_excelfile_fspath(self): + with tm.ensure_clean("foo.xlsx") as path: + df = DataFrame({"A": [1, 2]}) + df.to_excel(path) + with ExcelFile(path) as xl: + result = os.fspath(xl) + assert result == path + + def test_excelwriter_fspath(self): + with tm.ensure_clean("foo.xlsx") as path: + with ExcelWriter(path) as writer: + assert os.fspath(writer) == str(path) + + def test_to_excel_pos_args_deprecation(self): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_excel except " + r"for the argument 'excel_writer' will be keyword-only." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buf = BytesIO() + writer = ExcelWriter(buf) + df.to_excel(writer, "Sheet_name_1") + + +@pytest.mark.parametrize("klass", _writers.values()) +def test_subclass_attr(klass): + # testing that subclasses of ExcelWriter don't have public attributes (issue 49602) + attrs_base = {name for name in dir(ExcelWriter) if not name.startswith("_")} + attrs_klass = {name for name in dir(klass) if not name.startswith("_")} + assert not attrs_base.symmetric_difference(attrs_klass) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py new file mode 100644 index 0000000000000000000000000000000000000000..066393d91eeadcdc08873f4ffeedda0f689337fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py @@ -0,0 +1,76 @@ +import io + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +import pandas._testing as tm + +from pandas.io.excel import ExcelFile +from pandas.io.excel._base import inspect_excel_format + +xlrd = pytest.importorskip("xlrd") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture(params=[".xls"]) +def read_ext_xlrd(request): + """ + Valid extensions for reading Excel files with xlrd. + + Similar to read_ext, but excludes .ods, .xlsb, and for xlrd>2 .xlsx, .xlsm + """ + return request.param + + +def test_read_xlrd_book(read_ext_xlrd, datapath): + engine = "xlrd" + sheet_name = "Sheet1" + pth = datapath("io", "data", "excel", "test1.xls") + with xlrd.open_workbook(pth) as book: + with ExcelFile(book, engine=engine) as xl: + result = pd.read_excel(xl, sheet_name=sheet_name, index_col=0) + + expected = pd.read_excel( + book, sheet_name=sheet_name, engine=engine, index_col=0 + ) + tm.assert_frame_equal(result, expected) + + +def test_read_xlsx_fails(datapath): + # GH 29375 + from xlrd.biffh import XLRDError + + path = datapath("io", "data", "excel", "test1.xlsx") + with pytest.raises(XLRDError, match="Excel xlsx file; not supported"): + pd.read_excel(path, engine="xlrd") + + +def test_nan_in_xls(datapath): + # GH 54564 + path = datapath("io", "data", "excel", "test6.xls") + + expected = pd.DataFrame({0: np.r_[0, 2].astype("int64"), 1: np.r_[1, np.nan]}) + + result = pd.read_excel(path, header=None) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "file_header", + [ + b"\x09\x00\x04\x00\x07\x00\x10\x00", + b"\x09\x02\x06\x00\x00\x00\x10\x00", + b"\x09\x04\x06\x00\x00\x00\x10\x00", + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1", + ], +) +def test_read_old_xls_files(file_header): + # GH 41226 + f = io.BytesIO(file_header) + assert inspect_excel_format(f) == "xls" diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py new file mode 100644 index 0000000000000000000000000000000000000000..529367761fc025e3e5d02bea85741c82f64c97ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py @@ -0,0 +1,86 @@ +import contextlib + +import pytest + +from pandas.compat import is_platform_windows + +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.excel import ExcelWriter + +xlsxwriter = pytest.importorskip("xlsxwriter") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture +def ext(): + return ".xlsx" + + +def 
test_column_format(ext): + # Test that column formats are applied to cells. Test for issue #9167. + # Applicable to xlsxwriter only. + openpyxl = pytest.importorskip("openpyxl") + + with tm.ensure_clean(ext) as path: + frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]}) + + with ExcelWriter(path) as writer: + frame.to_excel(writer) + + # Add a number format to col B and ensure it is applied to cells. + num_format = "#,##0" + write_workbook = writer.book + write_worksheet = write_workbook.worksheets()[0] + col_format = write_workbook.add_format({"num_format": num_format}) + write_worksheet.set_column("B:B", None, col_format) + + with contextlib.closing(openpyxl.load_workbook(path)) as read_workbook: + try: + read_worksheet = read_workbook["Sheet1"] + except TypeError: + # compat + read_worksheet = read_workbook.get_sheet_by_name(name="Sheet1") + + # Get the number format from the cell. + try: + cell = read_worksheet["B2"] + except TypeError: + # compat + cell = read_worksheet.cell("B2") + + try: + read_num_format = cell.number_format + except AttributeError: + read_num_format = cell.style.number_format._format_code + + assert read_num_format == num_format + + +def test_write_append_mode_raises(ext): + msg = "Append mode is not supported with xlsxwriter!" + + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=msg): + ExcelWriter(f, engine="xlsxwriter", mode="a") + + +@pytest.mark.parametrize("nan_inf_to_errors", [True, False]) +def test_engine_kwargs(ext, nan_inf_to_errors): + # GH 42286 + engine_kwargs = {"options": {"nan_inf_to_errors": nan_inf_to_errors}} + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="xlsxwriter", engine_kwargs=engine_kwargs) as writer: + assert writer.book.nan_inf_to_errors == nan_inf_to_errors + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="xlsxwriter") as writer: + assert writer.sheets == {} + sheet = writer.book.add_worksheet("test_name") + assert writer.sheets == {"test_name": sheet} diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b38bcb48d8a77e02d46cdf8abe90147029349371 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..987a230fc743bcab68e8ae22336dcf19f2097da9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d345247f472afd4e550f77d5037e9a748f1d3fc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4e8afbe76049ec07099f1a956b532320bfad03a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10cd1f43825e9d6bcee0bb1c5edeef8d9efb68a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95192ad05886745d224afc38ef10c59d9ac3d20c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3daa5eeaefa3bef0ff4262264724d2fe9b2c1d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e96a0dd3c2b63955d28fc7c2b47991000a5aa819 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f721a349f282d6bae422607c2c8465dc34295f05 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d13028dced6590b5c2c1a406b14442fba4ddc3c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..4e848cd48b42d70033af233bf71c6904b9d069f9 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py @@ -0,0 +1,9 @@ +import pytest + + +@pytest.fixture(params=["split", "records", "index", "columns", "values"]) +def orient(request): + """ + Fixture for orients excluding the table format. + """ + return request.param diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..b7bb057bc538e9eed084c93c1a209a34d904ea61 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py @@ -0,0 +1,317 @@ +"""Tests for ExtensionDtype Table Schema integration.""" + +from collections import OrderedDict +import datetime as dt +import decimal +from io import StringIO +import json + +import pytest + +from pandas import ( + NA, + DataFrame, + Index, + array, + read_json, +) +import pandas._testing as tm +from pandas.core.arrays.integer import Int64Dtype +from pandas.core.arrays.string_ import StringDtype +from pandas.core.series import Series +from pandas.tests.extension.date import ( + DateArray, + DateDtype, +) +from pandas.tests.extension.decimal.array import ( + DecimalArray, + DecimalDtype, +) + +from pandas.io.json._table_schema import ( + as_json_table_type, + build_table_schema, +) + + +class TestBuildSchema: + def test_build_table_schema(self): + df = DataFrame( + { + "A": DateArray([dt.date(2021, 10, 10)]), + "B": DecimalArray([decimal.Decimal(10)]), + "C": array(["pandas"], dtype="string"), + "D": array([10], dtype="Int64"), + } + ) + result = build_table_schema(df, version=False) + expected = { + "fields": [ + {"name": "index", "type": "integer"}, + {"name": "A", "type": "any", "extDtype": "DateDtype"}, + {"name": "B", "type": "number", "extDtype": "decimal"}, + {"name": "C", "type": "any", "extDtype": "string"}, + {"name": "D", "type": "integer", "extDtype": "Int64"}, + ], + "primaryKey": ["index"], + } + assert result == expected + result = build_table_schema(df) + assert "pandas_version" in result + + +class TestTableSchemaType: + @pytest.mark.parametrize( + "date_data", + [ + DateArray([dt.date(2021, 10, 10)]), + DateArray(dt.date(2021, 10, 10)), + Series(DateArray(dt.date(2021, 10, 10))), + ], + ) + def test_as_json_table_type_ext_date_array_dtype(self, date_data): + assert as_json_table_type(date_data.dtype) == "any" + + def test_as_json_table_type_ext_date_dtype(self): + assert as_json_table_type(DateDtype()) == "any" + + @pytest.mark.parametrize( + "decimal_data", + [ + DecimalArray([decimal.Decimal(10)]), + Series(DecimalArray([decimal.Decimal(10)])), + ], + ) + def test_as_json_table_type_ext_decimal_array_dtype(self, decimal_data): + assert as_json_table_type(decimal_data.dtype) == "number" + + def test_as_json_table_type_ext_decimal_dtype(self): + assert as_json_table_type(DecimalDtype()) == "number" + + @pytest.mark.parametrize( + "string_data", + [ + array(["pandas"], dtype="string"), + Series(array(["pandas"], dtype="string")), + ], + ) + def test_as_json_table_type_ext_string_array_dtype(self, string_data): + assert as_json_table_type(string_data.dtype) == "any" + + def test_as_json_table_type_ext_string_dtype(self): + assert as_json_table_type(StringDtype()) == "any" + + @pytest.mark.parametrize( + "integer_data", + [ + array([10], dtype="Int64"), + Series(array([10], dtype="Int64")), + ], + ) + def test_as_json_table_type_ext_integer_array_dtype(self, 
integer_data): + assert as_json_table_type(integer_data.dtype) == "integer" + + def test_as_json_table_type_ext_integer_dtype(self): + assert as_json_table_type(Int64Dtype()) == "integer" + + +class TestTableOrient: + @pytest.fixture + def da(self): + return DateArray([dt.date(2021, 10, 10)]) + + @pytest.fixture + def dc(self): + return DecimalArray([decimal.Decimal(10)]) + + @pytest.fixture + def sa(self): + return array(["pandas"], dtype="string") + + @pytest.fixture + def ia(self): + return array([10], dtype="Int64") + + @pytest.fixture + def df(self, da, dc, sa, ia): + return DataFrame( + { + "A": da, + "B": dc, + "C": sa, + "D": ia, + } + ) + + def test_build_date_series(self, da): + s = Series(da, name="a") + s.index.name = "id" + result = s.to_json(orient="table", date_format="iso") + result = json.loads(result, object_pairs_hook=OrderedDict) + + assert "pandas_version" in result["schema"] + result["schema"].pop("pandas_version") + + fields = [ + {"name": "id", "type": "integer"}, + {"name": "a", "type": "any", "extDtype": "DateDtype"}, + ] + + schema = {"fields": fields, "primaryKey": ["id"]} + + expected = OrderedDict( + [ + ("schema", schema), + ("data", [OrderedDict([("id", 0), ("a", "2021-10-10T00:00:00.000")])]), + ] + ) + + assert result == expected + + def test_build_decimal_series(self, dc): + s = Series(dc, name="a") + s.index.name = "id" + result = s.to_json(orient="table", date_format="iso") + result = json.loads(result, object_pairs_hook=OrderedDict) + + assert "pandas_version" in result["schema"] + result["schema"].pop("pandas_version") + + fields = [ + {"name": "id", "type": "integer"}, + {"name": "a", "type": "number", "extDtype": "decimal"}, + ] + + schema = {"fields": fields, "primaryKey": ["id"]} + + expected = OrderedDict( + [ + ("schema", schema), + ("data", [OrderedDict([("id", 0), ("a", 10.0)])]), + ] + ) + + assert result == expected + + def test_build_string_series(self, sa): + s = Series(sa, name="a") + s.index.name = "id" + result = s.to_json(orient="table", date_format="iso") + result = json.loads(result, object_pairs_hook=OrderedDict) + + assert "pandas_version" in result["schema"] + result["schema"].pop("pandas_version") + + fields = [ + {"name": "id", "type": "integer"}, + {"name": "a", "type": "any", "extDtype": "string"}, + ] + + schema = {"fields": fields, "primaryKey": ["id"]} + + expected = OrderedDict( + [ + ("schema", schema), + ("data", [OrderedDict([("id", 0), ("a", "pandas")])]), + ] + ) + + assert result == expected + + def test_build_int64_series(self, ia): + s = Series(ia, name="a") + s.index.name = "id" + result = s.to_json(orient="table", date_format="iso") + result = json.loads(result, object_pairs_hook=OrderedDict) + + assert "pandas_version" in result["schema"] + result["schema"].pop("pandas_version") + + fields = [ + {"name": "id", "type": "integer"}, + {"name": "a", "type": "integer", "extDtype": "Int64"}, + ] + + schema = {"fields": fields, "primaryKey": ["id"]} + + expected = OrderedDict( + [ + ("schema", schema), + ("data", [OrderedDict([("id", 0), ("a", 10)])]), + ] + ) + + assert result == expected + + def test_to_json(self, df): + df = df.copy() + df.index.name = "idx" + result = df.to_json(orient="table", date_format="iso") + result = json.loads(result, object_pairs_hook=OrderedDict) + + assert "pandas_version" in result["schema"] + result["schema"].pop("pandas_version") + + fields = [ + OrderedDict({"name": "idx", "type": "integer"}), + OrderedDict({"name": "A", "type": "any", "extDtype": "DateDtype"}), + 
OrderedDict({"name": "B", "type": "number", "extDtype": "decimal"}), + OrderedDict({"name": "C", "type": "any", "extDtype": "string"}), + OrderedDict({"name": "D", "type": "integer", "extDtype": "Int64"}), + ] + + schema = OrderedDict({"fields": fields, "primaryKey": ["idx"]}) + data = [ + OrderedDict( + [ + ("idx", 0), + ("A", "2021-10-10T00:00:00.000"), + ("B", 10.0), + ("C", "pandas"), + ("D", 10), + ] + ) + ] + expected = OrderedDict([("schema", schema), ("data", data)]) + + assert result == expected + + def test_json_ext_dtype_reading_roundtrip(self): + # GH#40255 + df = DataFrame( + { + "a": Series([2, NA], dtype="Int64"), + "b": Series([1.5, NA], dtype="Float64"), + "c": Series([True, NA], dtype="boolean"), + }, + index=Index([1, NA], dtype="Int64"), + ) + expected = df.copy() + data_json = df.to_json(orient="table", indent=4) + result = read_json(StringIO(data_json), orient="table") + tm.assert_frame_equal(result, expected) + + def test_json_ext_dtype_reading(self): + # GH#40255 + data_json = """{ + "schema":{ + "fields":[ + { + "name":"a", + "type":"integer", + "extDtype":"Int64" + } + ], + }, + "data":[ + { + "a":2 + }, + { + "a":null + } + ] + }""" + result = read_json(StringIO(data_json), orient="table") + expected = DataFrame({"a": Series([2, NA], dtype="Int64")}) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py new file mode 100644 index 0000000000000000000000000000000000000000..3c6517a872b76df61cfd2e1d11e5164b809f3e90 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py @@ -0,0 +1,907 @@ +import json + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, + json_normalize, +) +import pandas._testing as tm + +from pandas.io.json._normalize import nested_to_record + + +@pytest.fixture +def deep_nested(): + # deeply nested data + return [ + { + "country": "USA", + "states": [ + { + "name": "California", + "cities": [ + {"name": "San Francisco", "pop": 12345}, + {"name": "Los Angeles", "pop": 12346}, + ], + }, + { + "name": "Ohio", + "cities": [ + {"name": "Columbus", "pop": 1234}, + {"name": "Cleveland", "pop": 1236}, + ], + }, + ], + }, + { + "country": "Germany", + "states": [ + {"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]}, + { + "name": "Nordrhein-Westfalen", + "cities": [ + {"name": "Duesseldorf", "pop": 1238}, + {"name": "Koeln", "pop": 1239}, + ], + }, + ], + }, + ] + + +@pytest.fixture +def state_data(): + return [ + { + "counties": [ + {"name": "Dade", "population": 12345}, + {"name": "Broward", "population": 40000}, + {"name": "Palm Beach", "population": 60000}, + ], + "info": {"governor": "Rick Scott"}, + "shortname": "FL", + "state": "Florida", + }, + { + "counties": [ + {"name": "Summit", "population": 1234}, + {"name": "Cuyahoga", "population": 1337}, + ], + "info": {"governor": "John Kasich"}, + "shortname": "OH", + "state": "Ohio", + }, + ] + + +@pytest.fixture +def author_missing_data(): + return [ + {"info": None}, + { + "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"}, + "author_name": {"first": "Jane", "last_name": "Doe"}, + }, + ] + + +@pytest.fixture +def missing_metadata(): + return [ + { + "name": "Alice", + "addresses": [ + { + "number": 9562, + "street": "Morris St.", + "city": "Massillon", + "state": "OH", + "zip": 44646, + } + ], + "previous_residences": {"cities": [{"city_name": "Foo 
York City"}]}, + }, + { + "addresses": [ + { + "number": 8449, + "street": "Spring St.", + "city": "Elizabethton", + "state": "TN", + "zip": 37643, + } + ], + "previous_residences": {"cities": [{"city_name": "Barmingham"}]}, + }, + ] + + +@pytest.fixture +def max_level_test_input_data(): + """ + input data to test json_normalize with max_level param + """ + return [ + { + "CreatedBy": {"Name": "User001"}, + "Lookup": { + "TextField": "Some text", + "UserField": {"Id": "ID001", "Name": "Name001"}, + }, + "Image": {"a": "b"}, + } + ] + + +class TestJSONNormalize: + def test_simple_records(self): + recs = [ + {"a": 1, "b": 2, "c": 3}, + {"a": 4, "b": 5, "c": 6}, + {"a": 7, "b": 8, "c": 9}, + {"a": 10, "b": 11, "c": 12}, + ] + + result = json_normalize(recs) + expected = DataFrame(recs) + + tm.assert_frame_equal(result, expected) + + def test_simple_normalize(self, state_data): + result = json_normalize(state_data[0], "counties") + expected = DataFrame(state_data[0]["counties"]) + tm.assert_frame_equal(result, expected) + + result = json_normalize(state_data, "counties") + + expected = [] + for rec in state_data: + expected.extend(rec["counties"]) + expected = DataFrame(expected) + + tm.assert_frame_equal(result, expected) + + result = json_normalize(state_data, "counties", meta="state") + expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2]) + + tm.assert_frame_equal(result, expected) + + def test_fields_list_type_normalize(self): + parse_metadata_fields_list_type = [ + {"values": [1, 2, 3], "metadata": {"listdata": [1, 2]}} + ] + result = json_normalize( + parse_metadata_fields_list_type, + record_path=["values"], + meta=[["metadata", "listdata"]], + ) + expected = DataFrame( + {0: [1, 2, 3], "metadata.listdata": [[1, 2], [1, 2], [1, 2]]} + ) + tm.assert_frame_equal(result, expected) + + def test_empty_array(self): + result = json_normalize([]) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "data, record_path, exception_type", + [ + ([{"a": 0}, {"a": 1}], None, None), + ({"a": [{"a": 0}, {"a": 1}]}, "a", None), + ('{"a": [{"a": 0}, {"a": 1}]}', None, NotImplementedError), + (None, None, NotImplementedError), + ], + ) + def test_accepted_input(self, data, record_path, exception_type): + if exception_type is not None: + with pytest.raises(exception_type, match=""): + json_normalize(data, record_path=record_path) + else: + result = json_normalize(data, record_path=record_path) + expected = DataFrame([0, 1], columns=["a"]) + tm.assert_frame_equal(result, expected) + + def test_simple_normalize_with_separator(self, deep_nested): + # GH 14883 + result = json_normalize({"A": {"A": 1, "B": 2}}) + expected = DataFrame([[1, 2]], columns=["A.A", "A.B"]) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_") + expected = DataFrame([[1, 2]], columns=["A_A", "A_B"]) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3") + expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"]) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + result = json_normalize( + deep_nested, + ["states", "cities"], + meta=["country", ["states", "name"]], + sep="_", + ) + expected = Index(["name", "pop", "country", "states_name"]).sort_values() + assert result.columns.sort_values().equals(expected) + + def test_normalize_with_multichar_separator(self): + # GH #43831 + data = {"a": [1, 2], "b": 
{"b_1": 2, "b_2": (3, 4)}} + result = json_normalize(data, sep="__") + expected = DataFrame([[[1, 2], 2, (3, 4)]], columns=["a", "b__b_1", "b__b_2"]) + tm.assert_frame_equal(result, expected) + + def test_value_array_record_prefix(self): + # GH 21536 + result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.") + expected = DataFrame([[1], [2]], columns=["Prefix.0"]) + tm.assert_frame_equal(result, expected) + + def test_nested_object_record_path(self): + # GH 22706 + data = { + "state": "Florida", + "info": { + "governor": "Rick Scott", + "counties": [ + {"name": "Dade", "population": 12345}, + {"name": "Broward", "population": 40000}, + {"name": "Palm Beach", "population": 60000}, + ], + }, + } + result = json_normalize(data, record_path=["info", "counties"]) + expected = DataFrame( + [["Dade", 12345], ["Broward", 40000], ["Palm Beach", 60000]], + columns=["name", "population"], + ) + tm.assert_frame_equal(result, expected) + + def test_more_deeply_nested(self, deep_nested): + result = json_normalize( + deep_nested, ["states", "cities"], meta=["country", ["states", "name"]] + ) + ex_data = { + "country": ["USA"] * 4 + ["Germany"] * 3, + "states.name": [ + "California", + "California", + "Ohio", + "Ohio", + "Bayern", + "Nordrhein-Westfalen", + "Nordrhein-Westfalen", + ], + "name": [ + "San Francisco", + "Los Angeles", + "Columbus", + "Cleveland", + "Munich", + "Duesseldorf", + "Koeln", + ], + "pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239], + } + + expected = DataFrame(ex_data, columns=result.columns) + tm.assert_frame_equal(result, expected) + + def test_shallow_nested(self): + data = [ + { + "state": "Florida", + "shortname": "FL", + "info": {"governor": "Rick Scott"}, + "counties": [ + {"name": "Dade", "population": 12345}, + {"name": "Broward", "population": 40000}, + {"name": "Palm Beach", "population": 60000}, + ], + }, + { + "state": "Ohio", + "shortname": "OH", + "info": {"governor": "John Kasich"}, + "counties": [ + {"name": "Summit", "population": 1234}, + {"name": "Cuyahoga", "population": 1337}, + ], + }, + ] + + result = json_normalize( + data, "counties", ["state", "shortname", ["info", "governor"]] + ) + ex_data = { + "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"], + "state": ["Florida"] * 3 + ["Ohio"] * 2, + "shortname": ["FL", "FL", "FL", "OH", "OH"], + "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2, + "population": [12345, 40000, 60000, 1234, 1337], + } + expected = DataFrame(ex_data, columns=result.columns) + tm.assert_frame_equal(result, expected) + + def test_nested_meta_path_with_nested_record_path(self, state_data): + # GH 27220 + result = json_normalize( + data=state_data, + record_path=["counties"], + meta=["state", "shortname", ["info", "governor"]], + errors="ignore", + ) + + ex_data = { + "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"], + "population": [12345, 40000, 60000, 1234, 1337], + "state": ["Florida"] * 3 + ["Ohio"] * 2, + "shortname": ["FL"] * 3 + ["OH"] * 2, + "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2, + } + + expected = DataFrame(ex_data) + tm.assert_frame_equal(result, expected) + + def test_meta_name_conflict(self): + data = [ + { + "foo": "hello", + "bar": "there", + "data": [ + {"foo": "something", "bar": "else"}, + {"foo": "something2", "bar": "else2"}, + ], + } + ] + + msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix" + with pytest.raises(ValueError, match=msg): + json_normalize(data, "data", meta=["foo", "bar"]) + + result = json_normalize(data, 
"data", meta=["foo", "bar"], meta_prefix="meta") + + for val in ["metafoo", "metabar", "foo", "bar"]: + assert val in result + + def test_meta_parameter_not_modified(self): + # GH 18610 + data = [ + { + "foo": "hello", + "bar": "there", + "data": [ + {"foo": "something", "bar": "else"}, + {"foo": "something2", "bar": "else2"}, + ], + } + ] + + COLUMNS = ["foo", "bar"] + result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta") + + assert COLUMNS == ["foo", "bar"] + for val in ["metafoo", "metabar", "foo", "bar"]: + assert val in result + + def test_record_prefix(self, state_data): + result = json_normalize(state_data[0], "counties") + expected = DataFrame(state_data[0]["counties"]) + tm.assert_frame_equal(result, expected) + + result = json_normalize( + state_data, "counties", meta="state", record_prefix="county_" + ) + + expected = [] + for rec in state_data: + expected.extend(rec["counties"]) + expected = DataFrame(expected) + expected = expected.rename(columns=lambda x: "county_" + x) + expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2]) + + tm.assert_frame_equal(result, expected) + + def test_non_ascii_key(self): + testjson = ( + b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' + b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]' + ).decode("utf8") + + testdata = { + b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1], + "sub.A": [1, 3], + "sub.B": [2, 4], + } + expected = DataFrame(testdata) + + result = json_normalize(json.loads(testjson)) + tm.assert_frame_equal(result, expected) + + def test_missing_field(self, author_missing_data): + # GH20030: + result = json_normalize(author_missing_data) + ex_data = [ + { + "info": np.nan, + "info.created_at": np.nan, + "info.last_updated": np.nan, + "author_name.first": np.nan, + "author_name.last_name": np.nan, + }, + { + "info": None, + "info.created_at": "11/08/1993", + "info.last_updated": "26/05/2012", + "author_name.first": "Jane", + "author_name.last_name": "Doe", + }, + ] + expected = DataFrame(ex_data) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "max_level,expected", + [ + ( + 0, + [ + { + "TextField": "Some text", + "UserField": {"Id": "ID001", "Name": "Name001"}, + "CreatedBy": {"Name": "User001"}, + "Image": {"a": "b"}, + }, + { + "TextField": "Some text", + "UserField": {"Id": "ID001", "Name": "Name001"}, + "CreatedBy": {"Name": "User001"}, + "Image": {"a": "b"}, + }, + ], + ), + ( + 1, + [ + { + "TextField": "Some text", + "UserField.Id": "ID001", + "UserField.Name": "Name001", + "CreatedBy": {"Name": "User001"}, + "Image": {"a": "b"}, + }, + { + "TextField": "Some text", + "UserField.Id": "ID001", + "UserField.Name": "Name001", + "CreatedBy": {"Name": "User001"}, + "Image": {"a": "b"}, + }, + ], + ), + ], + ) + def test_max_level_with_records_path(self, max_level, expected): + # GH23843: Enhanced JSON normalize + test_input = [ + { + "CreatedBy": {"Name": "User001"}, + "Lookup": [ + { + "TextField": "Some text", + "UserField": {"Id": "ID001", "Name": "Name001"}, + }, + { + "TextField": "Some text", + "UserField": {"Id": "ID001", "Name": "Name001"}, + }, + ], + "Image": {"a": "b"}, + "tags": [ + {"foo": "something", "bar": "else"}, + {"foo": "something2", "bar": "else2"}, + ], + } + ] + + result = json_normalize( + test_input, + record_path=["Lookup"], + meta=[["CreatedBy"], ["Image"]], + max_level=max_level, + ) + expected_df = DataFrame(data=expected, columns=result.columns.values) + tm.assert_equal(expected_df, result) + + def test_nested_flattening_consistent(self): 
+ # see gh-21537 + df1 = json_normalize([{"A": {"B": 1}}]) + df2 = json_normalize({"dummy": [{"A": {"B": 1}}]}, "dummy") + + # They should be the same. + tm.assert_frame_equal(df1, df2) + + def test_nonetype_record_path(self, nulls_fixture): + # see gh-30148 + # should not raise TypeError + result = json_normalize( + [ + {"state": "Texas", "info": nulls_fixture}, + {"state": "Florida", "info": [{"i": 2}]}, + ], + record_path=["info"], + ) + expected = DataFrame({"i": 2}, index=[0]) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("value", ["false", "true", "{}", "1", '"text"']) + def test_non_list_record_path_errors(self, value): + # see gh-30148, GH 26284 + parsed_value = json.loads(value) + test_input = {"state": "Texas", "info": parsed_value} + test_path = "info" + msg = ( + f"{test_input} has non list value {parsed_value} for path {test_path}. " + "Must be list or null." + ) + with pytest.raises(TypeError, match=msg): + json_normalize([test_input], record_path=[test_path]) + + def test_meta_non_iterable(self): + # GH 31507 + data = """[{"id": 99, "data": [{"one": 1, "two": 2}]}]""" + + result = json_normalize(json.loads(data), record_path=["data"], meta=["id"]) + expected = DataFrame( + {"one": [1], "two": [2], "id": np.array([99], dtype=object)} + ) + tm.assert_frame_equal(result, expected) + + def test_generator(self, state_data): + # GH35923 Fix pd.json_normalize to not skip the first element of a + # generator input + def generator_data(): + yield from state_data[0]["counties"] + + result = json_normalize(generator_data()) + expected = DataFrame(state_data[0]["counties"]) + + tm.assert_frame_equal(result, expected) + + def test_top_column_with_leading_underscore(self): + # 49861 + data = {"_id": {"a1": 10, "l2": {"l3": 0}}, "gg": 4} + result = json_normalize(data, sep="_") + expected = DataFrame([[4, 10, 0]], columns=["gg", "_id_a1", "_id_l2_l3"]) + + tm.assert_frame_equal(result, expected) + + +class TestNestedToRecord: + def test_flat_stays_flat(self): + recs = [{"flat1": 1, "flat2": 2}, {"flat3": 3, "flat2": 4}] + result = nested_to_record(recs) + expected = recs + assert result == expected + + def test_one_level_deep_flattens(self): + data = {"flat1": 1, "dict1": {"c": 1, "d": 2}} + + result = nested_to_record(data) + expected = {"dict1.c": 1, "dict1.d": 2, "flat1": 1} + + assert result == expected + + def test_nested_flattens(self): + data = { + "flat1": 1, + "dict1": {"c": 1, "d": 2}, + "nested": {"e": {"c": 1, "d": 2}, "d": 2}, + } + + result = nested_to_record(data) + expected = { + "dict1.c": 1, + "dict1.d": 2, + "flat1": 1, + "nested.d": 2, + "nested.e.c": 1, + "nested.e.d": 2, + } + + assert result == expected + + def test_json_normalize_errors(self, missing_metadata): + # GH14583: + # If meta keys are not always present a new option to set + # errors='ignore' has been implemented + + msg = ( + "Key 'name' not found. 
To replace missing values of " + "'name' with np.nan, pass in errors='ignore'" + ) + with pytest.raises(KeyError, match=msg): + json_normalize( + data=missing_metadata, + record_path="addresses", + meta="name", + errors="raise", + ) + + def test_missing_meta(self, missing_metadata): + # GH25468 + # If metadata is nullable with errors set to ignore, the null values + # should be numpy.nan values + result = json_normalize( + data=missing_metadata, record_path="addresses", meta="name", errors="ignore" + ) + ex_data = [ + [9562, "Morris St.", "Massillon", "OH", 44646, "Alice"], + [8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan], + ] + columns = ["number", "street", "city", "state", "zip", "name"] + expected = DataFrame(ex_data, columns=columns) + tm.assert_frame_equal(result, expected) + + def test_missing_nested_meta(self): + # GH44312 + # If errors="ignore" and nested metadata is null, we should return nan + data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]} + result = json_normalize( + data, + record_path="value", + meta=["meta", ["nested_meta", "leaf"]], + errors="ignore", + ) + ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]] + columns = ["rec", "meta", "nested_meta.leaf"] + expected = DataFrame(ex_data, columns=columns).astype( + {"nested_meta.leaf": object} + ) + tm.assert_frame_equal(result, expected) + + # If errors="raise" and nested metadata is null, we should raise with the + # key of the first missing level + with pytest.raises(KeyError, match="'leaf' not found"): + json_normalize( + data, + record_path="value", + meta=["meta", ["nested_meta", "leaf"]], + errors="raise", + ) + + def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata): + # GH41876 + # Ensure errors='raise' works as intended even when a record_path of length + # greater than one is passed in + msg = ( + "Key 'name' not found. 
To replace missing values of " + "'name' with np.nan, pass in errors='ignore'" + ) + with pytest.raises(KeyError, match=msg): + json_normalize( + data=missing_metadata, + record_path=["previous_residences", "cities"], + meta="name", + errors="raise", + ) + + def test_missing_meta_multilevel_record_path_errors_ignore(self, missing_metadata): + # GH41876 + # Ensure errors='ignore' works as intended even when a record_path of length + # greater than one is passed in + result = json_normalize( + data=missing_metadata, + record_path=["previous_residences", "cities"], + meta="name", + errors="ignore", + ) + ex_data = [ + ["Foo York City", "Alice"], + ["Barmingham", np.nan], + ] + columns = ["city_name", "name"] + expected = DataFrame(ex_data, columns=columns) + tm.assert_frame_equal(result, expected) + + def test_donot_drop_nonevalues(self): + # GH21356 + data = [ + {"info": None, "author_name": {"first": "Smith", "last_name": "Appleseed"}}, + { + "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"}, + "author_name": {"first": "Jane", "last_name": "Doe"}, + }, + ] + result = nested_to_record(data) + expected = [ + { + "info": None, + "author_name.first": "Smith", + "author_name.last_name": "Appleseed", + }, + { + "author_name.first": "Jane", + "author_name.last_name": "Doe", + "info.created_at": "11/08/1993", + "info.last_updated": "26/05/2012", + }, + ] + + assert result == expected + + def test_nonetype_top_level_bottom_level(self): + # GH21158: If inner level json has a key with a null value + # make sure it does not do a new_d.pop twice and except + data = { + "id": None, + "location": { + "country": { + "state": { + "id": None, + "town.info": { + "id": None, + "region": None, + "x": 49.151580810546875, + "y": -33.148521423339844, + "z": 27.572303771972656, + }, + } + } + }, + } + result = nested_to_record(data) + expected = { + "id": None, + "location.country.state.id": None, + "location.country.state.town.info.id": None, + "location.country.state.town.info.region": None, + "location.country.state.town.info.x": 49.151580810546875, + "location.country.state.town.info.y": -33.148521423339844, + "location.country.state.town.info.z": 27.572303771972656, + } + assert result == expected + + def test_nonetype_multiple_levels(self): + # GH21158: If inner level json has a key with a null value + # make sure it does not do a new_d.pop twice and except + data = { + "id": None, + "location": { + "id": None, + "country": { + "id": None, + "state": { + "id": None, + "town.info": { + "region": None, + "x": 49.151580810546875, + "y": -33.148521423339844, + "z": 27.572303771972656, + }, + }, + }, + }, + } + result = nested_to_record(data) + expected = { + "id": None, + "location.id": None, + "location.country.id": None, + "location.country.state.id": None, + "location.country.state.town.info.region": None, + "location.country.state.town.info.x": 49.151580810546875, + "location.country.state.town.info.y": -33.148521423339844, + "location.country.state.town.info.z": 27.572303771972656, + } + assert result == expected + + @pytest.mark.parametrize( + "max_level, expected", + [ + ( + None, + [ + { + "CreatedBy.Name": "User001", + "Lookup.TextField": "Some text", + "Lookup.UserField.Id": "ID001", + "Lookup.UserField.Name": "Name001", + "Image.a": "b", + } + ], + ), + ( + 0, + [ + { + "CreatedBy": {"Name": "User001"}, + "Lookup": { + "TextField": "Some text", + "UserField": {"Id": "ID001", "Name": "Name001"}, + }, + "Image": {"a": "b"}, + } + ], + ), + ( + 1, + [ + { + "CreatedBy.Name": "User001", + 
"Lookup.TextField": "Some text", + "Lookup.UserField": {"Id": "ID001", "Name": "Name001"}, + "Image.a": "b", + } + ], + ), + ], + ) + def test_with_max_level(self, max_level, expected, max_level_test_input_data): + # GH23843: Enhanced JSON normalize + output = nested_to_record(max_level_test_input_data, max_level=max_level) + assert output == expected + + def test_with_large_max_level(self): + # GH23843: Enhanced JSON normalize + max_level = 100 + input_data = [ + { + "CreatedBy": { + "user": { + "name": {"firstname": "Leo", "LastName": "Thomson"}, + "family_tree": { + "father": { + "name": "Father001", + "father": { + "Name": "Father002", + "father": { + "name": "Father003", + "father": {"Name": "Father004"}, + }, + }, + } + }, + } + } + } + ] + expected = [ + { + "CreatedBy.user.name.firstname": "Leo", + "CreatedBy.user.name.LastName": "Thomson", + "CreatedBy.user.family_tree.father.name": "Father001", + "CreatedBy.user.family_tree.father.father.Name": "Father002", + "CreatedBy.user.family_tree.father.father.father.name": "Father003", + "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", # noqa: E501 + } + ] + output = nested_to_record(input_data, max_level=max_level) + assert output == expected + + def test_series_non_zero_index(self): + # GH 19020 + data = { + 0: {"id": 1, "name": "Foo", "elements": {"a": 1}}, + 1: {"id": 2, "name": "Bar", "elements": {"b": 2}}, + 2: {"id": 3, "name": "Baz", "elements": {"c": 3}}, + } + s = Series(data) + s.index = [1, 2, 3] + result = json_normalize(s) + expected = DataFrame( + { + "id": [1, 2, 3], + "name": ["Foo", "Bar", "Baz"], + "elements.a": [1.0, np.nan, np.nan], + "elements.b": [np.nan, 2.0, np.nan], + "elements.c": [np.nan, np.nan, 3.0], + } + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..5279f3f1cdfbe9fbec62f5685503c28b62f5f872 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py @@ -0,0 +1,2202 @@ +import datetime +from datetime import timedelta +from decimal import Decimal +from io import ( + BytesIO, + StringIO, +) +import json +import os +import sys +import time + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas.compat import IS64 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + NA, + DataFrame, + DatetimeIndex, + Index, + RangeIndex, + Series, + Timestamp, + date_range, + read_json, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) +from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics + +from pandas.io.json import ujson_dumps + + +def test_literal_json_deprecation(): + # PR 53409 + expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + + jsonl = """{"a": 1, "b": 2} + {"a": 3, "b": 4} + {"a": 5, "b": 6} + {"a": 7, "b": 8}""" + + msg = ( + "Passing literal json to 'read_json' is deprecated and " + "will be removed in a future version. To read from a " + "literal string, wrap it in a 'StringIO' object." 
+ ) + + with tm.assert_produces_warning(FutureWarning, match=msg): + try: + read_json(jsonl, lines=False) + except ValueError: + pass + + with tm.assert_produces_warning(FutureWarning, match=msg): + read_json(expected.to_json(), lines=False) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, match=msg): + try: + result = read_json( + '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n', + lines=False, + ) + except ValueError: + pass + + with tm.assert_produces_warning(FutureWarning, match=msg): + try: + result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=False) + except ValueError: + pass + tm.assert_frame_equal(result, expected) + + +def assert_json_roundtrip_equal(result, expected, orient): + if orient in ("records", "values"): + expected = expected.reset_index(drop=True) + if orient == "values": + expected.columns = range(len(expected.columns)) + tm.assert_frame_equal(result, expected) + + +class TestPandasContainer: + @pytest.fixture + def categorical_frame(self): + data = { + c: np.random.default_rng(i).standard_normal(30) + for i, c in enumerate(list("ABCD")) + } + cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * 15 + data["E"] = list(reversed(cat)) + data["sort"] = np.arange(30, dtype="int64") + return DataFrame(data, index=pd.CategoricalIndex(cat, name="E")) + + @pytest.fixture + def datetime_series(self): + # Same as usual datetime_series, but with index freq set to None, + # since that doesn't round-trip, see GH#33711 + ser = Series( + 1.1 * np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + ser.index = ser.index._with_freq(None) + return ser + + @pytest.fixture + def datetime_frame(self): + # Same as usual datetime_frame, but with index freq set to None, + # since that doesn't round-trip, see GH#33711 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=30, freq="B"), + ) + df.index = df.index._with_freq(None) + return df + + def test_frame_double_encoded_labels(self, orient): + df = DataFrame( + [["a", "b"], ["c", "d"]], + index=['index " 1', "index / 2"], + columns=["a \\ b", "y / z"], + ) + + data = StringIO(df.to_json(orient=orient)) + result = read_json(data, orient=orient) + expected = df.copy() + assert_json_roundtrip_equal(result, expected, orient) + + @pytest.mark.parametrize("orient", ["split", "records", "values"]) + def test_frame_non_unique_index(self, orient): + df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"]) + data = StringIO(df.to_json(orient=orient)) + result = read_json(data, orient=orient) + expected = df.copy() + + assert_json_roundtrip_equal(result, expected, orient) + + @pytest.mark.parametrize("orient", ["index", "columns"]) + def test_frame_non_unique_index_raises(self, orient): + df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"]) + msg = f"DataFrame index must be unique for orient='{orient}'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient=orient) + + @pytest.mark.parametrize("orient", ["split", "values"]) + @pytest.mark.parametrize( + "data", + [ + [["a", "b"], ["c", "d"]], + [[1.5, 2.5], [3.5, 4.5]], + [[1, 2.5], [3, 4.5]], + [[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]], + ], + ) + def 
test_frame_non_unique_columns(self, orient, data): + df = DataFrame(data, index=[1, 2], columns=["x", "x"]) + + result = read_json( + StringIO(df.to_json(orient=orient)), orient=orient, convert_dates=["x"] + ) + if orient == "values": + expected = DataFrame(data) + if expected.iloc[:, 0].dtype == "datetime64[ns]": + # orient == "values" by default will write Timestamp objects out + # in milliseconds; these are internally stored in nanosecond, + # so divide to get where we need + # TODO: a to_epoch method would also solve; see GH 14772 + expected.isetitem(0, expected.iloc[:, 0].astype(np.int64) // 1000000) + elif orient == "split": + expected = df + expected.columns = ["x", "x.1"] + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("orient", ["index", "columns", "records"]) + def test_frame_non_unique_columns_raises(self, orient): + df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"]) + + msg = f"DataFrame columns must be unique for orient='{orient}'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient=orient) + + def test_frame_default_orient(self, float_frame): + assert float_frame.to_json() == float_frame.to_json(orient="columns") + + @pytest.mark.parametrize("dtype", [False, float]) + @pytest.mark.parametrize("convert_axes", [True, False]) + def test_roundtrip_simple(self, orient, convert_axes, dtype, float_frame): + data = StringIO(float_frame.to_json(orient=orient)) + result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype) + + expected = float_frame + + assert_json_roundtrip_equal(result, expected, orient) + + @pytest.mark.parametrize("dtype", [False, np.int64]) + @pytest.mark.parametrize("convert_axes", [True, False]) + def test_roundtrip_intframe(self, orient, convert_axes, dtype, int_frame): + data = StringIO(int_frame.to_json(orient=orient)) + result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype) + expected = int_frame + assert_json_roundtrip_equal(result, expected, orient) + + @pytest.mark.parametrize("dtype", [None, np.float64, int, "U3"]) + @pytest.mark.parametrize("convert_axes", [True, False]) + def test_roundtrip_str_axes(self, orient, convert_axes, dtype): + df = DataFrame( + np.zeros((200, 4)), + columns=[str(i) for i in range(4)], + index=[str(i) for i in range(200)], + dtype=dtype, + ) + + data = StringIO(df.to_json(orient=orient)) + result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype) + + expected = df.copy() + if not dtype: + expected = expected.astype(np.int64) + + # index columns, and records orients cannot fully preserve the string + # dtype for axes as the index and column labels are used as keys in + # JSON objects. JSON keys are by definition strings, so there's no way + # to disambiguate whether those keys actually were strings or numeric + # beforehand and numeric wins out. 
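# --- Illustrative sketch, not part of the vendored pandas test file above. ---
# The comment above notes that JSON object keys are always strings, so a
# round trip through to_json()/read_json() with the default convert_axes=True
# turns numeric-looking string labels back into integers; a minimal demo:
from io import StringIO

import pandas as pd

df = pd.DataFrame([[1, 2]], index=["0"], columns=["0", "1"])  # string labels
roundtripped = pd.read_json(StringIO(df.to_json(orient="columns")))
print(roundtripped.columns.dtype)  # int64 -- the original "string-ness" is lost
# --- end sketch ---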
+ if convert_axes and (orient in ("index", "columns")): + expected.columns = expected.columns.astype(np.int64) + expected.index = expected.index.astype(np.int64) + elif orient == "records" and convert_axes: + expected.columns = expected.columns.astype(np.int64) + elif convert_axes and orient == "split": + expected.columns = expected.columns.astype(np.int64) + + assert_json_roundtrip_equal(result, expected, orient) + + @pytest.mark.parametrize("convert_axes", [True, False]) + def test_roundtrip_categorical( + self, request, orient, categorical_frame, convert_axes, using_infer_string + ): + # TODO: create a better frame to test with and improve coverage + if orient in ("index", "columns"): + request.applymarker( + pytest.mark.xfail( + reason=f"Can't have duplicate index values for orient '{orient}')" + ) + ) + + data = StringIO(categorical_frame.to_json(orient=orient)) + result = read_json(data, orient=orient, convert_axes=convert_axes) + + expected = categorical_frame.copy() + expected.index = expected.index.astype( + str if not using_infer_string else "string[pyarrow_numpy]" + ) # Categorical not preserved + expected.index.name = None # index names aren't preserved in JSON + assert_json_roundtrip_equal(result, expected, orient) + + @pytest.mark.parametrize("convert_axes", [True, False]) + def test_roundtrip_empty(self, orient, convert_axes): + empty_frame = DataFrame() + data = StringIO(empty_frame.to_json(orient=orient)) + result = read_json(data, orient=orient, convert_axes=convert_axes) + if orient == "split": + idx = Index([], dtype=(float if convert_axes else object)) + expected = DataFrame(index=idx, columns=idx) + elif orient in ["index", "columns"]: + expected = DataFrame() + else: + expected = empty_frame.copy() + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("convert_axes", [True, False]) + def test_roundtrip_timestamp(self, orient, convert_axes, datetime_frame): + # TODO: improve coverage with date_format parameter + data = StringIO(datetime_frame.to_json(orient=orient)) + result = read_json(data, orient=orient, convert_axes=convert_axes) + expected = datetime_frame.copy() + + if not convert_axes: # one off for ts handling + # DTI gets converted to epoch values + idx = expected.index.view(np.int64) // 1000000 + if orient != "split": # TODO: handle consistently across orients + idx = idx.astype(str) + + expected.index = idx + + assert_json_roundtrip_equal(result, expected, orient) + + @pytest.mark.parametrize("convert_axes", [True, False]) + def test_roundtrip_mixed(self, orient, convert_axes): + index = Index(["a", "b", "c", "d", "e"]) + values = { + "A": [0.0, 1.0, 2.0, 3.0, 4.0], + "B": [0.0, 1.0, 0.0, 1.0, 0.0], + "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], + "D": [True, False, True, False, True], + } + + df = DataFrame(data=values, index=index) + + data = StringIO(df.to_json(orient=orient)) + result = read_json(data, orient=orient, convert_axes=convert_axes) + + expected = df.copy() + expected = expected.assign(**expected.select_dtypes("number").astype(np.int64)) + + assert_json_roundtrip_equal(result, expected, orient) + + @pytest.mark.xfail( + reason="#50456 Column multiindex is stored and loaded differently", + raises=AssertionError, + ) + @pytest.mark.parametrize( + "columns", + [ + [["2022", "2022"], ["JAN", "FEB"]], + [["2022", "2023"], ["JAN", "JAN"]], + [["2022", "2022"], ["JAN", "JAN"]], + ], + ) + def test_roundtrip_multiindex(self, columns): + df = DataFrame( + [[1, 2], [3, 4]], + columns=pd.MultiIndex.from_arrays(columns), + ) + data = 
StringIO(df.to_json(orient="split")) + result = read_json(data, orient="split") + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize( + "data,msg,orient", + [ + ('{"key":b:a:d}', "Expected object or value", "columns"), + # too few indices + ( + '{"columns":["A","B"],' + '"index":["2","3"],' + '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}', + "|".join( + [ + r"Length of values \(3\) does not match length of index \(2\)", + ] + ), + "split", + ), + # too many columns + ( + '{"columns":["A","B","C"],' + '"index":["1","2","3"],' + '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}', + "3 columns passed, passed data had 2 columns", + "split", + ), + # bad key + ( + '{"badkey":["A","B"],' + '"index":["2","3"],' + '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}', + r"unexpected key\(s\): badkey", + "split", + ), + ], + ) + def test_frame_from_json_bad_data_raises(self, data, msg, orient): + with pytest.raises(ValueError, match=msg): + read_json(StringIO(data), orient=orient) + + @pytest.mark.parametrize("dtype", [True, False]) + @pytest.mark.parametrize("convert_axes", [True, False]) + def test_frame_from_json_missing_data(self, orient, convert_axes, dtype): + num_df = DataFrame([[1, 2], [4, 5, 6]]) + + result = read_json( + StringIO(num_df.to_json(orient=orient)), + orient=orient, + convert_axes=convert_axes, + dtype=dtype, + ) + assert np.isnan(result.iloc[0, 2]) + + obj_df = DataFrame([["1", "2"], ["4", "5", "6"]]) + result = read_json( + StringIO(obj_df.to_json(orient=orient)), + orient=orient, + convert_axes=convert_axes, + dtype=dtype, + ) + assert np.isnan(result.iloc[0, 2]) + + @pytest.mark.parametrize("dtype", [True, False]) + def test_frame_read_json_dtype_missing_value(self, dtype): + # GH28501 Parse missing values using read_json with dtype=False + # to NaN instead of None + result = read_json(StringIO("[null]"), dtype=dtype) + expected = DataFrame([np.nan]) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("inf", [np.inf, -np.inf]) + @pytest.mark.parametrize("dtype", [True, False]) + def test_frame_infinity(self, inf, dtype): + # infinities get mapped to nulls which get mapped to NaNs during + # deserialisation + df = DataFrame([[1, 2], [4, 5, 6]]) + df.loc[0, 2] = inf + + data = StringIO(df.to_json()) + result = read_json(data, dtype=dtype) + assert np.isnan(result.iloc[0, 2]) + + @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865") + @pytest.mark.parametrize( + "value,precision,expected_val", + [ + (0.95, 1, 1.0), + (1.95, 1, 2.0), + (-1.95, 1, -2.0), + (0.995, 2, 1.0), + (0.9995, 3, 1.0), + (0.99999999999999944, 15, 1.0), + ], + ) + def test_frame_to_json_float_precision(self, value, precision, expected_val): + df = DataFrame([{"a_float": value}]) + encoded = df.to_json(double_precision=precision) + assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}' + + def test_frame_to_json_except(self): + df = DataFrame([1, 2, 3]) + msg = "Invalid value 'garbage' for option 'orient'" + with pytest.raises(ValueError, match=msg): + df.to_json(orient="garbage") + + def test_frame_empty(self): + df = DataFrame(columns=["jim", "joe"]) + assert not df._is_mixed_type + + data = StringIO(df.to_json()) + result = read_json(data, dtype=dict(df.dtypes)) + tm.assert_frame_equal(result, df, check_index_type=False) + + def test_frame_empty_to_json(self): + # GH 7445 + df = DataFrame({"test": []}, index=[]) + result = df.to_json(orient="columns") + expected = '{"test":{}}' + assert result == expected + + def test_frame_empty_mixedtype(self): + # mixed type + df = 
DataFrame(columns=["jim", "joe"]) + df["joe"] = df["joe"].astype("i8") + assert df._is_mixed_type + data = df.to_json() + tm.assert_frame_equal( + read_json(StringIO(data), dtype=dict(df.dtypes)), + df, + check_index_type=False, + ) + + def test_frame_mixedtype_orient(self): # GH10289 + vals = [ + [10, 1, "foo", 0.1, 0.01], + [20, 2, "bar", 0.2, 0.02], + [30, 3, "baz", 0.3, 0.03], + [40, 4, "qux", 0.4, 0.04], + ] + + df = DataFrame( + vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"] + ) + + assert df._is_mixed_type + right = df.copy() + + for orient in ["split", "index", "columns"]: + inp = StringIO(df.to_json(orient=orient)) + left = read_json(inp, orient=orient, convert_axes=False) + tm.assert_frame_equal(left, right) + + right.index = RangeIndex(len(df)) + inp = StringIO(df.to_json(orient="records")) + left = read_json(inp, orient="records", convert_axes=False) + tm.assert_frame_equal(left, right) + + right.columns = RangeIndex(df.shape[1]) + inp = StringIO(df.to_json(orient="values")) + left = read_json(inp, orient="values", convert_axes=False) + tm.assert_frame_equal(left, right) + + def test_v12_compat(self, datapath): + dti = date_range("2000-01-03", "2000-01-07") + # freq doesn't roundtrip + dti = DatetimeIndex(np.asarray(dti), freq=None) + df = DataFrame( + [ + [1.56808523, 0.65727391, 1.81021139, -0.17251653], + [-0.2550111, -0.08072427, -0.03202878, -0.17581665], + [1.51493992, 0.11805825, 1.629455, -1.31506612], + [-0.02765498, 0.44679743, 0.33192641, -0.27885413], + [0.05951614, -2.69652057, 1.28163262, 0.34703478], + ], + columns=["A", "B", "C", "D"], + index=dti, + ) + df["date"] = Timestamp("19920106 18:21:32.12").as_unit("ns") + df.iloc[3, df.columns.get_loc("date")] = Timestamp("20130101") + df["modified"] = df["date"] + df.iloc[1, df.columns.get_loc("modified")] = pd.NaT + + dirpath = datapath("io", "json", "data") + v12_json = os.path.join(dirpath, "tsframe_v012.json") + df_unser = read_json(v12_json) + tm.assert_frame_equal(df, df_unser) + + df_iso = df.drop(["modified"], axis=1) + v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json") + df_unser_iso = read_json(v12_iso_json) + tm.assert_frame_equal(df_iso, df_unser_iso, check_column_type=False) + + def test_blocks_compat_GH9037(self, using_infer_string): + index = date_range("20000101", periods=10, freq="h") + # freq doesn't round-trip + index = DatetimeIndex(list(index), freq=None) + + df_mixed = DataFrame( + { + "float_1": [ + -0.92077639, + 0.77434435, + 1.25234727, + 0.61485564, + -0.60316077, + 0.24653374, + 0.28668979, + -2.51969012, + 0.95748401, + -1.02970536, + ], + "int_1": [ + 19680418, + 75337055, + 99973684, + 65103179, + 79373900, + 40314334, + 21290235, + 4991321, + 41903419, + 16008365, + ], + "str_1": [ + "78c608f1", + "64a99743", + "13d2ff52", + "ca7f4af2", + "97236474", + "bde7e214", + "1a6bde47", + "b1190be5", + "7a669144", + "8d64d068", + ], + "float_2": [ + -0.0428278, + -1.80872357, + 3.36042349, + -0.7573685, + -0.48217572, + 0.86229683, + 1.08935819, + 0.93898739, + -0.03030452, + 1.43366348, + ], + "str_2": [ + "14f04af9", + "d085da90", + "4bcfac83", + "81504caf", + "2ffef4a9", + "08e2f5c4", + "07e1af03", + "addbd4a7", + "1f6a09ba", + "4bfc4d87", + ], + "int_2": [ + 86967717, + 98098830, + 51927505, + 20372254, + 12601730, + 20884027, + 34193846, + 10561746, + 24867120, + 76131025, + ], + }, + index=index, + ) + + # JSON deserialisation always creates unicode strings + df_mixed.columns = df_mixed.columns.astype( + np.str_ if not using_infer_string else 
"string[pyarrow_numpy]" + ) + data = StringIO(df_mixed.to_json(orient="split")) + df_roundtrip = read_json(data, orient="split") + tm.assert_frame_equal( + df_mixed, + df_roundtrip, + check_index_type=True, + check_column_type=True, + by_blocks=True, + check_exact=True, + ) + + def test_frame_nonprintable_bytes(self): + # GH14256: failing column caused segfaults, if it is not the last one + + class BinaryThing: + def __init__(self, hexed) -> None: + self.hexed = hexed + self.binary = bytes.fromhex(hexed) + + def __str__(self) -> str: + return self.hexed + + hexed = "574b4454ba8c5eb4f98a8f45" + binthing = BinaryThing(hexed) + + # verify the proper conversion of printable content + df_printable = DataFrame({"A": [binthing.hexed]}) + assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}' + + # check if non-printable content throws appropriate Exception + df_nonprintable = DataFrame({"A": [binthing]}) + msg = "Unsupported UTF-8 sequence length when encoding string" + with pytest.raises(OverflowError, match=msg): + df_nonprintable.to_json() + + # the same with multiple columns threw segfaults + df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"]) + with pytest.raises(OverflowError, match=msg): + df_mixed.to_json() + + # default_handler should resolve exceptions for non-string types + result = df_nonprintable.to_json(default_handler=str) + expected = f'{{"A":{{"0":"{hexed}"}}}}' + assert result == expected + assert ( + df_mixed.to_json(default_handler=str) + == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}' + ) + + def test_label_overflow(self): + # GH14256: buffer length not checked when writing label + result = DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json() + expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}' + assert result == expected + + def test_series_non_unique_index(self): + s = Series(["a", "b"], index=[1, 1]) + + msg = "Series index must be unique for orient='index'" + with pytest.raises(ValueError, match=msg): + s.to_json(orient="index") + + tm.assert_series_equal( + s, + read_json( + StringIO(s.to_json(orient="split")), orient="split", typ="series" + ), + ) + unserialized = read_json( + StringIO(s.to_json(orient="records")), orient="records", typ="series" + ) + tm.assert_equal(s.values, unserialized.values) + + def test_series_default_orient(self, string_series): + assert string_series.to_json() == string_series.to_json(orient="index") + + def test_series_roundtrip_simple(self, orient, string_series, using_infer_string): + data = StringIO(string_series.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient) + + expected = string_series + if using_infer_string and orient in ("split", "index", "columns"): + # These schemas don't contain dtypes, so we infer string + expected.index = expected.index.astype("string[pyarrow_numpy]") + if orient in ("values", "records"): + expected = expected.reset_index(drop=True) + if orient != "split": + expected.name = None + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [False, None]) + def test_series_roundtrip_object(self, orient, dtype, object_series): + data = StringIO(object_series.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient, dtype=dtype) + + expected = object_series + if orient in ("values", "records"): + expected = expected.reset_index(drop=True) + if orient != "split": + expected.name = None + + tm.assert_series_equal(result, expected) + + def test_series_roundtrip_empty(self, orient): + empty_series = 
Series([], index=[], dtype=np.float64) + data = StringIO(empty_series.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient) + + expected = empty_series.reset_index(drop=True) + if orient in ("split"): + expected.index = expected.index.astype(np.float64) + + tm.assert_series_equal(result, expected) + + def test_series_roundtrip_timeseries(self, orient, datetime_series): + data = StringIO(datetime_series.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient) + + expected = datetime_series + if orient in ("values", "records"): + expected = expected.reset_index(drop=True) + if orient != "split": + expected.name = None + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [np.float64, int]) + def test_series_roundtrip_numeric(self, orient, dtype): + s = Series(range(6), index=["a", "b", "c", "d", "e", "f"]) + data = StringIO(s.to_json(orient=orient)) + result = read_json(data, typ="series", orient=orient) + + expected = s.copy() + if orient in ("values", "records"): + expected = expected.reset_index(drop=True) + + tm.assert_series_equal(result, expected) + + def test_series_to_json_except(self): + s = Series([1, 2, 3]) + msg = "Invalid value 'garbage' for option 'orient'" + with pytest.raises(ValueError, match=msg): + s.to_json(orient="garbage") + + def test_series_from_json_precise_float(self): + s = Series([4.56, 4.56, 4.56]) + result = read_json(StringIO(s.to_json()), typ="series", precise_float=True) + tm.assert_series_equal(result, s, check_index_type=False) + + def test_series_with_dtype(self): + # GH 21986 + s = Series([4.56, 4.56, 4.56]) + result = read_json(StringIO(s.to_json()), typ="series", dtype=np.int64) + expected = Series([4] * 3) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dtype,expected", + [ + (True, Series(["2000-01-01"], dtype="datetime64[ns]")), + (False, Series([946684800000])), + ], + ) + def test_series_with_dtype_datetime(self, dtype, expected): + s = Series(["2000-01-01"], dtype="datetime64[ns]") + data = StringIO(s.to_json()) + result = read_json(data, typ="series", dtype=dtype) + tm.assert_series_equal(result, expected) + + def test_frame_from_json_precise_float(self): + df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]]) + result = read_json(StringIO(df.to_json()), precise_float=True) + tm.assert_frame_equal(result, df) + + def test_typ(self): + s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64") + result = read_json(StringIO(s.to_json()), typ=None) + tm.assert_series_equal(result, s) + + def test_reconstruction_index(self): + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + result = read_json(StringIO(df.to_json())) + tm.assert_frame_equal(result, df) + + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"]) + result = read_json(StringIO(df.to_json())) + tm.assert_frame_equal(result, df) + + def test_path(self, float_frame, int_frame, datetime_frame): + with tm.ensure_clean("test.json") as path: + for df in [float_frame, int_frame, datetime_frame]: + df.to_json(path) + read_json(path) + + def test_axis_dates(self, datetime_series, datetime_frame): + # frame + json = StringIO(datetime_frame.to_json()) + result = read_json(json) + tm.assert_frame_equal(result, datetime_frame) + + # series + json = StringIO(datetime_series.to_json()) + result = read_json(json, typ="series") + tm.assert_series_equal(result, datetime_series, check_names=False) + assert result.name is None + + def test_convert_dates(self, datetime_series, 
datetime_frame): + # frame + df = datetime_frame + df["date"] = Timestamp("20130101").as_unit("ns") + + json = StringIO(df.to_json()) + result = read_json(json) + tm.assert_frame_equal(result, df) + + df["foo"] = 1.0 + json = StringIO(df.to_json(date_unit="ns")) + + result = read_json(json, convert_dates=False) + expected = df.copy() + expected["date"] = expected["date"].values.view("i8") + expected["foo"] = expected["foo"].astype("int64") + tm.assert_frame_equal(result, expected) + + # series + ts = Series(Timestamp("20130101").as_unit("ns"), index=datetime_series.index) + json = StringIO(ts.to_json()) + result = read_json(json, typ="series") + tm.assert_series_equal(result, ts) + + @pytest.mark.parametrize("date_format", ["epoch", "iso"]) + @pytest.mark.parametrize("as_object", [True, False]) + @pytest.mark.parametrize("date_typ", [datetime.date, datetime.datetime, Timestamp]) + def test_date_index_and_values(self, date_format, as_object, date_typ): + data = [date_typ(year=2020, month=1, day=1), pd.NaT] + if as_object: + data.append("a") + + ser = Series(data, index=data) + result = ser.to_json(date_format=date_format) + + if date_format == "epoch": + expected = '{"1577836800000":1577836800000,"null":null}' + else: + expected = ( + '{"2020-01-01T00:00:00.000":"2020-01-01T00:00:00.000","null":null}' + ) + + if as_object: + expected = expected.replace("}", ',"a":"a"}') + + assert result == expected + + @pytest.mark.parametrize( + "infer_word", + [ + "trade_time", + "date", + "datetime", + "sold_at", + "modified", + "timestamp", + "timestamps", + ], + ) + def test_convert_dates_infer(self, infer_word): + # GH10747 + + data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}] + expected = DataFrame( + [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word] + ) + + result = read_json(StringIO(ujson_dumps(data)))[["id", infer_word]] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "date,date_unit", + [ + ("20130101 20:43:42.123", None), + ("20130101 20:43:42", "s"), + ("20130101 20:43:42.123", "ms"), + ("20130101 20:43:42.123456", "us"), + ("20130101 20:43:42.123456789", "ns"), + ], + ) + def test_date_format_frame(self, date, date_unit, datetime_frame): + df = datetime_frame + + df["date"] = Timestamp(date).as_unit("ns") + df.iloc[1, df.columns.get_loc("date")] = pd.NaT + df.iloc[5, df.columns.get_loc("date")] = pd.NaT + if date_unit: + json = df.to_json(date_format="iso", date_unit=date_unit) + else: + json = df.to_json(date_format="iso") + + result = read_json(StringIO(json)) + expected = df.copy() + tm.assert_frame_equal(result, expected) + + def test_date_format_frame_raises(self, datetime_frame): + df = datetime_frame + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + df.to_json(date_format="iso", date_unit="foo") + + @pytest.mark.parametrize( + "date,date_unit", + [ + ("20130101 20:43:42.123", None), + ("20130101 20:43:42", "s"), + ("20130101 20:43:42.123", "ms"), + ("20130101 20:43:42.123456", "us"), + ("20130101 20:43:42.123456789", "ns"), + ], + ) + def test_date_format_series(self, date, date_unit, datetime_series): + ts = Series(Timestamp(date).as_unit("ns"), index=datetime_series.index) + ts.iloc[1] = pd.NaT + ts.iloc[5] = pd.NaT + if date_unit: + json = ts.to_json(date_format="iso", date_unit=date_unit) + else: + json = ts.to_json(date_format="iso") + + result = read_json(StringIO(json), typ="series") + expected = ts.copy() + tm.assert_series_equal(result, expected) + + def 
test_date_format_series_raises(self, datetime_series): + ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index) + msg = "Invalid value 'foo' for option 'date_unit'" + with pytest.raises(ValueError, match=msg): + ts.to_json(date_format="iso", date_unit="foo") + + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_date_unit(self, unit, datetime_frame): + df = datetime_frame + df["date"] = Timestamp("20130101 20:43:42").as_unit("ns") + dl = df.columns.get_loc("date") + df.iloc[1, dl] = Timestamp("19710101 20:43:42") + df.iloc[2, dl] = Timestamp("21460101 20:43:42") + df.iloc[4, dl] = pd.NaT + + json = df.to_json(date_format="epoch", date_unit=unit) + + # force date unit + result = read_json(StringIO(json), date_unit=unit) + tm.assert_frame_equal(result, df) + + # detect date unit + result = read_json(StringIO(json), date_unit=None) + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize("unit", ["s", "ms", "us"]) + def test_iso_non_nano_datetimes(self, unit): + # Test that numpy datetimes + # in an Index or a column with non-nano resolution can be serialized + # correctly + # GH53686 + index = DatetimeIndex( + [np.datetime64("2023-01-01T11:22:33.123456", unit)], + dtype=f"datetime64[{unit}]", + ) + df = DataFrame( + { + "date": Series( + [np.datetime64("2022-01-01T11:22:33.123456", unit)], + dtype=f"datetime64[{unit}]", + index=index, + ), + "date_obj": Series( + [np.datetime64("2023-01-01T11:22:33.123456", unit)], + dtype=object, + index=index, + ), + }, + ) + + buf = StringIO() + df.to_json(buf, date_format="iso", date_unit=unit) + buf.seek(0) + + # read_json always reads datetimes in nanosecond resolution + # TODO: check_dtype/check_index_type should be removable + # once read_json gets non-nano support + tm.assert_frame_equal( + read_json(buf, convert_dates=["date", "date_obj"]), + df, + check_index_type=False, + check_dtype=False, + ) + + def test_weird_nested_json(self): + # this used to core dump the parser + s = r"""{ + "status": "success", + "data": { + "posts": [ + { + "id": 1, + "title": "A blog post", + "body": "Some useful content" + }, + { + "id": 2, + "title": "Another blog post", + "body": "More content" + } + ] + } + }""" + read_json(StringIO(s)) + + def test_doc_example(self): + dfj2 = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=list("AB") + ) + dfj2["date"] = Timestamp("20130101") + dfj2["ints"] = range(5) + dfj2["bools"] = True + dfj2.index = date_range("20130101", periods=5) + + json = StringIO(dfj2.to_json()) + result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_}) + tm.assert_frame_equal(result, result) + + def test_round_trip_exception(self, datapath): + # GH 3867 + path = datapath("io", "json", "data", "teams.csv") + df = pd.read_csv(path) + s = df.to_json() + + result = read_json(StringIO(s)) + res = result.reindex(index=df.index, columns=df.columns) + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = res.fillna(np.nan, downcast=False) + tm.assert_frame_equal(res, df) + + @pytest.mark.network + @pytest.mark.single_cpu + @pytest.mark.parametrize( + "field,dtype", + [ + ["created_at", pd.DatetimeTZDtype(tz="UTC")], + ["closed_at", "datetime64[ns]"], + ["updated_at", pd.DatetimeTZDtype(tz="UTC")], + ], + ) + def test_url(self, field, dtype, httpserver): + data = '{"created_at": ["2023-06-23T18:21:36Z"], "closed_at": ["2023-06-23T18:21:36"], "updated_at": ["2023-06-23T18:21:36Z"]}\n' # noqa: E501 + 
httpserver.serve_content(content=data) + result = read_json(httpserver.url, convert_dates=True) + assert result[field].dtype == dtype + + def test_timedelta(self): + converter = lambda x: pd.to_timedelta(x, unit="ms") + + ser = Series([timedelta(23), timedelta(seconds=5)]) + assert ser.dtype == "timedelta64[ns]" + + result = read_json(StringIO(ser.to_json()), typ="series").apply(converter) + tm.assert_series_equal(result, ser) + + ser = Series([timedelta(23), timedelta(seconds=5)], index=Index([0, 1])) + assert ser.dtype == "timedelta64[ns]" + result = read_json(StringIO(ser.to_json()), typ="series").apply(converter) + tm.assert_series_equal(result, ser) + + frame = DataFrame([timedelta(23), timedelta(seconds=5)]) + assert frame[0].dtype == "timedelta64[ns]" + tm.assert_frame_equal( + frame, read_json(StringIO(frame.to_json())).apply(converter) + ) + + def test_timedelta2(self): + frame = DataFrame( + { + "a": [timedelta(days=23), timedelta(seconds=5)], + "b": [1, 2], + "c": date_range(start="20130101", periods=2), + } + ) + data = StringIO(frame.to_json(date_unit="ns")) + result = read_json(data) + result["a"] = pd.to_timedelta(result.a, unit="ns") + result["c"] = pd.to_datetime(result.c) + tm.assert_frame_equal(frame, result) + + def test_mixed_timedelta_datetime(self): + td = timedelta(23) + ts = Timestamp("20130101") + frame = DataFrame({"a": [td, ts]}, dtype=object) + + expected = DataFrame( + {"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]} + ) + data = StringIO(frame.to_json(date_unit="ns")) + result = read_json(data, dtype={"a": "int64"}) + tm.assert_frame_equal(result, expected, check_index_type=False) + + @pytest.mark.parametrize("as_object", [True, False]) + @pytest.mark.parametrize("date_format", ["iso", "epoch"]) + @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta]) + def test_timedelta_to_json(self, as_object, date_format, timedelta_typ): + # GH28156: to_json not correctly formatting Timedelta + data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT] + if as_object: + data.append("a") + + ser = Series(data, index=data) + if date_format == "iso": + expected = ( + '{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}' + ) + else: + expected = '{"86400000":86400000,"172800000":172800000,"null":null}' + + if as_object: + expected = expected.replace("}", ',"a":"a"}') + + result = ser.to_json(date_format=date_format) + assert result == expected + + @pytest.mark.parametrize("as_object", [True, False]) + @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta]) + def test_timedelta_to_json_fractional_precision(self, as_object, timedelta_typ): + data = [timedelta_typ(milliseconds=42)] + ser = Series(data, index=data) + if as_object: + ser = ser.astype(object) + + result = ser.to_json() + expected = '{"42":42}' + assert result == expected + + def test_default_handler(self): + value = object() + frame = DataFrame({"a": [7, value]}) + expected = DataFrame({"a": [7, str(value)]}) + result = read_json(StringIO(frame.to_json(default_handler=str))) + tm.assert_frame_equal(expected, result, check_index_type=False) + + def test_default_handler_indirect(self): + def default(obj): + if isinstance(obj, complex): + return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)] + return str(obj) + + df_list = [ + 9, + DataFrame( + {"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]}, + columns=["a", "b"], + ), + ] + expected = ( + '[9,[[1,null],["STR",null],[[["mathjs","Complex"],' + 
'["re",4.0],["im",-5.0]],"N\\/A"]]]' + ) + assert ( + ujson_dumps(df_list, default_handler=default, orient="values") == expected + ) + + def test_default_handler_numpy_unsupported_dtype(self): + # GH12554 to_json raises 'Unhandled numpy dtype 15' + df = DataFrame( + {"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]}, + columns=["a", "b"], + ) + expected = ( + '[["(1+0j)","(nan+0j)"],' + '["(2.3+0j)","(nan+0j)"],' + '["(4-5j)","(1.2+0j)"]]' + ) + assert df.to_json(default_handler=str, orient="values") == expected + + def test_default_handler_raises(self): + msg = "raisin" + + def my_handler_raises(obj): + raise TypeError(msg) + + with pytest.raises(TypeError, match=msg): + DataFrame({"a": [1, 2, object()]}).to_json( + default_handler=my_handler_raises + ) + with pytest.raises(TypeError, match=msg): + DataFrame({"a": [1, 2, complex(4, -5)]}).to_json( + default_handler=my_handler_raises + ) + + def test_categorical(self): + # GH4377 df.to_json segfaults with non-ndarray blocks + df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]}) + df["B"] = df["A"] + expected = df.to_json() + + df["B"] = df["A"].astype("category") + assert expected == df.to_json() + + s = df["A"] + sc = df["B"] + assert s.to_json() == sc.to_json() + + def test_datetime_tz(self): + # GH4377 df.to_json segfaults with non-ndarray blocks + tz_range = date_range("20130101", periods=3, tz="US/Eastern") + tz_naive = tz_range.tz_convert("utc").tz_localize(None) + + df = DataFrame({"A": tz_range, "B": date_range("20130101", periods=3)}) + + df_naive = df.copy() + df_naive["A"] = tz_naive + expected = df_naive.to_json() + assert expected == df.to_json() + + stz = Series(tz_range) + s_naive = Series(tz_naive) + assert stz.to_json() == s_naive.to_json() + + def test_sparse(self): + # GH4377 df.to_json segfaults with non-ndarray blocks + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + df.loc[:8] = np.nan + + sdf = df.astype("Sparse") + expected = df.to_json() + assert expected == sdf.to_json() + + s = Series(np.random.default_rng(2).standard_normal(10)) + s.loc[:8] = np.nan + ss = s.astype("Sparse") + + expected = s.to_json() + assert expected == ss.to_json() + + @pytest.mark.parametrize( + "ts", + [ + Timestamp("2013-01-10 05:00:00Z"), + Timestamp("2013-01-10 00:00:00", tz="US/Eastern"), + Timestamp("2013-01-10 00:00:00-0500"), + ], + ) + def test_tz_is_utc(self, ts): + exp = '"2013-01-10T05:00:00.000Z"' + + assert ujson_dumps(ts, iso_dates=True) == exp + dt = ts.to_pydatetime() + assert ujson_dumps(dt, iso_dates=True) == exp + + def test_tz_is_naive(self): + ts = Timestamp("2013-01-10 05:00:00") + exp = '"2013-01-10T05:00:00.000"' + + assert ujson_dumps(ts, iso_dates=True) == exp + dt = ts.to_pydatetime() + assert ujson_dumps(dt, iso_dates=True) == exp + + @pytest.mark.parametrize( + "tz_range", + [ + date_range("2013-01-01 05:00:00Z", periods=2), + date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"), + date_range("2013-01-01 00:00:00-0500", periods=2), + ], + ) + def test_tz_range_is_utc(self, tz_range): + exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]' + dfexp = ( + '{"DT":{' + '"0":"2013-01-01T05:00:00.000Z",' + '"1":"2013-01-02T05:00:00.000Z"}}' + ) + + assert ujson_dumps(tz_range, iso_dates=True) == exp + dti = DatetimeIndex(tz_range) + # Ensure datetimes in object array are serialized correctly + # in addition to the normal DTI case + assert ujson_dumps(dti, iso_dates=True) == exp + assert ujson_dumps(dti.astype(object), iso_dates=True) == exp + df = 
DataFrame({"DT": dti}) + result = ujson_dumps(df, iso_dates=True) + assert result == dfexp + assert ujson_dumps(df.astype({"DT": object}), iso_dates=True) + + def test_tz_range_is_naive(self): + dti = date_range("2013-01-01 05:00:00", periods=2) + + exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]' + dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}' + + # Ensure datetimes in object array are serialized correctly + # in addition to the normal DTI case + assert ujson_dumps(dti, iso_dates=True) == exp + assert ujson_dumps(dti.astype(object), iso_dates=True) == exp + df = DataFrame({"DT": dti}) + result = ujson_dumps(df, iso_dates=True) + assert result == dfexp + assert ujson_dumps(df.astype({"DT": object}), iso_dates=True) + + def test_read_inline_jsonl(self): + # GH9180 + + result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.single_cpu + @td.skip_if_not_us_locale + def test_read_s3_jsonl(self, s3_public_bucket_with_data, s3so): + # GH17200 + + result = read_json( + f"s3n://{s3_public_bucket_with_data.name}/items.jsonl", + lines=True, + storage_options=s3so, + ) + expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_read_local_jsonl(self): + # GH17200 + with tm.ensure_clean("tmp_items.json") as path: + with open(path, "w", encoding="utf-8") as infile: + infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n') + result = read_json(path, lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_read_jsonl_unicode_chars(self): + # GH15132: non-ascii unicode characters + # \u201d == RIGHT DOUBLE QUOTATION MARK + + # simulate file handle + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + json = StringIO(json) + result = read_json(json, lines=True) + expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + # simulate string + json = StringIO('{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n') + result = read_json(json, lines=True) + expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) + def test_to_json_large_numbers(self, bigNum): + # GH34473 + series = Series(bigNum, dtype=object, index=["articleId"]) + json = series.to_json() + expected = '{"articleId":' + str(bigNum) + "}" + assert json == expected + + df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0]) + json = df.to_json() + expected = '{"0":{"articleId":' + str(bigNum) + "}}" + assert json == expected + + @pytest.mark.parametrize("bigNum", [-(2**63) - 1, 2**64]) + def test_read_json_large_numbers(self, bigNum): + # GH20599, 26068 + json = StringIO('{"articleId":' + str(bigNum) + "}") + msg = r"Value is too small|Value is too big" + with pytest.raises(ValueError, match=msg): + read_json(json) + + json = StringIO('{"0":{"articleId":' + str(bigNum) + "}}") + with pytest.raises(ValueError, match=msg): + read_json(json) + + def test_read_json_large_numbers2(self): + # GH18842 + json = '{"articleId": "1404366058080022500245"}' + json = StringIO(json) + result = read_json(json, typ="series") + expected = Series(1.404366e21, index=["articleId"]) + tm.assert_series_equal(result, expected) 
+ + json = '{"0": {"articleId": "1404366058080022500245"}}' + json = StringIO(json) + result = read_json(json) + expected = DataFrame(1.404366e21, index=["articleId"], columns=[0]) + tm.assert_frame_equal(result, expected) + + def test_to_jsonl(self): + # GH9180 + df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n' + assert result == expected + + df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n' + assert result == expected + tm.assert_frame_equal(read_json(StringIO(result), lines=True), df) + + # GH15096: escaped characters in columns and data + df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"]) + result = df.to_json(orient="records", lines=True) + expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n' + assert result == expected + + tm.assert_frame_equal(read_json(StringIO(result), lines=True), df) + + # TODO: there is a near-identical test for pytables; can we share? + @pytest.mark.xfail(reason="GH#13774 encoding kwarg not supported", raises=TypeError) + @pytest.mark.parametrize( + "val", + [ + [b"E\xc9, 17", b"", b"a", b"b", b"c"], + [b"E\xc9, 17", b"a", b"b", b"c"], + [b"EE, 17", b"", b"a", b"b", b"c"], + [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"], + [b"", b"a", b"b", b"c"], + [b"\xf8\xfc", b"a", b"b", b"c"], + [b"A\xf8\xfc", b"", b"a", b"b", b"c"], + [np.nan, b"", b"b", b"c"], + [b"A\xf8\xfc", np.nan, b"", b"b", b"c"], + ], + ) + @pytest.mark.parametrize("dtype", ["category", object]) + def test_latin_encoding(self, dtype, val): + # GH 13774 + ser = Series( + [x.decode("latin-1") if isinstance(x, bytes) else x for x in val], + dtype=dtype, + ) + encoding = "latin-1" + with tm.ensure_clean("test.json") as path: + ser.to_json(path, encoding=encoding) + retr = read_json(StringIO(path), encoding=encoding) + tm.assert_series_equal(ser, retr, check_categorical=False) + + def test_data_frame_size_after_to_json(self): + # GH15344 + df = DataFrame({"a": [str(1)]}) + + size_before = df.memory_usage(index=True, deep=True).sum() + df.to_json() + size_after = df.memory_usage(index=True, deep=True).sum() + + assert size_before == size_after + + @pytest.mark.parametrize( + "index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]] + ) + @pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]]) + def test_from_json_to_json_table_index_and_columns(self, index, columns): + # GH25433 GH25435 + expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns) + dfjson = expected.to_json(orient="table") + + result = read_json(StringIO(dfjson), orient="table") + tm.assert_frame_equal(result, expected) + + def test_from_json_to_json_table_dtypes(self): + # GH21345 + expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]}) + dfjson = expected.to_json(orient="table") + result = read_json(StringIO(dfjson), orient="table") + tm.assert_frame_equal(result, expected) + + # TODO: We are casting to string which coerces None to NaN before casting back + # to object, ending up with incorrect na values + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="incorrect na conversion") + @pytest.mark.parametrize("orient", ["split", "records", "index", "columns"]) + def test_to_json_from_json_columns_dtypes(self, orient): + # GH21892 GH33205 + expected = DataFrame.from_dict( + { + "Integer": 
Series([1, 2, 3], dtype="int64"), + "Float": Series([None, 2.0, 3.0], dtype="float64"), + "Object": Series([None, "", "c"], dtype="object"), + "Bool": Series([True, False, True], dtype="bool"), + "Category": Series(["a", "b", None], dtype="category"), + "Datetime": Series( + ["2020-01-01", None, "2020-01-03"], dtype="datetime64[ns]" + ), + } + ) + dfjson = expected.to_json(orient=orient) + + result = read_json( + StringIO(dfjson), + orient=orient, + dtype={ + "Integer": "int64", + "Float": "float64", + "Object": "object", + "Bool": "bool", + "Category": "category", + "Datetime": "datetime64[ns]", + }, + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}]) + def test_read_json_table_dtype_raises(self, dtype): + # GH21345 + df = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]}) + dfjson = df.to_json(orient="table") + msg = "cannot pass both dtype and orient='table'" + with pytest.raises(ValueError, match=msg): + read_json(dfjson, orient="table", dtype=dtype) + + @pytest.mark.parametrize("orient", ["index", "columns", "records", "values"]) + def test_read_json_table_empty_axes_dtype(self, orient): + # GH28558 + + expected = DataFrame() + result = read_json(StringIO("{}"), orient=orient, convert_axes=True) + tm.assert_index_equal(result.index, expected.index) + tm.assert_index_equal(result.columns, expected.columns) + + def test_read_json_table_convert_axes_raises(self): + # GH25433 GH25435 + df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."]) + dfjson = df.to_json(orient="table") + msg = "cannot pass both convert_axes and orient='table'" + with pytest.raises(ValueError, match=msg): + read_json(dfjson, orient="table", convert_axes=True) + + @pytest.mark.parametrize( + "data, expected", + [ + ( + DataFrame([[1, 2], [4, 5]], columns=["a", "b"]), + {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, + ), + ( + DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"), + {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, + ), + ( + DataFrame( + [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]] + ), + {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, + ), + (Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}), + ( + Series([1, 2, 3], name="A").rename_axis("foo"), + {"name": "A", "data": [1, 2, 3]}, + ), + ( + Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]), + {"name": "A", "data": [1, 2]}, + ), + ], + ) + def test_index_false_to_json_split(self, data, expected): + # GH 17394 + # Testing index=False in to_json with orient='split' + + result = data.to_json(orient="split", index=False) + result = json.loads(result) + + assert result == expected + + @pytest.mark.parametrize( + "data", + [ + (DataFrame([[1, 2], [4, 5]], columns=["a", "b"])), + (DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")), + ( + DataFrame( + [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]] + ) + ), + (Series([1, 2, 3], name="A")), + (Series([1, 2, 3], name="A").rename_axis("foo")), + (Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])), + ], + ) + def test_index_false_to_json_table(self, data): + # GH 17394 + # Testing index=False in to_json with orient='table' + + result = data.to_json(orient="table", index=False) + result = json.loads(result) + + expected = { + "schema": pd.io.json.build_table_schema(data, index=False), + "data": DataFrame(data).to_dict(orient="records"), + } + + assert result == expected + + @pytest.mark.parametrize("orient", 
["index", "columns"]) + def test_index_false_error_to_json(self, orient): + # GH 17394, 25513 + # Testing error message from to_json with index=False + + df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"]) + + msg = ( + "'index=False' is only valid when 'orient' is 'split', " + "'table', 'records', or 'values'" + ) + with pytest.raises(ValueError, match=msg): + df.to_json(orient=orient, index=False) + + @pytest.mark.parametrize("orient", ["records", "values"]) + def test_index_true_error_to_json(self, orient): + # GH 25513 + # Testing error message from to_json with index=True + + df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"]) + + msg = ( + "'index=True' is only valid when 'orient' is 'split', " + "'table', 'index', or 'columns'" + ) + with pytest.raises(ValueError, match=msg): + df.to_json(orient=orient, index=True) + + @pytest.mark.parametrize("orient", ["split", "table"]) + @pytest.mark.parametrize("index", [True, False]) + def test_index_false_from_json_to_json(self, orient, index): + # GH25170 + # Test index=False in from_json to_json + expected = DataFrame({"a": [1, 2], "b": [3, 4]}) + dfjson = expected.to_json(orient=orient, index=index) + result = read_json(StringIO(dfjson), orient=orient) + tm.assert_frame_equal(result, expected) + + def test_read_timezone_information(self): + # GH 25546 + result = read_json( + StringIO('{"2019-01-01T11:00:00.000Z":88}'), typ="series", orient="index" + ) + exp_dti = DatetimeIndex(["2019-01-01 11:00:00"], dtype="M8[ns, UTC]") + expected = Series([88], index=exp_dti) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "url", + [ + "s3://example-fsspec/", + "gcs://another-fsspec/file.json", + "https://example-site.com/data", + "some-protocol://data.txt", + ], + ) + def test_read_json_with_url_value(self, url): + # GH 36271 + result = read_json(StringIO(f'{{"url":{{"0":"{url}"}}}}')) + expected = DataFrame({"url": [url]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "compression", + ["", ".gz", ".bz2", ".tar"], + ) + def test_read_json_with_very_long_file_path(self, compression): + # GH 46718 + long_json_path = f'{"a" * 1000}.json{compression}' + with pytest.raises( + FileNotFoundError, match=f"File {long_json_path} does not exist" + ): + # path too long for Windows is handled in file_exists() but raises in + # _get_data_from_filepath() + read_json(long_json_path) + + @pytest.mark.parametrize( + "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")] + ) + def test_timedelta_as_label(self, date_format, key): + df = DataFrame([[1]], columns=[pd.Timedelta("1D")]) + expected = f'{{"{key}":{{"0":1}}}}' + result = df.to_json(date_format=date_format) + + assert result == expected + + @pytest.mark.parametrize( + "orient,expected", + [ + ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"), + ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"), + # TODO: the below have separate encoding procedures + pytest.param( + "split", + "", + marks=pytest.mark.xfail( + reason="Produces JSON but not in a consistent manner" + ), + ), + pytest.param( + "table", + "", + marks=pytest.mark.xfail( + reason="Produces JSON but not in a consistent manner" + ), + ), + ], + ) + def test_tuple_labels(self, orient, expected): + # GH 20500 + df = DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")]) + result = df.to_json(orient=orient) + assert result == expected + + @pytest.mark.parametrize("indent", [1, 2, 4]) + def test_to_json_indent(self, indent): + # GH 12004 + df = DataFrame([["foo", "bar"], ["baz", "qux"]], 
columns=["a", "b"]) + + result = df.to_json(indent=indent) + spaces = " " * indent + expected = f"""{{ +{spaces}"a":{{ +{spaces}{spaces}"0":"foo", +{spaces}{spaces}"1":"baz" +{spaces}}}, +{spaces}"b":{{ +{spaces}{spaces}"0":"bar", +{spaces}{spaces}"1":"qux" +{spaces}}} +}}""" + + assert result == expected + + @pytest.mark.skipif( + using_pyarrow_string_dtype(), + reason="Adjust expected when infer_string is default, no bug here, " + "just a complicated parametrization", + ) + @pytest.mark.parametrize( + "orient,expected", + [ + ( + "split", + """{ + "columns":[ + "a", + "b" + ], + "index":[ + 0, + 1 + ], + "data":[ + [ + "foo", + "bar" + ], + [ + "baz", + "qux" + ] + ] +}""", + ), + ( + "records", + """[ + { + "a":"foo", + "b":"bar" + }, + { + "a":"baz", + "b":"qux" + } +]""", + ), + ( + "index", + """{ + "0":{ + "a":"foo", + "b":"bar" + }, + "1":{ + "a":"baz", + "b":"qux" + } +}""", + ), + ( + "columns", + """{ + "a":{ + "0":"foo", + "1":"baz" + }, + "b":{ + "0":"bar", + "1":"qux" + } +}""", + ), + ( + "values", + """[ + [ + "foo", + "bar" + ], + [ + "baz", + "qux" + ] +]""", + ), + ( + "table", + """{ + "schema":{ + "fields":[ + { + "name":"index", + "type":"integer" + }, + { + "name":"a", + "type":"string" + }, + { + "name":"b", + "type":"string" + } + ], + "primaryKey":[ + "index" + ], + "pandas_version":"1.4.0" + }, + "data":[ + { + "index":0, + "a":"foo", + "b":"bar" + }, + { + "index":1, + "a":"baz", + "b":"qux" + } + ] +}""", + ), + ], + ) + def test_json_indent_all_orients(self, orient, expected): + # GH 12004 + df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"]) + result = df.to_json(orient=orient, indent=4) + assert result == expected + + def test_json_negative_indent_raises(self): + with pytest.raises(ValueError, match="must be a nonnegative integer"): + DataFrame().to_json(indent=-1) + + def test_emca_262_nan_inf_support(self): + # GH 12213 + data = StringIO( + '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]' + ) + result = read_json(data) + expected = DataFrame( + ["a", None, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"] + ) + tm.assert_frame_equal(result, expected) + + def test_frame_int_overflow(self): + # GH 30320 + encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}]) + expected = DataFrame({"col": ["31900441201190696999", "Text"]}) + result = read_json(StringIO(encoded_json)) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dataframe,expected", + [ + ( + DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}), + '{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,' + '"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}', + ) + ], + ) + def test_json_multiindex(self, dataframe, expected): + series = dataframe.stack(future_stack=True) + result = series.to_json(orient="index") + assert result == expected + + @pytest.mark.single_cpu + def test_to_s3(self, s3_public_bucket, s3so): + # GH 28375 + mock_bucket_name, target_file = s3_public_bucket.name, "test.json" + df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) + df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so) + timeout = 5 + while True: + if target_file in (obj.key for obj in s3_public_bucket.objects.all()): + break + time.sleep(0.1) + timeout -= 0.1 + assert timeout > 0, "Timed out waiting for file to appear on moto" + + def test_json_pandas_nulls(self, nulls_fixture, request): + # GH 31615 + if isinstance(nulls_fixture, Decimal): + mark = pytest.mark.xfail(reason="not implemented") + request.applymarker(mark) + + result = 
DataFrame([[nulls_fixture]]).to_json() + assert result == '{"0":{"0":null}}' + + def test_readjson_bool_series(self): + # GH31464 + result = read_json(StringIO("[true, true, false]"), typ="series") + expected = Series([True, True, False]) + tm.assert_series_equal(result, expected) + + def test_to_json_multiindex_escape(self): + # GH 15273 + df = DataFrame( + True, + index=date_range("2017-01-20", "2017-01-23"), + columns=["foo", "bar"], + ).stack(future_stack=True) + result = df.to_json() + expected = ( + "{\"(Timestamp('2017-01-20 00:00:00'), 'foo')\":true," + "\"(Timestamp('2017-01-20 00:00:00'), 'bar')\":true," + "\"(Timestamp('2017-01-21 00:00:00'), 'foo')\":true," + "\"(Timestamp('2017-01-21 00:00:00'), 'bar')\":true," + "\"(Timestamp('2017-01-22 00:00:00'), 'foo')\":true," + "\"(Timestamp('2017-01-22 00:00:00'), 'bar')\":true," + "\"(Timestamp('2017-01-23 00:00:00'), 'foo')\":true," + "\"(Timestamp('2017-01-23 00:00:00'), 'bar')\":true}" + ) + assert result == expected + + def test_to_json_series_of_objects(self): + class _TestObject: + def __init__(self, a, b, _c, d) -> None: + self.a = a + self.b = b + self._c = _c + self.d = d + + def e(self): + return 5 + + # JSON keys should be all non-callable non-underscore attributes, see GH-42768 + series = Series([_TestObject(a=1, b=2, _c=3, d=4)]) + assert json.loads(series.to_json()) == {"0": {"a": 1, "b": 2, "d": 4}} + + @pytest.mark.parametrize( + "data,expected", + [ + ( + Series({0: -6 + 8j, 1: 0 + 1j, 2: 9 - 5j}), + '{"0":{"imag":8.0,"real":-6.0},' + '"1":{"imag":1.0,"real":0.0},' + '"2":{"imag":-5.0,"real":9.0}}', + ), + ( + Series({0: -9.39 + 0.66j, 1: 3.95 + 9.32j, 2: 4.03 - 0.17j}), + '{"0":{"imag":0.66,"real":-9.39},' + '"1":{"imag":9.32,"real":3.95},' + '"2":{"imag":-0.17,"real":4.03}}', + ), + ( + DataFrame([[-2 + 3j, -1 - 0j], [4 - 3j, -0 - 10j]]), + '{"0":{"0":{"imag":3.0,"real":-2.0},' + '"1":{"imag":-3.0,"real":4.0}},' + '"1":{"0":{"imag":0.0,"real":-1.0},' + '"1":{"imag":-10.0,"real":0.0}}}', + ), + ( + DataFrame( + [[-0.28 + 0.34j, -1.08 - 0.39j], [0.41 - 0.34j, -0.78 - 1.35j]] + ), + '{"0":{"0":{"imag":0.34,"real":-0.28},' + '"1":{"imag":-0.34,"real":0.41}},' + '"1":{"0":{"imag":-0.39,"real":-1.08},' + '"1":{"imag":-1.35,"real":-0.78}}}', + ), + ], + ) + def test_complex_data_tojson(self, data, expected): + # GH41174 + result = data.to_json() + assert result == expected + + def test_json_uint64(self): + # GH21073 + expected = ( + '{"columns":["col1"],"index":[0,1],' + '"data":[[13342205958987758245],[12388075603347835679]]}' + ) + df = DataFrame(data={"col1": [13342205958987758245, 12388075603347835679]}) + result = df.to_json(orient="split") + assert result == expected + + @pytest.mark.parametrize( + "orient", ["split", "records", "values", "index", "columns"] + ) + def test_read_json_dtype_backend( + self, string_storage, dtype_backend, orient, using_infer_string + ): + # GH#50750 + pa = pytest.importorskip("pyarrow") + df = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": [True, False, None], + "f": [True, False, True], + "g": ["a", "b", "c"], + "h": ["a", "b", None], + } + ) + + if using_infer_string: + string_array = ArrowStringArrayNumpySemantics(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArrayNumpySemantics(pa.array(["a", "b", None])) + elif string_storage == "python": + string_array = StringArray(np.array(["a", "b", "c"], 
dtype=np.object_)) + string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_)) + + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + string_array = ArrowExtensionArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowExtensionArray(pa.array(["a", "b", None])) + + else: + string_array = ArrowStringArray(pa.array(["a", "b", "c"])) + string_array_na = ArrowStringArray(pa.array(["a", "b", None])) + + out = df.to_json(orient=orient) + with pd.option_context("mode.string_storage", string_storage): + result = read_json( + StringIO(out), dtype_backend=dtype_backend, orient=orient + ) + + expected = DataFrame( + { + "a": Series([1, np.nan, 3], dtype="Int64"), + "b": Series([1, 2, 3], dtype="Int64"), + "c": Series([1.5, np.nan, 2.5], dtype="Float64"), + "d": Series([1.5, 2.0, 2.5], dtype="Float64"), + "e": Series([True, False, NA], dtype="boolean"), + "f": Series([True, False, True], dtype="boolean"), + "g": string_array, + "h": string_array_na, + } + ) + + if dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + + if orient == "values": + expected.columns = list(range(8)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("orient", ["split", "records", "index"]) + def test_read_json_nullable_series(self, string_storage, dtype_backend, orient): + # GH#50750 + pa = pytest.importorskip("pyarrow") + ser = Series([1, np.nan, 3], dtype="Int64") + + out = ser.to_json(orient=orient) + with pd.option_context("mode.string_storage", string_storage): + result = read_json( + StringIO(out), dtype_backend=dtype_backend, orient=orient, typ="series" + ) + + expected = Series([1, np.nan, 3], dtype="Int64") + + if dtype_backend == "pyarrow": + from pandas.arrays import ArrowExtensionArray + + expected = Series(ArrowExtensionArray(pa.array(expected, from_pandas=True))) + + tm.assert_series_equal(result, expected) + + def test_invalid_dtype_backend(self): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + read_json("test", dtype_backend="numpy") + + +def test_invalid_engine(): + # GH 48893 + ser = Series(range(1)) + out = ser.to_json() + with pytest.raises(ValueError, match="The engine type foo"): + read_json(out, engine="foo") + + +def test_pyarrow_engine_lines_false(): + # GH 48893 + ser = Series(range(1)) + out = ser.to_json() + with pytest.raises(ValueError, match="currently pyarrow engine only supports"): + read_json(out, engine="pyarrow", lines=False) + + +def test_json_roundtrip_string_inference(orient): + pytest.importorskip("pyarrow") + df = DataFrame( + [["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"] + ) + out = df.to_json() + with pd.option_context("future.infer_string", True): + result = read_json(StringIO(out)) + expected = DataFrame( + [["a", "b"], ["c", "d"]], + dtype="string[pyarrow_numpy]", + index=Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"), + columns=Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) + + +def test_json_pos_args_deprecation(): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_json except for the " + r"argument 'path_or_buf' will be keyword-only." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buf = BytesIO() + df.to_json(buf, "split") + + +@td.skip_if_no("pyarrow") +def test_to_json_ea_null(): + # GH#57224 + df = DataFrame( + { + "a": Series([1, NA], dtype="int64[pyarrow]"), + "b": Series([2, NA], dtype="Int64"), + } + ) + result = df.to_json(orient="records", lines=True) + expected = """{"a":1,"b":2} +{"a":null,"b":null} +""" + assert result == expected + + +def test_read_json_lines_rangeindex(): + # GH 57429 + data = """ +{"a": 1, "b": 2} +{"a": 3, "b": 4} +""" + result = read_json(StringIO(data), lines=True).index + expected = RangeIndex(2) + tm.assert_index_equal(result, expected, exact=True) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6f6262504fc95db11038f2996acaead4f2c4c5d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..527539c5becd3b6cacf6f9b80a00d44aa595a20a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0cf704acf20e43da199b96a51bfb83cc04c7054 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba6a15bffe904ac2870d64b39e11abb222a36eac Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..6d5f870f07206f0380f87233828047cdc6dc5ea9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py @@ -0,0 +1,319 @@ +from __future__ import annotations + +import os + +import pytest + +from pandas.compat._optional import VERSIONS + +from pandas import ( + read_csv, + read_table, +) +import pandas._testing as tm + + +class BaseParser: + engine: str | None = None + low_memory = True + float_precision_choices: list[str | None] = [] + + def update_kwargs(self, kwargs): + kwargs = kwargs.copy() + 
kwargs.update({"engine": self.engine, "low_memory": self.low_memory}) + + return kwargs + + def read_csv(self, *args, **kwargs): + kwargs = self.update_kwargs(kwargs) + return read_csv(*args, **kwargs) + + def read_csv_check_warnings( + self, + warn_type: type[Warning], + warn_msg: str, + *args, + raise_on_extra_warnings=True, + check_stacklevel: bool = True, + **kwargs, + ): + # We need to check the stacklevel here instead of in the tests + # since this is where read_csv is called and where the warning + # should point to. + kwargs = self.update_kwargs(kwargs) + with tm.assert_produces_warning( + warn_type, + match=warn_msg, + raise_on_extra_warnings=raise_on_extra_warnings, + check_stacklevel=check_stacklevel, + ): + return read_csv(*args, **kwargs) + + def read_table(self, *args, **kwargs): + kwargs = self.update_kwargs(kwargs) + return read_table(*args, **kwargs) + + def read_table_check_warnings( + self, + warn_type: type[Warning], + warn_msg: str, + *args, + raise_on_extra_warnings=True, + **kwargs, + ): + # We need to check the stacklevel here instead of in the tests + # since this is where read_table is called and where the warning + # should point to. + kwargs = self.update_kwargs(kwargs) + with tm.assert_produces_warning( + warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings + ): + return read_table(*args, **kwargs) + + +class CParser(BaseParser): + engine = "c" + float_precision_choices = [None, "high", "round_trip"] + + +class CParserHighMemory(CParser): + low_memory = False + + +class CParserLowMemory(CParser): + low_memory = True + + +class PythonParser(BaseParser): + engine = "python" + float_precision_choices = [None] + + +class PyArrowParser(BaseParser): + engine = "pyarrow" + float_precision_choices = [None] + + +@pytest.fixture +def csv_dir_path(datapath): + """ + The directory path to the data files needed for parser tests. + """ + return datapath("io", "parser", "data") + + +@pytest.fixture +def csv1(datapath): + """ + The path to the data file "test1.csv" needed for parser tests. + """ + return os.path.join(datapath("io", "data", "csv"), "test1.csv") + + +_cParserHighMemory = CParserHighMemory +_cParserLowMemory = CParserLowMemory +_pythonParser = PythonParser +_pyarrowParser = PyArrowParser + +_py_parsers_only = [_pythonParser] +_c_parsers_only = [_cParserHighMemory, _cParserLowMemory] +_pyarrow_parsers_only = [pytest.param(_pyarrowParser, marks=pytest.mark.single_cpu)] + +_all_parsers = [*_c_parsers_only, *_py_parsers_only, *_pyarrow_parsers_only] + +_py_parser_ids = ["python"] +_c_parser_ids = ["c_high", "c_low"] +_pyarrow_parsers_ids = ["pyarrow"] + +_all_parser_ids = [*_c_parser_ids, *_py_parser_ids, *_pyarrow_parsers_ids] + + +@pytest.fixture(params=_all_parsers, ids=_all_parser_ids) +def all_parsers(request): + """ + Fixture all of the CSV parsers. + """ + parser = request.param() + if parser.engine == "pyarrow": + pytest.importorskip("pyarrow", VERSIONS["pyarrow"]) + # Try finding a way to disable threads all together + # for more stable CI runs + import pyarrow + + pyarrow.set_cpu_count(1) + return parser + + +@pytest.fixture(params=_c_parsers_only, ids=_c_parser_ids) +def c_parser_only(request): + """ + Fixture all of the CSV parsers using the C engine. + """ + return request.param() + + +@pytest.fixture(params=_py_parsers_only, ids=_py_parser_ids) +def python_parser_only(request): + """ + Fixture all of the CSV parsers using the Python engine. 
+ """ + return request.param() + + +@pytest.fixture(params=_pyarrow_parsers_only, ids=_pyarrow_parsers_ids) +def pyarrow_parser_only(request): + """ + Fixture all of the CSV parsers using the Pyarrow engine. + """ + return request.param() + + +def _get_all_parser_float_precision_combinations(): + """ + Return all allowable parser and float precision + combinations and corresponding ids. + """ + params = [] + ids = [] + for parser, parser_id in zip(_all_parsers, _all_parser_ids): + if hasattr(parser, "values"): + # Wrapped in pytest.param, get the actual parser back + parser = parser.values[0] + for precision in parser.float_precision_choices: + # Re-wrap in pytest.param for pyarrow + mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else () + param = pytest.param((parser(), precision), marks=mark) + params.append(param) + ids.append(f"{parser_id}-{precision}") + + return {"params": params, "ids": ids} + + +@pytest.fixture( + params=_get_all_parser_float_precision_combinations()["params"], + ids=_get_all_parser_float_precision_combinations()["ids"], +) +def all_parsers_all_precisions(request): + """ + Fixture for all allowable combinations of parser + and float precision + """ + return request.param + + +_utf_values = [8, 16, 32] + +_encoding_seps = ["", "-", "_"] +_encoding_prefixes = ["utf", "UTF"] + +_encoding_fmts = [ + f"{prefix}{sep}{{0}}" for sep in _encoding_seps for prefix in _encoding_prefixes +] + + +@pytest.fixture(params=_utf_values) +def utf_value(request): + """ + Fixture for all possible integer values for a UTF encoding. + """ + return request.param + + +@pytest.fixture(params=_encoding_fmts) +def encoding_fmt(request): + """ + Fixture for all possible string formats of a UTF encoding. + """ + return request.param + + +@pytest.fixture( + params=[ + ("-1,0", -1.0), + ("-1,2e0", -1.2), + ("-1e0", -1.0), + ("+1e0", 1.0), + ("+1e+0", 1.0), + ("+1e-1", 0.1), + ("+,1e1", 1.0), + ("+1,e0", 1.0), + ("-,1e1", -1.0), + ("-1,e0", -1.0), + ("0,1", 0.1), + ("1,", 1.0), + (",1", 0.1), + ("-,1", -0.1), + ("1_,", 1.0), + ("1_234,56", 1234.56), + ("1_234,56e0", 1234.56), + # negative cases; must not parse as float + ("_", "_"), + ("-_", "-_"), + ("-_1", "-_1"), + ("-_1e0", "-_1e0"), + ("_1", "_1"), + ("_1,", "_1,"), + ("_1,_", "_1,_"), + ("_1e0", "_1e0"), + ("1,2e_1", "1,2e_1"), + ("1,2e1_0", "1,2e1_0"), + ("1,_2", "1,_2"), + (",1__2", ",1__2"), + (",1e", ",1e"), + ("-,1e", "-,1e"), + ("1_000,000_000", "1_000,000_000"), + ("1,e1_2", "1,e1_2"), + ("e11,2", "e11,2"), + ("1e11,2", "1e11,2"), + ("1,2,2", "1,2,2"), + ("1,2_1", "1,2_1"), + ("1,2e-10e1", "1,2e-10e1"), + ("--1,2", "--1,2"), + ("1a_2,1", "1a_2,1"), + ("1,2E-1", 0.12), + ("1,2E1", 12.0), + ] +) +def numeric_decimal(request): + """ + Fixture for all numeric formats which should get recognized. The first entry + represents the value to read while the second represents the expected result. + """ + return request.param + + +@pytest.fixture +def pyarrow_xfail(request): + """ + Fixture that xfails a test if the engine is pyarrow. + + Use if failure is do to unsupported keywords or inconsistent results. 
+ """ + if "all_parsers" in request.fixturenames: + parser = request.getfixturevalue("all_parsers") + elif "all_parsers_all_precisions" in request.fixturenames: + # Return value is tuple of (engine, precision) + parser = request.getfixturevalue("all_parsers_all_precisions")[0] + else: + return + if parser.engine == "pyarrow": + mark = pytest.mark.xfail(reason="pyarrow doesn't support this.") + request.applymarker(mark) + + +@pytest.fixture +def pyarrow_skip(request): + """ + Fixture that skips a test if the engine is pyarrow. + + Use if failure is do a parsing failure from pyarrow.csv.read_csv + """ + if "all_parsers" in request.fixturenames: + parser = request.getfixturevalue("all_parsers") + elif "all_parsers_all_precisions" in request.fixturenames: + # Return value is tuple of (engine, precision) + parser = request.getfixturevalue("all_parsers_all_precisions")[0] + else: + return + if parser.engine == "pyarrow": + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dcc27f383688f57acd6d0e82cbaa3d5e42040a87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f99bed5ca3e94b229557817142e550b126da0694 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4813bd9f0c9a346572e70b5152e859e503e2e071 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d4226378cdf3774195f914d19abbddc851973a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_categorical.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..f4aff14a5ce32d19b0c4e6c9ef504ae141bdca67 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_categorical.py @@ -0,0 +1,334 @@ +""" +Tests dtype specification during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO +import os + +import numpy as np +import pytest + +from pandas._libs import parsers as libparsers + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Timestamp, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") + + +@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different +@pytest.mark.parametrize( + "dtype", + [ + "category", + CategoricalDtype(), + {"a": "category", "b": "category", "c": CategoricalDtype()}, + ], +) +def test_categorical_dtype(all_parsers, dtype): + # see gh-10153 + parser = all_parsers + data = """a,b,c +1,a,3.4 +1,a,3.4 +2,b,4.5""" + expected = DataFrame( + { + "a": Categorical(["1", "1", "2"]), + "b": Categorical(["a", "a", "b"]), + "c": Categorical(["3.4", "3.4", "4.5"]), + } + ) + actual = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(actual, expected) + + +@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}]) +def test_categorical_dtype_single(all_parsers, dtype, request): + # see gh-10153 + parser = all_parsers + data = """a,b,c +1,a,3.4 +1,a,3.4 +2,b,4.5""" + expected = DataFrame( + {"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]} + ) + if parser.engine == "pyarrow": + mark = pytest.mark.xfail( + strict=False, + reason="Flaky test sometimes gives object dtype instead of Categorical", + ) + request.applymarker(mark) + + actual = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(actual, expected) + + +@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different +def test_categorical_dtype_unsorted(all_parsers): + # see gh-10153 + parser = all_parsers + data = """a,b,c +1,b,3.4 +1,b,3.4 +2,a,4.5""" + expected = DataFrame( + { + "a": Categorical(["1", "1", "2"]), + "b": Categorical(["b", "b", "a"]), + "c": Categorical(["3.4", "3.4", "4.5"]), + } + ) + actual = parser.read_csv(StringIO(data), dtype="category") + tm.assert_frame_equal(actual, expected) + + +@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different +def test_categorical_dtype_missing(all_parsers): + # see gh-10153 + parser = all_parsers + data = """a,b,c +1,b,3.4 +1,nan,3.4 +2,a,4.5""" + expected = DataFrame( + { + "a": Categorical(["1", "1", "2"]), + "b": Categorical(["b", np.nan, "a"]), + "c": Categorical(["3.4", "3.4", "4.5"]), + } + ) + actual = parser.read_csv(StringIO(data), dtype="category") + tm.assert_frame_equal(actual, expected) + + +@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different +@pytest.mark.slow +def test_categorical_dtype_high_cardinality_numeric(all_parsers, monkeypatch): + # see gh-18186 + # was an issue with C parser, due to DEFAULT_BUFFER_HEURISTIC + parser = all_parsers + heuristic = 2**5 + data = np.sort([str(i) for i in range(heuristic + 1)]) + expected = DataFrame({"a": Categorical(data, ordered=True)}) + with monkeypatch.context() as m: + m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic) + actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category") + actual["a"] = actual["a"].cat.reorder_categories( + 
np.sort(actual.a.cat.categories), ordered=True + ) + tm.assert_frame_equal(actual, expected) + + +def test_categorical_dtype_utf16(all_parsers, csv_dir_path): + # see gh-10153 + pth = os.path.join(csv_dir_path, "utf16_ex.txt") + parser = all_parsers + encoding = "utf-16" + sep = "\t" + + expected = parser.read_csv(pth, sep=sep, encoding=encoding) + expected = expected.apply(Categorical) + + actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category") + tm.assert_frame_equal(actual, expected) + + +def test_categorical_dtype_chunksize_infer_categories(all_parsers): + # see gh-10153 + parser = all_parsers + data = """a,b +1,a +1,b +1,b +2,c""" + expecteds = [ + DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}), + DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]), + ] + + if parser.engine == "pyarrow": + msg = "The 'chunksize' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2) + return + + with parser.read_csv( + StringIO(data), dtype={"b": "category"}, chunksize=2 + ) as actuals: + for actual, expected in zip(actuals, expecteds): + tm.assert_frame_equal(actual, expected) + + +def test_categorical_dtype_chunksize_explicit_categories(all_parsers): + # see gh-10153 + parser = all_parsers + data = """a,b +1,a +1,b +1,b +2,c""" + cats = ["a", "b", "c"] + expecteds = [ + DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}), + DataFrame( + {"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)}, + index=[2, 3], + ), + ] + dtype = CategoricalDtype(cats) + + if parser.engine == "pyarrow": + msg = "The 'chunksize' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2) + return + + with parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2) as actuals: + for actual, expected in zip(actuals, expecteds): + tm.assert_frame_equal(actual, expected) + + +def test_categorical_dtype_latin1(all_parsers, csv_dir_path): + # see gh-10153 + pth = os.path.join(csv_dir_path, "unicode_series.csv") + parser = all_parsers + encoding = "latin-1" + + expected = parser.read_csv(pth, header=None, encoding=encoding) + expected[1] = Categorical(expected[1]) + + actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"}) + tm.assert_frame_equal(actual, expected) + + +@pytest.mark.parametrize("ordered", [False, True]) +@pytest.mark.parametrize( + "categories", + [["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]], +) +def test_categorical_category_dtype(all_parsers, categories, ordered): + parser = all_parsers + data = """a,b +1,a +1,b +1,b +2,c""" + expected = DataFrame( + { + "a": [1, 1, 1, 2], + "b": Categorical( + ["a", "b", "b", "c"], categories=categories, ordered=ordered + ), + } + ) + + dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)} + result = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(result, expected) + + +def test_categorical_category_dtype_unsorted(all_parsers): + parser = all_parsers + data = """a,b +1,a +1,b +1,b +2,c""" + dtype = CategoricalDtype(["c", "b", "a"]) + expected = DataFrame( + { + "a": [1, 1, 1, 2], + "b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]), + } + ) + + result = parser.read_csv(StringIO(data), dtype={"b": dtype}) + tm.assert_frame_equal(result, expected) + + +def 
test_categorical_coerces_numeric(all_parsers): + parser = all_parsers + dtype = {"b": CategoricalDtype([1, 2, 3])} + + data = "b\n1\n1\n2\n3" + expected = DataFrame({"b": Categorical([1, 1, 2, 3])}) + + result = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(result, expected) + + +def test_categorical_coerces_datetime(all_parsers): + parser = all_parsers + dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None) + dtype = {"b": CategoricalDtype(dti)} + + data = "b\n2017-01-01\n2018-01-01\n2019-01-01" + expected = DataFrame({"b": Categorical(dtype["b"].categories)}) + + result = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(result, expected) + + +def test_categorical_coerces_timestamp(all_parsers): + parser = all_parsers + dtype = {"b": CategoricalDtype([Timestamp("2014")])} + + data = "b\n2014-01-01\n2014-01-01" + expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)}) + + result = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(result, expected) + + +def test_categorical_coerces_timedelta(all_parsers): + parser = all_parsers + dtype = {"b": CategoricalDtype(pd.to_timedelta(["1h", "2h", "3h"]))} + + data = "b\n1h\n2h\n3h" + expected = DataFrame({"b": Categorical(dtype["b"].categories)}) + + result = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [ + "b\nTrue\nFalse\nNA\nFalse", + "b\ntrue\nfalse\nNA\nfalse", + "b\nTRUE\nFALSE\nNA\nFALSE", + "b\nTrue\nFalse\nNA\nFALSE", + ], +) +def test_categorical_dtype_coerces_boolean(all_parsers, data): + # see gh-20498 + parser = all_parsers + dtype = {"b": CategoricalDtype([False, True])} + expected = DataFrame({"b": Categorical([True, False, None, False])}) + + result = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(result, expected) + + +def test_categorical_unexpected_categories(all_parsers): + parser = all_parsers + dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])} + + data = "b\nd\na\nc\nd" # Unexpected c + expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])}) + + result = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..ce02e752fb90b4f69d63baa6875ba8bda6d991fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.py @@ -0,0 +1,643 @@ +""" +Tests dtype specification during parsing +for all of the parsers defined in parsers.py +""" +from collections import defaultdict +from io import StringIO + +import numpy as np +import pytest + +from pandas.errors import ParserWarning + +import pandas as pd +from pandas import ( + DataFrame, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + IntegerArray, + StringArray, +) + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.mark.parametrize("dtype", [str, object]) +@pytest.mark.parametrize("check_orig", [True, False]) +@pytest.mark.usefixtures("pyarrow_xfail") +def test_dtype_all_columns(all_parsers, dtype, check_orig): + # see gh-3795, gh-6607 + parser = all_parsers + + df = DataFrame( + np.random.default_rng(2).random((5, 
2)).round(4), + columns=list("AB"), + index=["1A", "1B", "1C", "1D", "1E"], + ) + + with tm.ensure_clean("__passing_str_as_dtype__.csv") as path: + df.to_csv(path) + + result = parser.read_csv(path, dtype=dtype, index_col=0) + + if check_orig: + expected = df.copy() + result = result.astype(float) + else: + expected = df.astype(str) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.usefixtures("pyarrow_xfail") +def test_dtype_per_column(all_parsers): + parser = all_parsers + data = """\ +one,two +1,2.5 +2,3.5 +3,4.5 +4,5.5""" + expected = DataFrame( + [[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"] + ) + expected["one"] = expected["one"].astype(np.float64) + expected["two"] = expected["two"].astype(object) + + result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str}) + tm.assert_frame_equal(result, expected) + + +def test_invalid_dtype_per_column(all_parsers): + parser = all_parsers + data = """\ +one,two +1,2.5 +2,3.5 +3,4.5 +4,5.5""" + + with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"): + parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"}) + + +def test_raise_on_passed_int_dtype_with_nas(all_parsers): + # see gh-2631 + parser = all_parsers + data = """YEAR, DOY, a +2001,106380451,10 +2001,,11 +2001,106380451,67""" + + if parser.engine == "c": + msg = "Integer column has NA values" + elif parser.engine == "pyarrow": + msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine" + else: + msg = "Unable to convert column DOY" + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True) + + +def test_dtype_with_converters(all_parsers): + parser = all_parsers + data = """a,b +1.1,2.2 +1.2,2.3""" + + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)} + ) + return + + # Dtype spec ignored if converters specified. 
+ result = parser.read_csv_check_warnings( + ParserWarning, + "Both a converter and dtype were specified for column a " + "- only the converter will be used.", + StringIO(data), + dtype={"a": "i8"}, + converters={"a": lambda x: str(x)}, + ) + expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", list(np.typecodes["AllInteger"] + np.typecodes["Float"]) +) +def test_numeric_dtype(all_parsers, dtype): + data = "0\n1" + parser = all_parsers + expected = DataFrame([0, 1], dtype=dtype) + + result = parser.read_csv(StringIO(data), header=None, dtype=dtype) + tm.assert_frame_equal(expected, result) + + +@pytest.mark.usefixtures("pyarrow_xfail") +def test_boolean_dtype(all_parsers): + parser = all_parsers + data = "\n".join( + [ + "a", + "True", + "TRUE", + "true", + "1", + "1.0", + "False", + "FALSE", + "false", + "0", + "0.0", + "NaN", + "nan", + "NA", + "null", + "NULL", + ] + ) + + result = parser.read_csv(StringIO(data), dtype="boolean") + expected = DataFrame( + { + "a": pd.array( + [ + True, + True, + True, + True, + True, + False, + False, + False, + False, + False, + None, + None, + None, + None, + None, + ], + dtype="boolean", + ) + } + ) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.usefixtures("pyarrow_xfail") +def test_delimiter_with_usecols_and_parse_dates(all_parsers): + # GH#35873 + result = all_parsers.read_csv( + StringIO('"dump","-9,1","-9,1",20101010'), + engine="python", + names=["col", "col1", "col2", "col3"], + usecols=["col1", "col2", "col3"], + parse_dates=["col3"], + decimal=",", + ) + expected = DataFrame( + {"col1": [-9.1], "col2": [-9.1], "col3": [Timestamp("2010-10-10")]} + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("thousands", ["_", None]) +def test_decimal_and_exponential( + request, python_parser_only, numeric_decimal, thousands +): + # GH#31920 + decimal_number_check(request, python_parser_only, numeric_decimal, thousands, None) + + +@pytest.mark.parametrize("thousands", ["_", None]) +@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"]) +def test_1000_sep_decimal_float_precision( + request, c_parser_only, numeric_decimal, float_precision, thousands +): + # test decimal and thousand sep handling in across 'float_precision' + # parsers + decimal_number_check( + request, c_parser_only, numeric_decimal, thousands, float_precision + ) + text, value = numeric_decimal + text = " " + text + " " + if isinstance(value, str): # the negative cases (parse as text) + value = " " + value + " " + decimal_number_check( + request, c_parser_only, (text, value), thousands, float_precision + ) + + +def decimal_number_check(request, parser, numeric_decimal, thousands, float_precision): + # GH#31920 + value = numeric_decimal[0] + if thousands is None and value in ("1_,", "1_234,56", "1_234,56e0"): + request.applymarker( + pytest.mark.xfail(reason=f"thousands={thousands} and sep is in {value}") + ) + df = parser.read_csv( + StringIO(value), + float_precision=float_precision, + sep="|", + thousands=thousands, + decimal=",", + header=None, + ) + val = df.iloc[0, 0] + assert val == numeric_decimal[1] + + +@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"]) +def test_skip_whitespace(c_parser_only, float_precision): + DATA = """id\tnum\t +1\t1.2 \t +1\t 2.1\t +2\t 1\t +2\t 1.2 \t +""" + df = c_parser_only.read_csv( + StringIO(DATA), + float_precision=float_precision, + sep="\t", + header=0, + 
dtype={1: np.float64}, + ) + tm.assert_series_equal(df.iloc[:, 1], pd.Series([1.2, 2.1, 1.0, 1.2], name="num")) + + +@pytest.mark.usefixtures("pyarrow_xfail") +def test_true_values_cast_to_bool(all_parsers): + # GH#34655 + text = """a,b +yes,xxx +no,yyy +1,zzz +0,aaa + """ + parser = all_parsers + result = parser.read_csv( + StringIO(text), + true_values=["yes"], + false_values=["no"], + dtype={"a": "boolean"}, + ) + expected = DataFrame( + {"a": [True, False, True, False], "b": ["xxx", "yyy", "zzz", "aaa"]} + ) + expected["a"] = expected["a"].astype("boolean") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.usefixtures("pyarrow_xfail") +@pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)]) +def test_dtype_mangle_dup_cols(all_parsers, dtypes, exp_value): + # GH#35211 + parser = all_parsers + data = """a,a\n1,1""" + dtype_dict = {"a": str, **dtypes} + # GH#42462 + dtype_dict_copy = dtype_dict.copy() + result = parser.read_csv(StringIO(data), dtype=dtype_dict) + expected = DataFrame({"a": ["1"], "a.1": [exp_value]}) + assert dtype_dict == dtype_dict_copy, "dtype dict changed" + tm.assert_frame_equal(result, expected) + + +@pytest.mark.usefixtures("pyarrow_xfail") +def test_dtype_mangle_dup_cols_single_dtype(all_parsers): + # GH#42022 + parser = all_parsers + data = """a,a\n1,1""" + result = parser.read_csv(StringIO(data), dtype=str) + expected = DataFrame({"a": ["1"], "a.1": ["1"]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.usefixtures("pyarrow_xfail") +def test_dtype_multi_index(all_parsers): + # GH 42446 + parser = all_parsers + data = "A,B,B\nX,Y,Z\n1,2,3" + + result = parser.read_csv( + StringIO(data), + header=list(range(2)), + dtype={ + ("A", "X"): np.int32, + ("B", "Y"): np.int32, + ("B", "Z"): np.float32, + }, + ) + + expected = DataFrame( + { + ("A", "X"): np.int32([1]), + ("B", "Y"): np.int32([2]), + ("B", "Z"): np.float32([3]), + } + ) + + tm.assert_frame_equal(result, expected) + + +def test_nullable_int_dtype(all_parsers, any_int_ea_dtype): + # GH 25472 + parser = all_parsers + dtype = any_int_ea_dtype + + data = """a,b,c +,3,5 +1,,6 +2,4,""" + expected = DataFrame( + { + "a": pd.array([pd.NA, 1, 2], dtype=dtype), + "b": pd.array([3, pd.NA, 4], dtype=dtype), + "c": pd.array([5, 6, pd.NA], dtype=dtype), + } + ) + actual = parser.read_csv(StringIO(data), dtype=dtype) + tm.assert_frame_equal(actual, expected) + + +@pytest.mark.usefixtures("pyarrow_xfail") +@pytest.mark.parametrize("default", ["float", "float64"]) +def test_dtypes_defaultdict(all_parsers, default): + # GH#41574 + data = """a,b +1,2 +""" + dtype = defaultdict(lambda: default, a="int64") + parser = all_parsers + result = parser.read_csv(StringIO(data), dtype=dtype) + expected = DataFrame({"a": [1], "b": 2.0}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.usefixtures("pyarrow_xfail") +def test_dtypes_defaultdict_mangle_dup_cols(all_parsers): + # GH#41574 + data = """a,b,a,b,b.1 +1,2,3,4,5 +""" + dtype = defaultdict(lambda: "float64", a="int64") + dtype["b.1"] = "int64" + parser = all_parsers + result = parser.read_csv(StringIO(data), dtype=dtype) + expected = DataFrame({"a": [1], "b": [2.0], "a.1": [3], "b.2": [4.0], "b.1": [5]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.usefixtures("pyarrow_xfail") +def test_dtypes_defaultdict_invalid(all_parsers): + # GH#41574 + data = """a,b +1,2 +""" + dtype = defaultdict(lambda: "invalid_dtype", a="int64") + parser = all_parsers + with pytest.raises(TypeError, match="not understood"): + 
parser.read_csv(StringIO(data), dtype=dtype) + + +def test_dtype_backend(all_parsers): + # GH#36712 + + parser = all_parsers + + data = """a,b,c,d,e,f,g,h,i,j +1,2.5,True,a,,,,,12-31-2019, +3,4.5,False,b,6,7.5,True,a,12-31-2019, +""" + result = parser.read_csv( + StringIO(data), dtype_backend="numpy_nullable", parse_dates=["i"] + ) + expected = DataFrame( + { + "a": pd.Series([1, 3], dtype="Int64"), + "b": pd.Series([2.5, 4.5], dtype="Float64"), + "c": pd.Series([True, False], dtype="boolean"), + "d": pd.Series(["a", "b"], dtype="string"), + "e": pd.Series([pd.NA, 6], dtype="Int64"), + "f": pd.Series([pd.NA, 7.5], dtype="Float64"), + "g": pd.Series([pd.NA, True], dtype="boolean"), + "h": pd.Series([pd.NA, "a"], dtype="string"), + "i": pd.Series([Timestamp("2019-12-31")] * 2), + "j": pd.Series([pd.NA, pd.NA], dtype="Int64"), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_dtype_backend_and_dtype(all_parsers): + # GH#36712 + + parser = all_parsers + + data = """a,b +1,2.5 +, +""" + result = parser.read_csv( + StringIO(data), dtype_backend="numpy_nullable", dtype="float64" + ) + expected = DataFrame({"a": [1.0, np.nan], "b": [2.5, np.nan]}) + tm.assert_frame_equal(result, expected) + + +def test_dtype_backend_string(all_parsers, string_storage): + # GH#36712 + pa = pytest.importorskip("pyarrow") + + with pd.option_context("mode.string_storage", string_storage): + parser = all_parsers + + data = """a,b +a,x +b, +""" + result = parser.read_csv(StringIO(data), dtype_backend="numpy_nullable") + + if string_storage == "python": + expected = DataFrame( + { + "a": StringArray(np.array(["a", "b"], dtype=np.object_)), + "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)), + } + ) + else: + expected = DataFrame( + { + "a": ArrowStringArray(pa.array(["a", "b"])), + "b": ArrowStringArray(pa.array(["x", None])), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_dtype_backend_ea_dtype_specified(all_parsers): + # GH#491496 + data = """a,b +1,2 +""" + parser = all_parsers + result = parser.read_csv( + StringIO(data), dtype="Int64", dtype_backend="numpy_nullable" + ) + expected = DataFrame({"a": [1], "b": 2}, dtype="Int64") + tm.assert_frame_equal(result, expected) + + +def test_dtype_backend_pyarrow(all_parsers, request): + # GH#36712 + pa = pytest.importorskip("pyarrow") + parser = all_parsers + + data = """a,b,c,d,e,f,g,h,i,j +1,2.5,True,a,,,,,12-31-2019, +3,4.5,False,b,6,7.5,True,a,12-31-2019, +""" + result = parser.read_csv(StringIO(data), dtype_backend="pyarrow", parse_dates=["i"]) + expected = DataFrame( + { + "a": pd.Series([1, 3], dtype="int64[pyarrow]"), + "b": pd.Series([2.5, 4.5], dtype="float64[pyarrow]"), + "c": pd.Series([True, False], dtype="bool[pyarrow]"), + "d": pd.Series(["a", "b"], dtype=pd.ArrowDtype(pa.string())), + "e": pd.Series([pd.NA, 6], dtype="int64[pyarrow]"), + "f": pd.Series([pd.NA, 7.5], dtype="float64[pyarrow]"), + "g": pd.Series([pd.NA, True], dtype="bool[pyarrow]"), + "h": pd.Series( + [pd.NA, "a"], + dtype=pd.ArrowDtype(pa.string()), + ), + "i": pd.Series([Timestamp("2019-12-31")] * 2), + "j": pd.Series([pd.NA, pd.NA], dtype="null[pyarrow]"), + } + ) + tm.assert_frame_equal(result, expected) + + +# pyarrow engine failing: +# https://github.com/pandas-dev/pandas/issues/56136 +@pytest.mark.usefixtures("pyarrow_xfail") +def test_ea_int_avoid_overflow(all_parsers): + # GH#32134 + parser = all_parsers + data = """a,b +1,1 +,1 +1582218195625938945,1 +""" + result = parser.read_csv(StringIO(data), dtype={"a": "Int64"}) + expected = DataFrame( 
+ { + "a": IntegerArray( + np.array([1, 1, 1582218195625938945]), np.array([False, True, False]) + ), + "b": 1, + } + ) + tm.assert_frame_equal(result, expected) + + +def test_string_inference(all_parsers): + # GH#54430 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + + data = """a,b +x,1 +y,2 +,3""" + parser = all_parsers + with pd.option_context("future.infer_string", True): + result = parser.read_csv(StringIO(data)) + + expected = DataFrame( + {"a": pd.Series(["x", "y", None], dtype=dtype), "b": [1, 2, 3]}, + columns=pd.Index(["a", "b"], dtype=dtype), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["O", object, "object", np.object_, str, np.str_]) +def test_string_inference_object_dtype(all_parsers, dtype): + # GH#56047 + pytest.importorskip("pyarrow") + + data = """a,b +x,a +y,a +z,a""" + parser = all_parsers + with pd.option_context("future.infer_string", True): + result = parser.read_csv(StringIO(data), dtype=dtype) + + expected = DataFrame( + { + "a": pd.Series(["x", "y", "z"], dtype=object), + "b": pd.Series(["a", "a", "a"], dtype=object), + }, + columns=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) + + with pd.option_context("future.infer_string", True): + result = parser.read_csv(StringIO(data), dtype={"a": dtype}) + + expected = DataFrame( + { + "a": pd.Series(["x", "y", "z"], dtype=object), + "b": pd.Series(["a", "a", "a"], dtype="string[pyarrow_numpy]"), + }, + columns=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"), + ) + tm.assert_frame_equal(result, expected) + + +def test_accurate_parsing_of_large_integers(all_parsers): + # GH#52505 + data = """SYMBOL,MOMENT,ID,ID_DEAL +AAPL,20230301181139587,1925036343869802844, +AAPL,20230301181139587,2023552585717889863,2023552585717263358 +NVDA,20230301181139587,2023552585717889863,2023552585717263359 +AMC,20230301181139587,2023552585717889863,2023552585717263360 +AMZN,20230301181139587,2023552585717889759,2023552585717263360 +MSFT,20230301181139587,2023552585717889863,2023552585717263361 +NVDA,20230301181139587,2023552585717889827,2023552585717263361""" + orders = pd.read_csv(StringIO(data), dtype={"ID_DEAL": pd.Int64Dtype()}) + assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263358, "ID_DEAL"]) == 1 + assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263359, "ID_DEAL"]) == 1 + assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263360, "ID_DEAL"]) == 2 + assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263361, "ID_DEAL"]) == 2 + + +def test_dtypes_with_usecols(all_parsers): + # GH#54868 + + parser = all_parsers + data = """a,b,c +1,2,3 +4,5,6""" + + result = parser.read_csv(StringIO(data), usecols=["a", "c"], dtype={"a": object}) + if parser.engine == "pyarrow": + values = [1, 4] + else: + values = ["1", "4"] + expected = DataFrame({"a": pd.Series(values, dtype=object), "c": [3, 6]}) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_empty.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_empty.py new file mode 100644 index 0000000000000000000000000000000000000000..f34385b190c5ffa8df1a517fb0e0c9ccd8fe0073 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_empty.py @@ -0,0 +1,181 @@ +""" +Tests dtype specification during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO + +import numpy as np +import pytest + +from pandas import ( + 
Categorical, + DataFrame, + Index, + MultiIndex, + Series, + concat, +) +import pandas._testing as tm + +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_dtype_all_columns_empty(all_parsers): + # see gh-12048 + parser = all_parsers + result = parser.read_csv(StringIO("A,B"), dtype=str) + + expected = DataFrame({"A": [], "B": []}, dtype=str) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_empty_pass_dtype(all_parsers): + parser = all_parsers + + data = "one,two" + result = parser.read_csv(StringIO(data), dtype={"one": "u1"}) + + expected = DataFrame( + {"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)}, + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_empty_with_index_pass_dtype(all_parsers): + parser = all_parsers + + data = "one,two" + result = parser.read_csv( + StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"} + ) + + expected = DataFrame( + {"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one") + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_empty_with_multi_index_pass_dtype(all_parsers): + parser = all_parsers + + data = "one,two,three" + result = parser.read_csv( + StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"} + ) + + exp_idx = MultiIndex.from_arrays( + [np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)], + names=["one", "two"], + ) + expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers): + parser = all_parsers + + data = "one,one" + result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"}) + + expected = DataFrame( + {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")}, + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers): + parser = all_parsers + + data = "one,one" + result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"}) + + expected = DataFrame( + {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")}, + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers): + # see gh-9424 + parser = all_parsers + expected = concat( + [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")], + axis=1, + ) + + data = "one,one" + result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"}) + tm.assert_frame_equal(result, expected) + + +def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers): + # see gh-9424 + parser = all_parsers + expected = concat( + [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")], + axis=1, + ) + expected.index = expected.index.astype(object) + + with pytest.raises(ValueError, match="Duplicate names"): + data = "" + parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"}) + + +@pytest.mark.parametrize( + "dtype,expected", + [ + (np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)), + ( + "category", + DataFrame({"a": Categorical([]), "b": 
Categorical([])}), + ), + ( + {"a": "category", "b": "category"}, + DataFrame({"a": Categorical([]), "b": Categorical([])}), + ), + ("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")), + ( + "timedelta64[ns]", + DataFrame( + { + "a": Series([], dtype="timedelta64[ns]"), + "b": Series([], dtype="timedelta64[ns]"), + }, + ), + ), + ( + {"a": np.int64, "b": np.int32}, + DataFrame( + {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)}, + ), + ), + ( + {0: np.int64, 1: np.int32}, + DataFrame( + {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)}, + ), + ), + ( + {"a": np.int64, 1: np.int32}, + DataFrame( + {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)}, + ), + ), + ], +) +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_empty_dtype(all_parsers, dtype, expected): + # see gh-14712 + parser = all_parsers + data = "a,b" + + result = parser.read_csv(StringIO(data), header=0, dtype=dtype) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py new file mode 100644 index 0000000000000000000000000000000000000000..27d7bc0bb6c07aeb74cc7324a101a3572c3161ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py @@ -0,0 +1,643 @@ +""" +Tests that apply specifically to the CParser. Unless specifically stated +as a CParser-specific issue, the goal is to eventually move as many of +these tests out of this module as soon as the Python parser can accept +further arguments when parsing. +""" +from decimal import Decimal +from io import ( + BytesIO, + StringIO, + TextIOWrapper, +) +import mmap +import os +import tarfile + +import numpy as np +import pytest + +from pandas.compat.numpy import np_version_gte1p24 +from pandas.errors import ( + ParserError, + ParserWarning, +) +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + concat, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "malformed", + ["1\r1\r1\r 1\r 1\r", "1\r1\r1\r 1\r 1\r11\r", "1\r1\r1\r 1\r 1\r11\r1\r"], + ids=["words pointer", "stream pointer", "lines pointer"], +) +def test_buffer_overflow(c_parser_only, malformed): + # see gh-9205: test certain malformed input files that cause + # buffer overflows in tokenizer.c + msg = "Buffer overflow caught - possible malformed input file." 
+ parser = c_parser_only + + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(malformed)) + + +def test_delim_whitespace_custom_terminator(c_parser_only): + # See gh-12912 + data = "a b c~1 2 3~4 5 6~7 8 9" + parser = c_parser_only + + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + df = parser.read_csv(StringIO(data), lineterminator="~", delim_whitespace=True) + expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]) + tm.assert_frame_equal(df, expected) + + +def test_dtype_and_names_error(c_parser_only): + # see gh-8833: passing both dtype and names + # resulting in an error reporting issue + parser = c_parser_only + data = """ +1.0 1 +2.0 2 +3.0 3 +""" + # base cases + result = parser.read_csv(StringIO(data), sep=r"\s+", header=None) + expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]]) + tm.assert_frame_equal(result, expected) + + result = parser.read_csv(StringIO(data), sep=r"\s+", header=None, names=["a", "b"]) + expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + # fallback casting + result = parser.read_csv( + StringIO(data), sep=r"\s+", header=None, names=["a", "b"], dtype={"a": np.int32} + ) + expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=["a", "b"]) + expected["a"] = expected["a"].astype(np.int32) + tm.assert_frame_equal(result, expected) + + data = """ +1.0 1 +nan 2 +3.0 3 +""" + # fallback casting, but not castable + warning = RuntimeWarning if np_version_gte1p24 else None + with pytest.raises(ValueError, match="cannot safely convert"): + with tm.assert_produces_warning(warning, check_stacklevel=False): + parser.read_csv( + StringIO(data), + sep=r"\s+", + header=None, + names=["a", "b"], + dtype={"a": np.int32}, + ) + + +@pytest.mark.parametrize( + "match,kwargs", + [ + # For each of these cases, all of the dtypes are valid, just unsupported. 
+ ( + ( + "the dtype datetime64 is not supported for parsing, " + "pass this column using parse_dates instead" + ), + {"dtype": {"A": "datetime64", "B": "float64"}}, + ), + ( + ( + "the dtype datetime64 is not supported for parsing, " + "pass this column using parse_dates instead" + ), + {"dtype": {"A": "datetime64", "B": "float64"}, "parse_dates": ["B"]}, + ), + ( + "the dtype timedelta64 is not supported for parsing", + {"dtype": {"A": "timedelta64", "B": "float64"}}, + ), + ( + f"the dtype {tm.ENDIAN}U8 is not supported for parsing", + {"dtype": {"A": "U8"}}, + ), + ], + ids=["dt64-0", "dt64-1", "td64", f"{tm.ENDIAN}U8"], +) +def test_unsupported_dtype(c_parser_only, match, kwargs): + parser = c_parser_only + df = DataFrame( + np.random.default_rng(2).random((5, 2)), + columns=list("AB"), + index=["1A", "1B", "1C", "1D", "1E"], + ) + + with tm.ensure_clean("__unsupported_dtype__.csv") as path: + df.to_csv(path) + + with pytest.raises(TypeError, match=match): + parser.read_csv(path, index_col=0, **kwargs) + + +@td.skip_if_32bit +@pytest.mark.slow +# test numbers between 1 and 2 +@pytest.mark.parametrize("num", np.linspace(1.0, 2.0, num=21)) +def test_precise_conversion(c_parser_only, num): + parser = c_parser_only + + normal_errors = [] + precise_errors = [] + + def error(val: float, actual_val: Decimal) -> Decimal: + return abs(Decimal(f"{val:.100}") - actual_val) + + # 25 decimal digits of precision + text = f"a\n{num:.25}" + + normal_val = float( + parser.read_csv(StringIO(text), float_precision="legacy")["a"][0] + ) + precise_val = float(parser.read_csv(StringIO(text), float_precision="high")["a"][0]) + roundtrip_val = float( + parser.read_csv(StringIO(text), float_precision="round_trip")["a"][0] + ) + actual_val = Decimal(text[2:]) + + normal_errors.append(error(normal_val, actual_val)) + precise_errors.append(error(precise_val, actual_val)) + + # round-trip should match float() + assert roundtrip_val == float(text[2:]) + + assert sum(precise_errors) <= sum(normal_errors) + assert max(precise_errors) <= max(normal_errors) + + +def test_usecols_dtypes(c_parser_only): + parser = c_parser_only + data = """\ +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + + result = parser.read_csv( + StringIO(data), + usecols=(0, 1, 2), + names=("a", "b", "c"), + header=None, + converters={"a": str}, + dtype={"b": int, "c": float}, + ) + result2 = parser.read_csv( + StringIO(data), + usecols=(0, 2), + names=("a", "b", "c"), + header=None, + converters={"a": str}, + dtype={"b": int, "c": float}, + ) + + assert (result.dtypes == [object, int, float]).all() + assert (result2.dtypes == [object, float]).all() + + +def test_disable_bool_parsing(c_parser_only): + # see gh-2090 + + parser = c_parser_only + data = """A,B,C +Yes,No,Yes +No,Yes,Yes +Yes,,Yes +No,No,No""" + + result = parser.read_csv(StringIO(data), dtype=object) + assert (result.dtypes == object).all() + + result = parser.read_csv(StringIO(data), dtype=object, na_filter=False) + assert result["B"][2] == "" + + +def test_custom_lineterminator(c_parser_only): + parser = c_parser_only + data = "a,b,c~1,2,3~4,5,6" + + result = parser.read_csv(StringIO(data), lineterminator="~") + expected = parser.read_csv(StringIO(data.replace("~", "\n"))) + + tm.assert_frame_equal(result, expected) + + +def test_parse_ragged_csv(c_parser_only): + parser = c_parser_only + data = """1,2,3 +1,2,3,4 +1,2,3,4,5 +1,2 +1,2,3,4""" + + nice_data = """1,2,3,, +1,2,3,4, +1,2,3,4,5 +1,2,,, +1,2,3,4,""" + result = parser.read_csv( + StringIO(data), header=None, names=["a", "b", "c", "d", 
"e"] + ) + + expected = parser.read_csv( + StringIO(nice_data), header=None, names=["a", "b", "c", "d", "e"] + ) + + tm.assert_frame_equal(result, expected) + + # too many columns, cause segfault if not careful + data = "1,2\n3,4,5" + + result = parser.read_csv(StringIO(data), header=None, names=range(50)) + expected = parser.read_csv(StringIO(data), header=None, names=range(3)).reindex( + columns=range(50) + ) + + tm.assert_frame_equal(result, expected) + + +def test_tokenize_CR_with_quoting(c_parser_only): + # see gh-3453 + parser = c_parser_only + data = ' a,b,c\r"a,b","e,d","f,f"' + + result = parser.read_csv(StringIO(data), header=None) + expected = parser.read_csv(StringIO(data.replace("\r", "\n")), header=None) + tm.assert_frame_equal(result, expected) + + result = parser.read_csv(StringIO(data)) + expected = parser.read_csv(StringIO(data.replace("\r", "\n"))) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.slow +@pytest.mark.parametrize("count", [3 * 2**n for n in range(6)]) +def test_grow_boundary_at_cap(c_parser_only, count): + # See gh-12494 + # + # Cause of error was that the C parser + # was not increasing the buffer size when + # the desired space would fill the buffer + # to capacity, which would later cause a + # buffer overflow error when checking the + # EOF terminator of the CSV stream. + # 3 * 2^n commas was observed to break the parser + parser = c_parser_only + + with StringIO("," * count) as s: + expected = DataFrame(columns=[f"Unnamed: {i}" for i in range(count + 1)]) + df = parser.read_csv(s) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.slow +@pytest.mark.parametrize("encoding", [None, "utf-8"]) +def test_parse_trim_buffers(c_parser_only, encoding): + # This test is part of a bugfix for gh-13703. It attempts to + # to stress the system memory allocator, to cause it to move the + # stream buffer and either let the OS reclaim the region, or let + # other memory requests of parser otherwise modify the contents + # of memory space, where it was formally located. + # This test is designed to cause a `segfault` with unpatched + # `tokenizer.c`. Sometimes the test fails on `segfault`, other + # times it fails due to memory corruption, which causes the + # loaded DataFrame to differ from the expected one. + + # Also force 'utf-8' encoding, so that `_string_convert` would take + # a different execution branch. + + parser = c_parser_only + + # Generate a large mixed-type CSV file on-the-fly (one record is + # approx 1.5KiB). 
+ record_ = ( + """9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z""" + """ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,""" + """ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9""" + """99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,""" + """9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9.""" + """99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999.""" + """99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ""" + """ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ""" + """ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z""" + """ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,""" + """9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,""" + """999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,""" + """,,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999""" + """,9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9.""" + """999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,""" + """,9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z""" + """ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ""" + """,999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99""" + """,,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-""" + """9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9""" + """.99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,""" + """,,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9.""" + """99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ""" + """ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ""" + """-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ""" + """ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ""" + """,9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99""" + """,99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9""" + """.99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,""" + ) + + # Set the number of lines so that a call to `parser_trim_buffers` + # is triggered: after a couple of full chunks are consumed a + # relatively small 'residual' chunk would cause reallocation + # within the parser. + chunksize, n_lines = 128, 2 * 128 + 15 + csv_data = "\n".join([record_] * n_lines) + "\n" + + # We will use StringIO to load the CSV from this text buffer. + # pd.read_csv() will iterate over the file in chunks and will + # finally read a residual chunk of really small size. + + # Generate the expected output: manually create the dataframe + # by splitting by comma and repeating the `n_lines` times. + row = tuple(val_ if val_ else np.nan for val_ in record_.split(",")) + expected = DataFrame( + [row for _ in range(n_lines)], dtype=object, columns=None, index=None + ) + + # Iterate over the CSV file in chunks of `chunksize` lines + with parser.read_csv( + StringIO(csv_data), + header=None, + dtype=object, + chunksize=chunksize, + encoding=encoding, + ) as chunks_: + result = concat(chunks_, axis=0, ignore_index=True) + + # Check for data corruption if there was no segfault + tm.assert_frame_equal(result, expected) + + +def test_internal_null_byte(c_parser_only): + # see gh-14012 + # + # The null byte ('\x00') should not be used as a + # true line terminator, escape character, or comment + # character, only as a placeholder to indicate that + # none was specified. + # + # This test should be moved to test_common.py ONLY when + # Python's csv class supports parsing '\x00'. 
+ parser = c_parser_only + + names = ["a", "b", "c"] + data = "1,2,3\n4,\x00,6\n7,8,9" + expected = DataFrame([[1, 2.0, 3], [4, np.nan, 6], [7, 8, 9]], columns=names) + + result = parser.read_csv(StringIO(data), names=names) + tm.assert_frame_equal(result, expected) + + +def test_read_nrows_large(c_parser_only): + # gh-7626 - Read only nrows of data in for large inputs (>262144b) + parser = c_parser_only + header_narrow = "\t".join(["COL_HEADER_" + str(i) for i in range(10)]) + "\n" + data_narrow = "\t".join(["somedatasomedatasomedata1" for _ in range(10)]) + "\n" + header_wide = "\t".join(["COL_HEADER_" + str(i) for i in range(15)]) + "\n" + data_wide = "\t".join(["somedatasomedatasomedata2" for _ in range(15)]) + "\n" + test_input = header_narrow + data_narrow * 1050 + header_wide + data_wide * 2 + + df = parser.read_csv(StringIO(test_input), sep="\t", nrows=1010) + + assert df.size == 1010 * 10 + + +def test_float_precision_round_trip_with_text(c_parser_only): + # see gh-15140 + parser = c_parser_only + df = parser.read_csv(StringIO("a"), header=None, float_precision="round_trip") + tm.assert_frame_equal(df, DataFrame({0: ["a"]})) + + +def test_large_difference_in_columns(c_parser_only): + # see gh-14125 + parser = c_parser_only + + count = 10000 + large_row = ("X," * count)[:-1] + "\n" + normal_row = "XXXXXX XXXXXX,111111111111111\n" + test_input = (large_row + normal_row * 6)[:-1] + + result = parser.read_csv(StringIO(test_input), header=None, usecols=[0]) + rows = test_input.split("\n") + + expected = DataFrame([row.split(",")[0] for row in rows]) + tm.assert_frame_equal(result, expected) + + +def test_data_after_quote(c_parser_only): + # see gh-15910 + parser = c_parser_only + + data = 'a\n1\n"b"a' + result = parser.read_csv(StringIO(data)) + + expected = DataFrame({"a": ["1", "ba"]}) + tm.assert_frame_equal(result, expected) + + +def test_comment_whitespace_delimited(c_parser_only): + parser = c_parser_only + test_input = """\ +1 2 +2 2 3 +3 2 3 # 3 fields +4 2 3# 3 fields +5 2 # 2 fields +6 2# 2 fields +7 # 1 field, NaN +8# 1 field, NaN +9 2 3 # skipped line +# comment""" + with tm.assert_produces_warning( + ParserWarning, match="Skipping line", check_stacklevel=False + ): + df = parser.read_csv( + StringIO(test_input), + comment="#", + header=None, + delimiter="\\s+", + skiprows=0, + on_bad_lines="warn", + ) + expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]]) + tm.assert_frame_equal(df, expected) + + +def test_file_like_no_next(c_parser_only): + # gh-16530: the file-like need not have a "next" or "__next__" + # attribute despite having an "__iter__" attribute. + # + # NOTE: This is only true for the C engine, not Python engine. 
+ class NoNextBuffer(StringIO): + def __next__(self): + raise AttributeError("No next method") + + next = __next__ + + parser = c_parser_only + data = "a\n1" + + expected = DataFrame({"a": [1]}) + result = parser.read_csv(NoNextBuffer(data)) + + tm.assert_frame_equal(result, expected) + + +def test_buffer_rd_bytes_bad_unicode(c_parser_only): + # see gh-22748 + t = BytesIO(b"\xB0") + t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape") + msg = "'utf-8' codec can't encode character" + with pytest.raises(UnicodeError, match=msg): + c_parser_only.read_csv(t, encoding="UTF-8") + + +@pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"]) +def test_read_tarfile(c_parser_only, csv_dir_path, tar_suffix): + # see gh-16530 + # + # Unfortunately, Python's CSV library can't handle + # tarfile objects (expects string, not bytes when + # iterating through a file-like). + parser = c_parser_only + tar_path = os.path.join(csv_dir_path, "tar_csv" + tar_suffix) + + with tarfile.open(tar_path, "r") as tar: + data_file = tar.extractfile("tar_data.csv") + + out = parser.read_csv(data_file) + expected = DataFrame({"a": [1]}) + tm.assert_frame_equal(out, expected) + + +def test_chunk_whitespace_on_boundary(c_parser_only): + # see gh-9735: this issue is C parser-specific (bug when + # parsing whitespace and characters at chunk boundary) + # + # This test case has a field too large for the Python parser / CSV library. + parser = c_parser_only + + chunk1 = "a" * (1024 * 256 - 2) + "\na" + chunk2 = "\n a" + result = parser.read_csv(StringIO(chunk1 + chunk2), header=None) + + expected = DataFrame(["a" * (1024 * 256 - 2), "a", " a"]) + tm.assert_frame_equal(result, expected) + + +def test_file_handles_mmap(c_parser_only, csv1): + # gh-14418 + # + # Don't close user provided file handles. + parser = c_parser_only + + with open(csv1, encoding="utf-8") as f: + with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m: + parser.read_csv(m) + assert not m.closed + + +def test_file_binary_mode(c_parser_only): + # see gh-23779 + parser = c_parser_only + expected = DataFrame([[1, 2, 3], [4, 5, 6]]) + + with tm.ensure_clean() as path: + with open(path, "w", encoding="utf-8") as f: + f.write("1,2,3\n4,5,6") + + with open(path, "rb") as f: + result = parser.read_csv(f, header=None) + tm.assert_frame_equal(result, expected) + + +def test_unix_style_breaks(c_parser_only): + # GH 11020 + parser = c_parser_only + with tm.ensure_clean() as path: + with open(path, "w", newline="\n", encoding="utf-8") as f: + f.write("blah\n\ncol_1,col_2,col_3\n\n") + result = parser.read_csv(path, skiprows=2, encoding="utf-8", engine="c") + expected = DataFrame(columns=["col_1", "col_2", "col_3"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"]) +@pytest.mark.parametrize( + "data,thousands,decimal", + [ + ( + """A|B|C +1|2,334.01|5 +10|13|10. 
+""", + ",", + ".", + ), + ( + """A|B|C +1|2.334,01|5 +10|13|10, +""", + ".", + ",", + ), + ], +) +def test_1000_sep_with_decimal( + c_parser_only, data, thousands, decimal, float_precision +): + parser = c_parser_only + expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]}) + + result = parser.read_csv( + StringIO(data), + sep="|", + thousands=thousands, + decimal=decimal, + float_precision=float_precision, + ) + tm.assert_frame_equal(result, expected) + + +def test_float_precision_options(c_parser_only): + # GH 17154, 36228 + parser = c_parser_only + s = "foo\n243.164\n" + df = parser.read_csv(StringIO(s)) + df2 = parser.read_csv(StringIO(s), float_precision="high") + + tm.assert_frame_equal(df, df2) + + df3 = parser.read_csv(StringIO(s), float_precision="legacy") + + assert not df.iloc[0, 0] == df3.iloc[0, 0] + + msg = "Unrecognized float_precision option: junk" + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(s), float_precision="junk") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_comment.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_comment.py new file mode 100644 index 0000000000000000000000000000000000000000..abaeeb86476da183630b58708741bdad2bc5330d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_comment.py @@ -0,0 +1,227 @@ +""" +Tests that comments are properly handled during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO + +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm + + +@pytest.mark.parametrize("na_values", [None, ["NaN"]]) +def test_comment(all_parsers, na_values): + parser = all_parsers + data = """A,B,C +1,2.,4.#hello world +5.,NaN,10.0 +""" + expected = DataFrame( + [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"] + ) + if parser.engine == "pyarrow": + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), comment="#", na_values=na_values) + return + result = parser.read_csv(StringIO(data), comment="#", na_values=na_values) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "read_kwargs", [{}, {"lineterminator": "*"}, {"delim_whitespace": True}] +) +def test_line_comment(all_parsers, read_kwargs, request): + parser = all_parsers + data = """# empty +A,B,C +1,2.,4.#hello world +#ignore this line +5.,NaN,10.0 +""" + warn = None + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + + if read_kwargs.get("delim_whitespace"): + data = data.replace(",", " ") + warn = FutureWarning + elif read_kwargs.get("lineterminator"): + data = data.replace("\n", read_kwargs.get("lineterminator")) + + read_kwargs["comment"] = "#" + if parser.engine == "pyarrow": + if "lineterminator" in read_kwargs: + msg = ( + "The 'lineterminator' option is not supported with the 'pyarrow' engine" + ) + else: + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning( + warn, match=depr_msg, check_stacklevel=False + ): + parser.read_csv(StringIO(data), **read_kwargs) + return + elif parser.engine == "python" and read_kwargs.get("lineterminator"): + msg = r"Custom line terminators not supported in python parser \(yet\)" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning( + warn, match=depr_msg, check_stacklevel=False + ): 
+ parser.read_csv(StringIO(data), **read_kwargs) + return + + with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False): + result = parser.read_csv(StringIO(data), **read_kwargs) + + expected = DataFrame( + [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"] + ) + tm.assert_frame_equal(result, expected) + + +def test_comment_skiprows(all_parsers): + parser = all_parsers + data = """# empty +random line +# second empty line +1,2,3 +A,B,C +1,2.,4. +5.,NaN,10.0 +""" + # This should ignore the first four lines (including comments). + expected = DataFrame( + [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"] + ) + if parser.engine == "pyarrow": + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), comment="#", skiprows=4) + return + + result = parser.read_csv(StringIO(data), comment="#", skiprows=4) + tm.assert_frame_equal(result, expected) + + +def test_comment_header(all_parsers): + parser = all_parsers + data = """# empty +# second empty line +1,2,3 +A,B,C +1,2.,4. +5.,NaN,10.0 +""" + # Header should begin at the second non-comment line. + expected = DataFrame( + [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"] + ) + if parser.engine == "pyarrow": + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), comment="#", header=1) + return + result = parser.read_csv(StringIO(data), comment="#", header=1) + tm.assert_frame_equal(result, expected) + + +def test_comment_skiprows_header(all_parsers): + parser = all_parsers + data = """# empty +# second empty line +# third empty line +X,Y,Z +1,2,3 +A,B,C +1,2.,4. +5.,NaN,10.0 +""" + # Skiprows should skip the first 4 lines (including comments), + # while header should start from the second non-commented line, + # starting with line 5. 
+ expected = DataFrame( + [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"] + ) + if parser.engine == "pyarrow": + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1) + return + + result = parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("comment_char", ["#", "~", "&", "^", "*", "@"]) +def test_custom_comment_char(all_parsers, comment_char): + parser = all_parsers + data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo" + + if parser.engine == "pyarrow": + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data.replace("#", comment_char)), comment=comment_char + ) + return + result = parser.read_csv( + StringIO(data.replace("#", comment_char)), comment=comment_char + ) + + expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("header", ["infer", None]) +def test_comment_first_line(all_parsers, header): + # see gh-4623 + parser = all_parsers + data = "# notes\na,b,c\n# more notes\n1,2,3" + + if header is None: + expected = DataFrame({0: ["a", "1"], 1: ["b", "2"], 2: ["c", "3"]}) + else: + expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"]) + + if parser.engine == "pyarrow": + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), comment="#", header=header) + return + result = parser.read_csv(StringIO(data), comment="#", header=header) + tm.assert_frame_equal(result, expected) + + +def test_comment_char_in_default_value(all_parsers, request): + # GH#34002 + if all_parsers.engine == "c": + reason = "see gh-34002: works on the python engine but not the c engine" + # NA value containing comment char is interpreted as comment + request.applymarker(pytest.mark.xfail(reason=reason, raises=AssertionError)) + parser = all_parsers + + data = ( + "# this is a comment\n" + "col1,col2,col3,col4\n" + "1,2,3,4#inline comment\n" + "4,5#,6,10\n" + "7,8,#N/A,11\n" + ) + if parser.engine == "pyarrow": + msg = "The 'comment' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), comment="#", na_values="#N/A") + return + result = parser.read_csv(StringIO(data), comment="#", na_values="#N/A") + expected = DataFrame( + { + "col1": [1, 4, 7], + "col2": [2, 5, 8], + "col3": [3.0, np.nan, np.nan], + "col4": [4.0, np.nan, 11.0], + } + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_compression.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..191d0de50b12f91d75e5d8891ef045c6410170ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_compression.py @@ -0,0 +1,211 @@ +""" +Tests compressed data parsing functionality for all +of the parsers defined in parsers.py +""" + +import os +from pathlib import Path +import tarfile +import zipfile + +import pytest + +from pandas import DataFrame +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + 
+@pytest.fixture(params=[True, False]) +def buffer(request): + return request.param + + +@pytest.fixture +def parser_and_data(all_parsers, csv1): + parser = all_parsers + + with open(csv1, "rb") as f: + data = f.read() + expected = parser.read_csv(csv1) + + return parser, data, expected + + +@pytest.mark.parametrize("compression", ["zip", "infer", "zip2"]) +def test_zip(parser_and_data, compression): + parser, data, expected = parser_and_data + + with tm.ensure_clean("test_file.zip") as path: + with zipfile.ZipFile(path, mode="w") as tmp: + tmp.writestr("test_file", data) + + if compression == "zip2": + with open(path, "rb") as f: + result = parser.read_csv(f, compression="zip") + else: + result = parser.read_csv(path, compression=compression) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("compression", ["zip", "infer"]) +def test_zip_error_multiple_files(parser_and_data, compression): + parser, data, expected = parser_and_data + + with tm.ensure_clean("combined_zip.zip") as path: + inner_file_names = ["test_file", "second_file"] + + with zipfile.ZipFile(path, mode="w") as tmp: + for file_name in inner_file_names: + tmp.writestr(file_name, data) + + with pytest.raises(ValueError, match="Multiple files"): + parser.read_csv(path, compression=compression) + + +def test_zip_error_no_files(parser_and_data): + parser, _, _ = parser_and_data + + with tm.ensure_clean() as path: + with zipfile.ZipFile(path, mode="w"): + pass + + with pytest.raises(ValueError, match="Zero files"): + parser.read_csv(path, compression="zip") + + +def test_zip_error_invalid_zip(parser_and_data): + parser, _, _ = parser_and_data + + with tm.ensure_clean() as path: + with open(path, "rb") as f: + with pytest.raises(zipfile.BadZipFile, match="File is not a zip file"): + parser.read_csv(f, compression="zip") + + +@pytest.mark.parametrize("filename", [None, "test.{ext}"]) +def test_compression( + request, + parser_and_data, + compression_only, + buffer, + filename, + compression_to_extension, +): + parser, data, expected = parser_and_data + compress_type = compression_only + + ext = compression_to_extension[compress_type] + filename = filename if filename is None else filename.format(ext=ext) + + if filename and buffer: + request.applymarker( + pytest.mark.xfail( + reason="Cannot deduce compression from buffer of compressed data." + ) + ) + + with tm.ensure_clean(filename=filename) as path: + tm.write_to_compressed(compress_type, path, data) + compression = "infer" if filename else compress_type + + if buffer: + with open(path, "rb") as f: + result = parser.read_csv(f, compression=compression) + else: + result = parser.read_csv(path, compression=compression) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("ext", [None, "gz", "bz2"]) +def test_infer_compression(all_parsers, csv1, buffer, ext): + # see gh-9770 + parser = all_parsers + kwargs = {"index_col": 0, "parse_dates": True} + + expected = parser.read_csv(csv1, **kwargs) + kwargs["compression"] = "infer" + + if buffer: + with open(csv1, encoding="utf-8") as f: + result = parser.read_csv(f, **kwargs) + else: + ext = "." 
+ ext if ext else "" + result = parser.read_csv(csv1 + ext, **kwargs) + + tm.assert_frame_equal(result, expected) + + +def test_compression_utf_encoding(all_parsers, csv_dir_path, utf_value, encoding_fmt): + # see gh-18071, gh-24130 + parser = all_parsers + encoding = encoding_fmt.format(utf_value) + path = os.path.join(csv_dir_path, f"utf{utf_value}_ex_small.zip") + + result = parser.read_csv(path, encoding=encoding, compression="zip", sep="\t") + expected = DataFrame( + { + "Country": ["Venezuela", "Venezuela"], + "Twitter": ["Hugo Chávez Frías", "Henrique Capriles R."], + } + ) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("invalid_compression", ["sfark", "bz3", "zipper"]) +def test_invalid_compression(all_parsers, invalid_compression): + parser = all_parsers + compress_kwargs = {"compression": invalid_compression} + + msg = f"Unrecognized compression type: {invalid_compression}" + + with pytest.raises(ValueError, match=msg): + parser.read_csv("test_file.zip", **compress_kwargs) + + +def test_compression_tar_archive(all_parsers, csv_dir_path): + parser = all_parsers + path = os.path.join(csv_dir_path, "tar_csv.tar.gz") + df = parser.read_csv(path) + assert list(df.columns) == ["a"] + + +def test_ignore_compression_extension(all_parsers): + parser = all_parsers + df = DataFrame({"a": [0, 1]}) + with tm.ensure_clean("test.csv") as path_csv: + with tm.ensure_clean("test.csv.zip") as path_zip: + # make sure to create un-compressed file with zip extension + df.to_csv(path_csv, index=False) + Path(path_zip).write_text( + Path(path_csv).read_text(encoding="utf-8"), encoding="utf-8" + ) + + tm.assert_frame_equal(parser.read_csv(path_zip, compression=None), df) + + +def test_writes_tar_gz(all_parsers): + parser = all_parsers + data = DataFrame( + { + "Country": ["Venezuela", "Venezuela"], + "Twitter": ["Hugo Chávez Frías", "Henrique Capriles R."], + } + ) + with tm.ensure_clean("test.tar.gz") as tar_path: + data.to_csv(tar_path, index=False) + + # test that read_csv infers .tar.gz to gzip: + tm.assert_frame_equal(parser.read_csv(tar_path), data) + + # test that file is indeed gzipped: + with tarfile.open(tar_path, "r:gz") as tar: + result = parser.read_csv( + tar.extractfile(tar.getnames()[0]), compression="infer" + ) + tm.assert_frame_equal(result, data) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_concatenate_chunks.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_concatenate_chunks.py new file mode 100644 index 0000000000000000000000000000000000000000..1bae2317a2fc602a436d17e80bc8d4bfdcd7fe5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_concatenate_chunks.py @@ -0,0 +1,36 @@ +import numpy as np +import pytest + +from pandas.errors import DtypeWarning + +import pandas._testing as tm +from pandas.core.arrays import ArrowExtensionArray + +from pandas.io.parsers.c_parser_wrapper import _concatenate_chunks + + +def test_concatenate_chunks_pyarrow(): + # GH#51876 + pa = pytest.importorskip("pyarrow") + chunks = [ + {0: ArrowExtensionArray(pa.array([1.5, 2.5]))}, + {0: ArrowExtensionArray(pa.array([1, 2]))}, + ] + result = _concatenate_chunks(chunks) + expected = ArrowExtensionArray(pa.array([1.5, 2.5, 1.0, 2.0])) + tm.assert_extension_array_equal(result[0], expected) + + +def test_concatenate_chunks_pyarrow_strings(): + # GH#51876 + pa = pytest.importorskip("pyarrow") + chunks = [ + {0: ArrowExtensionArray(pa.array([1.5, 2.5]))}, + {0: ArrowExtensionArray(pa.array(["a", "b"]))}, + ] + 
with tm.assert_produces_warning(DtypeWarning, match="have mixed types"): + result = _concatenate_chunks(chunks) + expected = np.concatenate( + [np.array([1.5, 2.5], dtype=object), np.array(["a", "b"])] + ) + tm.assert_numpy_array_equal(result[0], expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_converters.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_converters.py new file mode 100644 index 0000000000000000000000000000000000000000..7f3e45324dbd2113669bddacf824f308d7058123 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_converters.py @@ -0,0 +1,263 @@ +""" +Tests column conversion functionality during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO + +from dateutil.parser import parse +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + + +def test_converters_type_must_be_dict(all_parsers): + parser = all_parsers + data = """index,A,B,C,D +foo,2,3,4,5 +""" + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), converters=0) + return + with pytest.raises(TypeError, match="Type converters.+"): + parser.read_csv(StringIO(data), converters=0) + + +@pytest.mark.parametrize("column", [3, "D"]) +@pytest.mark.parametrize( + "converter", [parse, lambda x: int(x.split("/")[2])] # Produce integer. +) +def test_converters(all_parsers, column, converter): + parser = all_parsers + data = """A,B,C,D +a,1,2,01/01/2009 +b,3,4,01/02/2009 +c,4,5,01/03/2009 +""" + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), converters={column: converter}) + return + + result = parser.read_csv(StringIO(data), converters={column: converter}) + + expected = parser.read_csv(StringIO(data)) + expected["D"] = expected["D"].map(converter) + + tm.assert_frame_equal(result, expected) + + +def test_converters_no_implicit_conv(all_parsers): + # see gh-2184 + parser = all_parsers + data = """000102,1.2,A\n001245,2,B""" + + converters = {0: lambda x: x.strip()} + + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), header=None, converters=converters) + return + + result = parser.read_csv(StringIO(data), header=None, converters=converters) + + # Column 0 should not be casted to numeric and should remain as object. 
+ expected = DataFrame([["000102", 1.2, "A"], ["001245", 2, "B"]]) + tm.assert_frame_equal(result, expected) + + +def test_converters_euro_decimal_format(all_parsers): + # see gh-583 + converters = {} + parser = all_parsers + + data = """Id;Number1;Number2;Text1;Text2;Number3 +1;1521,1541;187101,9543;ABC;poi;4,7387 +2;121,12;14897,76;DEF;uyt;0,3773 +3;878,158;108013,434;GHI;rez;2,7356""" + converters["Number1"] = converters["Number2"] = converters[ + "Number3" + ] = lambda x: float(x.replace(",", ".")) + + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), sep=";", converters=converters) + return + + result = parser.read_csv(StringIO(data), sep=";", converters=converters) + expected = DataFrame( + [ + [1, 1521.1541, 187101.9543, "ABC", "poi", 4.7387], + [2, 121.12, 14897.76, "DEF", "uyt", 0.3773], + [3, 878.158, 108013.434, "GHI", "rez", 2.7356], + ], + columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], + ) + tm.assert_frame_equal(result, expected) + + +def test_converters_corner_with_nans(all_parsers): + parser = all_parsers + data = """id,score,days +1,2,12 +2,2-5, +3,,14+ +4,6-12,2""" + + # Example converters. + def convert_days(x): + x = x.strip() + + if not x: + return np.nan + + is_plus = x.endswith("+") + + if is_plus: + x = int(x[:-1]) + 1 + else: + x = int(x) + + return x + + def convert_days_sentinel(x): + x = x.strip() + + if not x: + return np.nan + + is_plus = x.endswith("+") + + if is_plus: + x = int(x[:-1]) + 1 + else: + x = int(x) + + return x + + def convert_score(x): + x = x.strip() + + if not x: + return np.nan + + if x.find("-") > 0: + val_min, val_max = map(int, x.split("-")) + val = 0.5 * (val_min + val_max) + else: + val = float(x) + + return val + + results = [] + + for day_converter in [convert_days, convert_days_sentinel]: + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), + converters={"score": convert_score, "days": day_converter}, + na_values=["", None], + ) + continue + + result = parser.read_csv( + StringIO(data), + converters={"score": convert_score, "days": day_converter}, + na_values=["", None], + ) + assert pd.isna(result["days"][1]) + results.append(result) + + if parser.engine != "pyarrow": + tm.assert_frame_equal(results[0], results[1]) + + +@pytest.mark.parametrize("conv_f", [lambda x: x, str]) +def test_converter_index_col_bug(all_parsers, conv_f): + # see gh-1835 , GH#40589 + parser = all_parsers + data = "A;B\n1;2\n3;4" + + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), sep=";", index_col="A", converters={"A": conv_f} + ) + return + + rs = parser.read_csv( + StringIO(data), sep=";", index_col="A", converters={"A": conv_f} + ) + + xp = DataFrame({"B": [2, 4]}, index=Index(["1", "3"], name="A", dtype="object")) + tm.assert_frame_equal(rs, xp) + + +def test_converter_identity_object(all_parsers): + # GH#40589 + parser = all_parsers + data = "A,B\n1,2\n3,4" + + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), converters={"A": lambda x: x}) + return + + rs = parser.read_csv(StringIO(data), 
converters={"A": lambda x: x}) + + xp = DataFrame({"A": ["1", "3"], "B": [2, 4]}) + tm.assert_frame_equal(rs, xp) + + +def test_converter_multi_index(all_parsers): + # GH 42446 + parser = all_parsers + data = "A,B,B\nX,Y,Z\n1,2,3" + + if parser.engine == "pyarrow": + msg = "The 'converters' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), + header=list(range(2)), + converters={ + ("A", "X"): np.int32, + ("B", "Y"): np.int32, + ("B", "Z"): np.float32, + }, + ) + return + + result = parser.read_csv( + StringIO(data), + header=list(range(2)), + converters={ + ("A", "X"): np.int32, + ("B", "Y"): np.int32, + ("B", "Z"): np.float32, + }, + ) + + expected = DataFrame( + { + ("A", "X"): np.int32([1]), + ("B", "Y"): np.int32([2]), + ("B", "Z"): np.float32([3]), + } + ) + + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_dialect.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_dialect.py new file mode 100644 index 0000000000000000000000000000000000000000..7a72e66996d435cbb76a632c22f36bcad5b74650 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_dialect.py @@ -0,0 +1,195 @@ +""" +Tests that dialects are properly handled during parsing +for all of the parsers defined in parsers.py +""" + +import csv +from io import StringIO + +import pytest + +from pandas.errors import ParserWarning + +from pandas import DataFrame +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture +def custom_dialect(): + dialect_name = "weird" + dialect_kwargs = { + "doublequote": False, + "escapechar": "~", + "delimiter": ":", + "skipinitialspace": False, + "quotechar": "~", + "quoting": 3, + } + return dialect_name, dialect_kwargs + + +def test_dialect(all_parsers): + parser = all_parsers + data = """\ +label1,label2,label3 +index1,"a,c,e +index2,b,d,f +""" + + dia = csv.excel() + dia.quoting = csv.QUOTE_NONE + + if parser.engine == "pyarrow": + msg = "The 'dialect' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), dialect=dia) + return + + df = parser.read_csv(StringIO(data), dialect=dia) + + data = """\ +label1,label2,label3 +index1,a,c,e +index2,b,d,f +""" + exp = parser.read_csv(StringIO(data)) + exp.replace("a", '"a', inplace=True) + tm.assert_frame_equal(df, exp) + + +def test_dialect_str(all_parsers): + dialect_name = "mydialect" + parser = all_parsers + data = """\ +fruit:vegetable +apple:broccoli +pear:tomato +""" + exp = DataFrame({"fruit": ["apple", "pear"], "vegetable": ["broccoli", "tomato"]}) + + with tm.with_csv_dialect(dialect_name, delimiter=":"): + if parser.engine == "pyarrow": + msg = "The 'dialect' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), dialect=dialect_name) + return + + df = parser.read_csv(StringIO(data), dialect=dialect_name) + tm.assert_frame_equal(df, exp) + + +def test_invalid_dialect(all_parsers): + class InvalidDialect: + pass + + data = "a\n1" + parser = all_parsers + msg = "Invalid dialect" + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), dialect=InvalidDialect) + + +@pytest.mark.parametrize( + "arg", + [None, "doublequote", "escapechar", "skipinitialspace", "quotechar", "quoting"], +) 
+@pytest.mark.parametrize("value", ["dialect", "default", "other"]) +def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, value): + # see gh-23761. + dialect_name, dialect_kwargs = custom_dialect + parser = all_parsers + + expected = DataFrame({"a": [1], "b": [2]}) + data = "a:b\n1:2" + + warning_klass = None + kwds = {} + + # arg=None tests when we pass in the dialect without any other arguments. + if arg is not None: + if value == "dialect": # No conflict --> no warning. + kwds[arg] = dialect_kwargs[arg] + elif value == "default": # Default --> no warning. + from pandas.io.parsers.base_parser import parser_defaults + + kwds[arg] = parser_defaults[arg] + else: # Non-default + conflict with dialect --> warning. + warning_klass = ParserWarning + kwds[arg] = "blah" + + with tm.with_csv_dialect(dialect_name, **dialect_kwargs): + if parser.engine == "pyarrow": + msg = "The 'dialect' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv_check_warnings( + # No warning bc we raise + None, + "Conflicting values for", + StringIO(data), + dialect=dialect_name, + **kwds, + ) + return + result = parser.read_csv_check_warnings( + warning_klass, + "Conflicting values for", + StringIO(data), + dialect=dialect_name, + **kwds, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs,warning_klass", + [ + ({"sep": ","}, None), # sep is default --> sep_override=True + ({"sep": "."}, ParserWarning), # sep isn't default --> sep_override=False + ({"delimiter": ":"}, None), # No conflict + ({"delimiter": None}, None), # Default arguments --> sep_override=True + ({"delimiter": ","}, ParserWarning), # Conflict + ({"delimiter": "."}, ParserWarning), # Conflict + ], + ids=[ + "sep-override-true", + "sep-override-false", + "delimiter-no-conflict", + "delimiter-default-arg", + "delimiter-conflict", + "delimiter-conflict2", + ], +) +def test_dialect_conflict_delimiter(all_parsers, custom_dialect, kwargs, warning_klass): + # see gh-23761. 
+ dialect_name, dialect_kwargs = custom_dialect + parser = all_parsers + + expected = DataFrame({"a": [1], "b": [2]}) + data = "a:b\n1:2" + + with tm.with_csv_dialect(dialect_name, **dialect_kwargs): + if parser.engine == "pyarrow": + msg = "The 'dialect' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv_check_warnings( + # no warning bc we raise + None, + "Conflicting values for 'delimiter'", + StringIO(data), + dialect=dialect_name, + **kwargs, + ) + return + result = parser.read_csv_check_warnings( + warning_klass, + "Conflicting values for 'delimiter'", + StringIO(data), + dialect=dialect_name, + **kwargs, + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_encoding.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..cbd3917ba9c044962c262cac705e43b6d597599c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_encoding.py @@ -0,0 +1,337 @@ +""" +Tests encoding functionality during parsing +for all of the parsers defined in parsers.py +""" +from io import ( + BytesIO, + TextIOWrapper, +) +import os +import tempfile +import uuid + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + read_csv, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +def test_bytes_io_input(all_parsers): + encoding = "cp1255" + parser = all_parsers + + data = BytesIO("שלום:1234\n562:123".encode(encoding)) + result = parser.read_csv(data, sep=":", encoding=encoding) + + expected = DataFrame([[562, 123]], columns=["שלום", "1234"]) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_read_csv_unicode(all_parsers): + parser = all_parsers + data = BytesIO("\u0141aski, Jan;1".encode()) + + result = parser.read_csv(data, sep=";", encoding="utf-8", header=None) + expected = DataFrame([["\u0141aski, Jan", 1]]) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow +@pytest.mark.parametrize("sep", [",", "\t"]) +@pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"]) +def test_utf16_bom_skiprows(all_parsers, sep, encoding): + # see gh-2298 + parser = all_parsers + data = """skip this +skip this too +A,B,C +1,2,3 +4,5,6""".replace( + ",", sep + ) + path = f"__{uuid.uuid4()}__.csv" + kwargs = {"sep": sep, "skiprows": 2} + utf8 = "utf-8" + + with tm.ensure_clean(path) as path: + bytes_data = data.encode(encoding) + + with open(path, "wb") as f: + f.write(bytes_data) + + with TextIOWrapper(BytesIO(data.encode(utf8)), encoding=utf8) as bytes_buffer: + result = parser.read_csv(path, encoding=encoding, **kwargs) + expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs) + tm.assert_frame_equal(result, expected) + + +def test_utf16_example(all_parsers, csv_dir_path): + path = os.path.join(csv_dir_path, "utf16_ex.txt") + parser = all_parsers + result = parser.read_csv(path, encoding="utf-16", sep="\t") + assert len(result) == 50 + + +def test_unicode_encoding(all_parsers, csv_dir_path): + path = os.path.join(csv_dir_path, "unicode_series.csv") + parser = all_parsers + + result = parser.read_csv(path, header=None, encoding="latin-1") + result = result.set_index(0) + got = result[1][1632] + + expected = "\xc1 
k\xf6ldum klaka (Cold Fever) (1994)" + assert got == expected + + +@pytest.mark.parametrize( + "data,kwargs,expected", + [ + # Basic test + ("a\n1", {}, DataFrame({"a": [1]})), + # "Regular" quoting + ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})), + # Test in a data row instead of header + ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})), + # Test in empty data row with skipping + ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})), + # Test in empty data row without skipping + ( + "\n1", + {"names": ["a"], "skip_blank_lines": False}, + DataFrame({"a": [np.nan, 1]}), + ), + ], +) +def test_utf8_bom(all_parsers, data, kwargs, expected, request): + # see gh-4793 + parser = all_parsers + bom = "\ufeff" + utf8 = "utf-8" + + def _encode_data_with_bom(_data): + bom_data = (bom + _data).encode(utf8) + return BytesIO(bom_data) + + if ( + parser.engine == "pyarrow" + and data == "\n1" + and kwargs.get("skip_blank_lines", True) + ): + # CSV parse error: Empty CSV file or block: cannot infer number of columns + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + result = parser.read_csv(_encode_data_with_bom(data), encoding=utf8, **kwargs) + tm.assert_frame_equal(result, expected) + + +def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt): + # see gh-13549 + expected = DataFrame({"mb_num": [4.8], "multibyte": ["test"]}) + parser = all_parsers + + encoding = encoding_fmt.format(utf_value) + data = "mb_num,multibyte\n4.8,test".encode(encoding) + + result = parser.read_csv(BytesIO(data), encoding=encoding) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "file_path,encoding", + [ + (("io", "data", "csv", "test1.csv"), "utf-8"), + (("io", "parser", "data", "unicode_series.csv"), "latin-1"), + (("io", "parser", "data", "sauron.SHIFT_JIS.csv"), "shiftjis"), + ], +) +def test_binary_mode_file_buffers(all_parsers, file_path, encoding, datapath): + # gh-23779: Python csv engine shouldn't error on files opened in binary. + # gh-31575: Python csv engine shouldn't error on files opened in raw binary. + parser = all_parsers + + fpath = datapath(*file_path) + expected = parser.read_csv(fpath, encoding=encoding) + + with open(fpath, encoding=encoding) as fa: + result = parser.read_csv(fa) + assert not fa.closed + tm.assert_frame_equal(expected, result) + + with open(fpath, mode="rb") as fb: + result = parser.read_csv(fb, encoding=encoding) + assert not fb.closed + tm.assert_frame_equal(expected, result) + + with open(fpath, mode="rb", buffering=0) as fb: + result = parser.read_csv(fb, encoding=encoding) + assert not fb.closed + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize("pass_encoding", [True, False]) +def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding): + # see gh-24130 + parser = all_parsers + encoding = encoding_fmt.format(utf_value) + + if parser.engine == "pyarrow" and pass_encoding is True and utf_value in [16, 32]: + # FIXME: this is bad! 
+ pytest.skip("These cases freeze") + + expected = DataFrame({"foo": ["bar"]}) + + with tm.ensure_clean(mode="w+", encoding=encoding, return_filelike=True) as f: + f.write("foo\nbar") + f.seek(0) + + result = parser.read_csv(f, encoding=encoding if pass_encoding else None) + tm.assert_frame_equal(result, expected) + + +def test_encoding_named_temp_file(all_parsers): + # see gh-31819 + parser = all_parsers + encoding = "shift-jis" + + title = "てすと" + data = "こむ" + + expected = DataFrame({title: [data]}) + + with tempfile.NamedTemporaryFile() as f: + f.write(f"{title}\n{data}".encode(encoding)) + + f.seek(0) + + result = parser.read_csv(f, encoding=encoding) + tm.assert_frame_equal(result, expected) + assert not f.closed + + +@pytest.mark.parametrize( + "encoding", ["utf-8", "utf-16", "utf-16-be", "utf-16-le", "utf-32"] +) +def test_parse_encoded_special_characters(encoding): + # GH16218 Verify parsing of data with encoded special characters + # Data contains a Unicode 'FULLWIDTH COLON' (U+FF1A) at position (0,"a") + data = "a\tb\n:foo\t0\nbar\t1\nbaz\t2" # noqa: RUF001 + encoded_data = BytesIO(data.encode(encoding)) + result = read_csv(encoded_data, delimiter="\t", encoding=encoding) + + expected = DataFrame( + data=[[":foo", 0], ["bar", 1], ["baz", 2]], # noqa: RUF001 + columns=["a", "b"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("encoding", ["utf-8", None, "utf-16", "cp1255", "latin-1"]) +def test_encoding_memory_map(all_parsers, encoding): + # GH40986 + parser = all_parsers + expected = DataFrame( + { + "name": ["Raphael", "Donatello", "Miguel Angel", "Leonardo"], + "mask": ["red", "purple", "orange", "blue"], + "weapon": ["sai", "bo staff", "nunchunk", "katana"], + } + ) + with tm.ensure_clean() as file: + expected.to_csv(file, index=False, encoding=encoding) + + if parser.engine == "pyarrow": + msg = "The 'memory_map' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(file, encoding=encoding, memory_map=True) + return + + df = parser.read_csv(file, encoding=encoding, memory_map=True) + tm.assert_frame_equal(df, expected) + + +def test_chunk_splits_multibyte_char(all_parsers): + """ + Chunk splits a multibyte character with memory_map=True + + GH 43540 + """ + parser = all_parsers + # DEFAULT_CHUNKSIZE = 262144, defined in parsers.pyx + df = DataFrame(data=["a" * 127] * 2048) + + # Put two-bytes utf-8 encoded character "ą" at the end of chunk + # utf-8 encoding of "ą" is b'\xc4\x85' + df.iloc[2047] = "a" * 127 + "ą" + with tm.ensure_clean("bug-gh43540.csv") as fname: + df.to_csv(fname, index=False, header=False, encoding="utf-8") + + if parser.engine == "pyarrow": + msg = "The 'memory_map' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(fname, header=None, memory_map=True) + return + + dfr = parser.read_csv(fname, header=None, memory_map=True) + tm.assert_frame_equal(dfr, df) + + +def test_readcsv_memmap_utf8(all_parsers): + """ + GH 43787 + + Test correct handling of UTF-8 chars when memory_map=True and encoding is UTF-8 + """ + lines = [] + line_length = 128 + start_char = " " + end_char = "\U00010080" + # This for loop creates a list of 128-char strings + # consisting of consecutive Unicode chars + for lnum in range(ord(start_char), ord(end_char), line_length): + line = "".join([chr(c) for c in range(lnum, lnum + 0x80)]) + "\n" + try: + line.encode("utf-8") + except UnicodeEncodeError: + continue + lines.append(line) + 
parser = all_parsers + df = DataFrame(lines) + with tm.ensure_clean("utf8test.csv") as fname: + df.to_csv(fname, index=False, header=False, encoding="utf-8") + + if parser.engine == "pyarrow": + msg = "The 'memory_map' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(fname, header=None, memory_map=True, encoding="utf-8") + return + + dfr = parser.read_csv(fname, header=None, memory_map=True, encoding="utf-8") + tm.assert_frame_equal(df, dfr) + + +@pytest.mark.usefixtures("pyarrow_xfail") +@pytest.mark.parametrize("mode", ["w+b", "w+t"]) +def test_not_readable(all_parsers, mode): + # GH43439 + parser = all_parsers + content = b"abcd" + if "t" in mode: + content = "abcd" + with tempfile.SpooledTemporaryFile(mode=mode, encoding="utf-8") as handle: + handle.write(content) + handle.seek(0) + df = parser.read_csv(handle) + expected = DataFrame([], columns=["abcd"]) + tm.assert_frame_equal(df, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_header.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_header.py new file mode 100644 index 0000000000000000000000000000000000000000..0dbd4e3569ad6ddeca3da5ed1e5e73ef0f29ec57 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_header.py @@ -0,0 +1,733 @@ +""" +Tests that the file header is properly handled or inferred +during parsing for all of the parsers defined in parsers.py +""" + +from collections import namedtuple +from io import StringIO + +import numpy as np +import pytest + +from pandas.errors import ParserError + +from pandas import ( + DataFrame, + Index, + MultiIndex, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +@xfail_pyarrow # TypeError: an integer is required +def test_read_with_bad_header(all_parsers): + parser = all_parsers + msg = r"but only \d+ lines in file" + + with pytest.raises(ValueError, match=msg): + s = StringIO(",,") + parser.read_csv(s, header=[10]) + + +def test_negative_header(all_parsers): + # see gh-27779 + parser = all_parsers + data = """1,2,3,4,5 +6,7,8,9,10 +11,12,13,14,15 +""" + with pytest.raises( + ValueError, + match="Passing negative integer to header is invalid. 
" + "For no header, use header=None instead", + ): + parser.read_csv(StringIO(data), header=-1) + + +@pytest.mark.parametrize("header", [([-1, 2, 4]), ([-5, 0])]) +def test_negative_multi_index_header(all_parsers, header): + # see gh-27779 + parser = all_parsers + data = """1,2,3,4,5 + 6,7,8,9,10 + 11,12,13,14,15 + """ + with pytest.raises( + ValueError, match="cannot specify multi-index header with negative integers" + ): + parser.read_csv(StringIO(data), header=header) + + +@pytest.mark.parametrize("header", [True, False]) +def test_bool_header_arg(all_parsers, header): + # see gh-6114 + parser = all_parsers + data = """\ +MyColumn +a +b +a +b""" + msg = "Passing a bool to header is invalid" + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO(data), header=header) + + +@xfail_pyarrow # AssertionError: DataFrame are different +def test_header_with_index_col(all_parsers): + parser = all_parsers + data = """foo,1,2,3 +bar,4,5,6 +baz,7,8,9 +""" + names = ["A", "B", "C"] + result = parser.read_csv(StringIO(data), names=names) + + expected = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["foo", "bar", "baz"], + columns=["A", "B", "C"], + ) + tm.assert_frame_equal(result, expected) + + +def test_header_not_first_line(all_parsers): + parser = all_parsers + data = """got,to,ignore,this,line +got,to,ignore,this,line +index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +""" + data2 = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +""" + + result = parser.read_csv(StringIO(data), header=2, index_col=0) + expected = parser.read_csv(StringIO(data2), header=0, index_col=0) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_header_multi_index(all_parsers): + parser = all_parsers + + data = """\ +C0,,C_l0_g0,C_l0_g1,C_l0_g2 + +C1,,C_l1_g0,C_l1_g1,C_l1_g2 +C2,,C_l2_g0,C_l2_g1,C_l2_g2 +C3,,C_l3_g0,C_l3_g1,C_l3_g2 +R0,R1,,, +R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2 +R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2 +R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2 +R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2 +R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2 +""" + result = parser.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[0, 1]) + data_gen_f = lambda r, c: f"R{r}C{c}" + + data = [[data_gen_f(r, c) for c in range(3)] for r in range(5)] + index = MultiIndex.from_arrays( + [[f"R_l0_g{i}" for i in range(5)], [f"R_l1_g{i}" for i in range(5)]], + names=["R0", "R1"], + ) + columns = MultiIndex.from_arrays( + [ + [f"C_l0_g{i}" for i in range(3)], + [f"C_l1_g{i}" for i in range(3)], + [f"C_l2_g{i}" for i in range(3)], + [f"C_l3_g{i}" for i in range(3)], + ], + names=["C0", "C1", "C2", "C3"], + ) + expected = DataFrame(data, columns=columns, index=index) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs,msg", + [ + ( + {"index_col": ["foo", "bar"]}, + ( + "index_col must only contain " + "row numbers when specifying " + "a multi-index header" + ), + ), + ( + {"index_col": [0, 1], "names": ["foo", "bar"]}, + ("cannot specify names when specifying a multi-index header"), + ), + ( + {"index_col": [0, 1], "usecols": ["foo", "bar"]}, + ("cannot specify usecols when specifying a multi-index header"), + ), + ], +) +def test_header_multi_index_invalid(all_parsers, kwargs, msg): + data = """\ +C0,,C_l0_g0,C_l0_g1,C_l0_g2 + +C1,,C_l1_g0,C_l1_g1,C_l1_g2 +C2,,C_l2_g0,C_l2_g1,C_l2_g2 +C3,,C_l3_g0,C_l3_g1,C_l3_g2 +R0,R1,,, +R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2 +R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2 +R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2 +R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2 
+R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2 +""" + parser = all_parsers + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), header=[0, 1, 2, 3], **kwargs) + + +_TestTuple = namedtuple("_TestTuple", ["first", "second"]) + + +@xfail_pyarrow # TypeError: an integer is required +@pytest.mark.parametrize( + "kwargs", + [ + {"header": [0, 1]}, + { + "skiprows": 3, + "names": [ + ("a", "q"), + ("a", "r"), + ("a", "s"), + ("b", "t"), + ("c", "u"), + ("c", "v"), + ], + }, + { + "skiprows": 3, + "names": [ + _TestTuple("a", "q"), + _TestTuple("a", "r"), + _TestTuple("a", "s"), + _TestTuple("b", "t"), + _TestTuple("c", "u"), + _TestTuple("c", "v"), + ], + }, + ], +) +def test_header_multi_index_common_format1(all_parsers, kwargs): + parser = all_parsers + expected = DataFrame( + [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], + index=["one", "two"], + columns=MultiIndex.from_tuples( + [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")] + ), + ) + data = """,a,a,a,b,c,c +,q,r,s,t,u,v +,,,,,, +one,1,2,3,4,5,6 +two,7,8,9,10,11,12""" + + result = parser.read_csv(StringIO(data), index_col=0, **kwargs) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +@pytest.mark.parametrize( + "kwargs", + [ + {"header": [0, 1]}, + { + "skiprows": 2, + "names": [ + ("a", "q"), + ("a", "r"), + ("a", "s"), + ("b", "t"), + ("c", "u"), + ("c", "v"), + ], + }, + { + "skiprows": 2, + "names": [ + _TestTuple("a", "q"), + _TestTuple("a", "r"), + _TestTuple("a", "s"), + _TestTuple("b", "t"), + _TestTuple("c", "u"), + _TestTuple("c", "v"), + ], + }, + ], +) +def test_header_multi_index_common_format2(all_parsers, kwargs): + parser = all_parsers + expected = DataFrame( + [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], + index=["one", "two"], + columns=MultiIndex.from_tuples( + [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")] + ), + ) + data = """,a,a,a,b,c,c +,q,r,s,t,u,v +one,1,2,3,4,5,6 +two,7,8,9,10,11,12""" + + result = parser.read_csv(StringIO(data), index_col=0, **kwargs) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +@pytest.mark.parametrize( + "kwargs", + [ + {"header": [0, 1]}, + { + "skiprows": 2, + "names": [ + ("a", "q"), + ("a", "r"), + ("a", "s"), + ("b", "t"), + ("c", "u"), + ("c", "v"), + ], + }, + { + "skiprows": 2, + "names": [ + _TestTuple("a", "q"), + _TestTuple("a", "r"), + _TestTuple("a", "s"), + _TestTuple("b", "t"), + _TestTuple("c", "u"), + _TestTuple("c", "v"), + ], + }, + ], +) +def test_header_multi_index_common_format3(all_parsers, kwargs): + parser = all_parsers + expected = DataFrame( + [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], + index=["one", "two"], + columns=MultiIndex.from_tuples( + [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")] + ), + ) + expected = expected.reset_index(drop=True) + data = """a,a,a,b,c,c +q,r,s,t,u,v +1,2,3,4,5,6 +7,8,9,10,11,12""" + + result = parser.read_csv(StringIO(data), index_col=None, **kwargs) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_header_multi_index_common_format_malformed1(all_parsers): + parser = all_parsers + expected = DataFrame( + np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"), + index=Index([1, 7]), + columns=MultiIndex( + levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]], + codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]], + names=["a", "q"], + ), + ) + data = """a,a,a,b,c,c +q,r,s,t,u,v +1,2,3,4,5,6 
+7,8,9,10,11,12""" + + result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0) + tm.assert_frame_equal(expected, result) + + +@xfail_pyarrow # TypeError: an integer is required +def test_header_multi_index_common_format_malformed2(all_parsers): + parser = all_parsers + expected = DataFrame( + np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"), + index=Index([1, 7]), + columns=MultiIndex( + levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]], + codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]], + names=[None, "q"], + ), + ) + + data = """,a,a,b,c,c +q,r,s,t,u,v +1,2,3,4,5,6 +7,8,9,10,11,12""" + + result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0) + tm.assert_frame_equal(expected, result) + + +@xfail_pyarrow # TypeError: an integer is required +def test_header_multi_index_common_format_malformed3(all_parsers): + parser = all_parsers + expected = DataFrame( + np.array([[3, 4, 5, 6], [9, 10, 11, 12]], dtype="int64"), + index=MultiIndex(levels=[[1, 7], [2, 8]], codes=[[0, 1], [0, 1]]), + columns=MultiIndex( + levels=[["a", "b", "c"], ["s", "t", "u", "v"]], + codes=[[0, 1, 2, 2], [0, 1, 2, 3]], + names=[None, "q"], + ), + ) + data = """,a,a,b,c,c +q,r,s,t,u,v +1,2,3,4,5,6 +7,8,9,10,11,12""" + + result = parser.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1]) + tm.assert_frame_equal(expected, result) + + +@xfail_pyarrow # TypeError: an integer is required +def test_header_multi_index_blank_line(all_parsers): + # GH 40442 + parser = all_parsers + data = [[None, None], [1, 2], [3, 4]] + columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")]) + expected = DataFrame(data, columns=columns) + data = "a,b\nA,B\n,\n1,2\n3,4" + result = parser.read_csv(StringIO(data), header=[0, 1]) + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize( + "data,header", [("1,2,3\n4,5,6", None), ("foo,bar,baz\n1,2,3\n4,5,6", 0)] +) +def test_header_names_backward_compat(all_parsers, data, header, request): + # see gh-2539 + parser = all_parsers + + if parser.engine == "pyarrow" and header is not None: + mark = pytest.mark.xfail(reason="DataFrame.columns are different") + request.applymarker(mark) + + expected = parser.read_csv(StringIO("1,2,3\n4,5,6"), names=["a", "b", "c"]) + + result = parser.read_csv(StringIO(data), names=["a", "b", "c"], header=header) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block: cannot infer +@pytest.mark.parametrize("kwargs", [{}, {"index_col": False}]) +def test_read_only_header_no_rows(all_parsers, kwargs): + # See gh-7773 + parser = all_parsers + expected = DataFrame(columns=["a", "b", "c"]) + + result = parser.read_csv(StringIO("a,b,c"), **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs,names", + [ + ({}, [0, 1, 2, 3, 4]), + ( + {"names": ["foo", "bar", "baz", "quux", "panda"]}, + ["foo", "bar", "baz", "quux", "panda"], + ), + ], +) +def test_no_header(all_parsers, kwargs, names): + parser = all_parsers + data = """1,2,3,4,5 +6,7,8,9,10 +11,12,13,14,15 +""" + expected = DataFrame( + [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], columns=names + ) + result = parser.read_csv(StringIO(data), header=None, **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("header", [["a", "b"], "string_header"]) +def test_non_int_header(all_parsers, header): + # see gh-16338 + msg = "header must be integer or list of integers" + data = """1,2\n3,4""" + parser = all_parsers + + with pytest.raises(ValueError, 
match=msg): + parser.read_csv(StringIO(data), header=header) + + +@xfail_pyarrow # TypeError: an integer is required +def test_singleton_header(all_parsers): + # see gh-7757 + data = """a,b,c\n0,1,2\n1,2,3""" + parser = all_parsers + + expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]}) + result = parser.read_csv(StringIO(data), header=[0]) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +@pytest.mark.parametrize( + "data,expected", + [ + ( + "A,A,A,B\none,one,one,two\n0,40,34,0.1", + DataFrame( + [[0, 40, 34, 0.1]], + columns=MultiIndex.from_tuples( + [("A", "one"), ("A", "one.1"), ("A", "one.2"), ("B", "two")] + ), + ), + ), + ( + "A,A,A,B\none,one,one.1,two\n0,40,34,0.1", + DataFrame( + [[0, 40, 34, 0.1]], + columns=MultiIndex.from_tuples( + [("A", "one"), ("A", "one.1"), ("A", "one.1.1"), ("B", "two")] + ), + ), + ), + ( + "A,A,A,B,B\none,one,one.1,two,two\n0,40,34,0.1,0.1", + DataFrame( + [[0, 40, 34, 0.1, 0.1]], + columns=MultiIndex.from_tuples( + [ + ("A", "one"), + ("A", "one.1"), + ("A", "one.1.1"), + ("B", "two"), + ("B", "two.1"), + ] + ), + ), + ), + ], +) +def test_mangles_multi_index(all_parsers, data, expected): + # see gh-18062 + parser = all_parsers + + result = parser.read_csv(StringIO(data), header=[0, 1]) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is requireds +@pytest.mark.parametrize("index_col", [None, [0]]) +@pytest.mark.parametrize( + "columns", [None, (["", "Unnamed"]), (["Unnamed", ""]), (["Unnamed", "NotUnnamed"])] +) +def test_multi_index_unnamed(all_parsers, index_col, columns): + # see gh-23687 + # + # When specifying a multi-index header, make sure that + # we don't error just because one of the rows in our header + # has ALL column names containing the string "Unnamed". The + # correct condition to check is whether the row contains + # ALL columns that did not have names (and instead were given + # placeholder ones). + parser = all_parsers + header = [0, 1] + + if index_col is None: + data = ",".join(columns or ["", ""]) + "\n0,1\n2,3\n4,5\n" + else: + data = ",".join([""] + (columns or ["", ""])) + "\n,0,1\n0,2,3\n1,4,5\n" + + result = parser.read_csv(StringIO(data), header=header, index_col=index_col) + exp_columns = [] + + if columns is None: + columns = ["", "", ""] + + for i, col in enumerate(columns): + if not col: # Unnamed. 
+ col = f"Unnamed: {i if index_col is None else i + 1}_level_0" + + exp_columns.append(col) + + columns = MultiIndex.from_tuples(zip(exp_columns, ["0", "1"])) + expected = DataFrame([[2, 3], [4, 5]], columns=columns) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Expected 2 columns, got 3 +def test_names_longer_than_header_but_equal_with_data_rows(all_parsers): + # GH#38453 + parser = all_parsers + data = """a, b +1,2,3 +5,6,4 +""" + result = parser.read_csv(StringIO(data), header=0, names=["A", "B", "C"]) + expected = DataFrame({"A": [1, 5], "B": [2, 6], "C": [3, 4]}) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_read_csv_multiindex_columns(all_parsers): + # GH#6051 + parser = all_parsers + + s1 = "Male, Male, Male, Female, Female\nR, R, L, R, R\n.86, .67, .88, .78, .81" + s2 = ( + "Male, Male, Male, Female, Female\n" + "R, R, L, R, R\n" + ".86, .67, .88, .78, .81\n" + ".86, .67, .88, .78, .82" + ) + + mi = MultiIndex.from_tuples( + [ + ("Male", "R"), + (" Male", " R"), + (" Male", " L"), + (" Female", " R"), + (" Female", " R.1"), + ] + ) + expected = DataFrame( + [[0.86, 0.67, 0.88, 0.78, 0.81], [0.86, 0.67, 0.88, 0.78, 0.82]], columns=mi + ) + + df1 = parser.read_csv(StringIO(s1), header=[0, 1]) + tm.assert_frame_equal(df1, expected.iloc[:1]) + df2 = parser.read_csv(StringIO(s2), header=[0, 1]) + tm.assert_frame_equal(df2, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_read_csv_multi_header_length_check(all_parsers): + # GH#43102 + parser = all_parsers + + case = """row11,row12,row13 +row21,row22, row23 +row31,row32 +""" + + with pytest.raises( + ParserError, match="Header rows must have an equal number of columns." + ): + parser.read_csv(StringIO(case), header=[0, 2]) + + +@skip_pyarrow # CSV parse error: Expected 3 columns, got 2 +def test_header_none_and_implicit_index(all_parsers): + # GH#22144 + parser = all_parsers + data = "x,1,5\ny,2\nz,3\n" + result = parser.read_csv(StringIO(data), names=["a", "b"], header=None) + expected = DataFrame( + {"a": [1, 2, 3], "b": [5, np.nan, np.nan]}, index=["x", "y", "z"] + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # regex mismatch "CSV parse error: Expected 2 columns, got " +def test_header_none_and_implicit_index_in_second_row(all_parsers): + # GH#22144 + parser = all_parsers + data = "x,1\ny,2,5\nz,3\n" + with pytest.raises(ParserError, match="Expected 2 fields in line 2, saw 3"): + parser.read_csv(StringIO(data), names=["a", "b"], header=None) + + +def test_header_none_and_on_bad_lines_skip(all_parsers): + # GH#22144 + parser = all_parsers + data = "x,1\ny,2,5\nz,3\n" + result = parser.read_csv( + StringIO(data), names=["a", "b"], header=None, on_bad_lines="skip" + ) + expected = DataFrame({"a": ["x", "z"], "b": [1, 3]}) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is requireds +def test_header_missing_rows(all_parsers): + # GH#47400 + parser = all_parsers + data = """a,b +1,2 +""" + msg = r"Passed header=\[0,1,2\], len of 3, but only 2 lines in file" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), header=[0, 1, 2]) + + +# ValueError: The 'delim_whitespace' option is not supported with the 'pyarrow' engine +@xfail_pyarrow +def test_header_multiple_whitespaces(all_parsers): + # GH#54931 + parser = all_parsers + data = """aa bb(1,1) cc(1,1) + 0 2 3.5""" + + result = parser.read_csv(StringIO(data), sep=r"\s+") + expected = 
DataFrame({"aa": [0], "bb(1,1)": 2, "cc(1,1)": 3.5}) + tm.assert_frame_equal(result, expected) + + +# ValueError: The 'delim_whitespace' option is not supported with the 'pyarrow' engine +@xfail_pyarrow +def test_header_delim_whitespace(all_parsers): + # GH#54918 + parser = all_parsers + data = """a,b +1,2 +3,4 + """ + + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv(StringIO(data), delim_whitespace=True) + expected = DataFrame({"a,b": ["1,2", "3,4"]}) + tm.assert_frame_equal(result, expected) + + +def test_usecols_no_header_pyarrow(pyarrow_parser_only): + parser = pyarrow_parser_only + data = """ +a,i,x +b,j,y +""" + result = parser.read_csv( + StringIO(data), + header=None, + usecols=[0, 1], + dtype="string[pyarrow]", + dtype_backend="pyarrow", + engine="pyarrow", + ) + expected = DataFrame([["a", "i"], ["b", "j"]], dtype="string[pyarrow]") + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_index_col.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_index_col.py new file mode 100644 index 0000000000000000000000000000000000000000..ba15d061b2deba294cd054d58eac93e4647e8a62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_index_col.py @@ -0,0 +1,376 @@ +""" +Tests that the specified index column (a.k.a "index_col") +is properly handled or inferred during parsing for all of +the parsers defined in parsers.py +""" +from io import StringIO + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + MultiIndex, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +@pytest.mark.parametrize("with_header", [True, False]) +def test_index_col_named(all_parsers, with_header): + parser = all_parsers + no_header = """\ +KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 +KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" + header = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n" + + if with_header: + data = header + no_header + + result = parser.read_csv(StringIO(data), index_col="ID") + expected = parser.read_csv(StringIO(data), header=0).set_index("ID") + tm.assert_frame_equal(result, expected) + else: + data = no_header + msg = "Index ID invalid" + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), index_col="ID") + + +def test_index_col_named2(all_parsers): + parser = all_parsers + data = """\ +1,2,3,4,hello +5,6,7,8,world +9,10,11,12,foo +""" + + expected = DataFrame( + {"a": [1, 5, 9], "b": [2, 6, 10], "c": [3, 7, 11], "d": [4, 8, 12]}, + index=Index(["hello", "world", "foo"], name="message"), + ) + names = ["a", "b", "c", "d", "message"] + + result = parser.read_csv(StringIO(data), names=names, index_col=["message"]) + tm.assert_frame_equal(result, expected) + + +def 
test_index_col_is_true(all_parsers): + # see gh-9798 + data = "a,b\n1,2" + parser = all_parsers + + msg = "The value of index_col couldn't be 'True'" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), index_col=True) + + +@skip_pyarrow # CSV parse error: Expected 3 columns, got 4 +def test_infer_index_col(all_parsers): + data = """A,B,C +foo,1,2,3 +bar,4,5,6 +baz,7,8,9 +""" + parser = all_parsers + result = parser.read_csv(StringIO(data)) + + expected = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + index=["foo", "bar", "baz"], + columns=["A", "B", "C"], + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +@pytest.mark.parametrize( + "index_col,kwargs", + [ + (None, {"columns": ["x", "y", "z"]}), + (False, {"columns": ["x", "y", "z"]}), + (0, {"columns": ["y", "z"], "index": Index([], name="x")}), + (1, {"columns": ["x", "z"], "index": Index([], name="y")}), + ("x", {"columns": ["y", "z"], "index": Index([], name="x")}), + ("y", {"columns": ["x", "z"], "index": Index([], name="y")}), + ( + [0, 1], + { + "columns": ["z"], + "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]), + }, + ), + ( + ["x", "y"], + { + "columns": ["z"], + "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]), + }, + ), + ( + [1, 0], + { + "columns": ["z"], + "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]), + }, + ), + ( + ["y", "x"], + { + "columns": ["z"], + "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]), + }, + ), + ], +) +def test_index_col_empty_data(all_parsers, index_col, kwargs): + data = "x,y,z" + parser = all_parsers + result = parser.read_csv(StringIO(data), index_col=index_col) + + expected = DataFrame(**kwargs) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_empty_with_index_col_false(all_parsers): + # see gh-10413 + data = "x,y" + parser = all_parsers + result = parser.read_csv(StringIO(data), index_col=False) + + expected = DataFrame(columns=["x", "y"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "index_names", + [ + ["", ""], + ["foo", ""], + ["", "bar"], + ["foo", "bar"], + ["NotReallyUnnamed", "Unnamed: 0"], + ], +) +def test_multi_index_naming(all_parsers, index_names, request): + parser = all_parsers + + if parser.engine == "pyarrow" and "" in index_names: + mark = pytest.mark.xfail(reason="One case raises, others are wrong") + request.applymarker(mark) + + # We don't want empty index names being replaced with "Unnamed: 0" + data = ",".join(index_names + ["col\na,c,1\na,d,2\nb,c,3\nb,d,4"]) + result = parser.read_csv(StringIO(data), index_col=[0, 1]) + + expected = DataFrame( + {"col": [1, 2, 3, 4]}, index=MultiIndex.from_product([["a", "b"], ["c", "d"]]) + ) + expected.index.names = [name if name else None for name in index_names] + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: Found non-unique column index +def test_multi_index_naming_not_all_at_beginning(all_parsers): + parser = all_parsers + data = ",Unnamed: 2,\na,c,1\na,d,2\nb,c,3\nb,d,4" + result = parser.read_csv(StringIO(data), index_col=[0, 2]) + + expected = DataFrame( + {"Unnamed: 2": ["c", "d", "c", "d"]}, + index=MultiIndex( + levels=[["a", "b"], [1, 2, 3, 4]], codes=[[0, 0, 1, 1], [0, 1, 2, 3]] + ), + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: Found non-unique column index +def test_no_multi_index_level_names_empty(all_parsers): + # GH 10984 + parser = 
all_parsers + midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)]) + expected = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=midx, + columns=["x", "y", "z"], + ) + with tm.ensure_clean() as path: + expected.to_csv(path) + result = parser.read_csv(path, index_col=[0, 1, 2]) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_header_with_index_col(all_parsers): + # GH 33476 + parser = all_parsers + data = """ +I11,A,A +I12,B,B +I2,1,3 +""" + midx = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"]) + idx = Index(["I2"]) + expected = DataFrame([[1, 3]], index=idx, columns=midx) + + result = parser.read_csv(StringIO(data), index_col=0, header=[0, 1]) + tm.assert_frame_equal(result, expected) + + col_idx = Index(["A", "A.1"]) + idx = Index(["I12", "I2"], name="I11") + expected = DataFrame([["B", "B"], ["1", "3"]], index=idx, columns=col_idx) + + result = parser.read_csv(StringIO(data), index_col="I11", header=0) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.slow +def test_index_col_large_csv(all_parsers, monkeypatch): + # https://github.com/pandas-dev/pandas/issues/37094 + parser = all_parsers + + ARR_LEN = 100 + df = DataFrame( + { + "a": range(ARR_LEN + 1), + "b": np.random.default_rng(2).standard_normal(ARR_LEN + 1), + } + ) + + with tm.ensure_clean() as path: + df.to_csv(path, index=False) + with monkeypatch.context() as m: + m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN) + result = parser.read_csv(path, index_col=[0]) + + tm.assert_frame_equal(result, df.set_index("a")) + + +@xfail_pyarrow # TypeError: an integer is required +def test_index_col_multiindex_columns_no_data(all_parsers): + # GH#38292 + parser = all_parsers + result = parser.read_csv( + StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1], index_col=0 + ) + expected = DataFrame( + [], + index=Index([]), + columns=MultiIndex.from_arrays( + [["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"] + ), + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_index_col_header_no_data(all_parsers): + # GH#38292 + parser = all_parsers + result = parser.read_csv(StringIO("a0,a1,a2\n"), header=[0], index_col=0) + expected = DataFrame( + [], + columns=["a1", "a2"], + index=Index([], name="a0"), + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_multiindex_columns_no_data(all_parsers): + # GH#38292 + parser = all_parsers + result = parser.read_csv(StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1]) + expected = DataFrame( + [], columns=MultiIndex.from_arrays([["a0", "a1", "a2"], ["b0", "b1", "b2"]]) + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_multiindex_columns_index_col_with_data(all_parsers): + # GH#38292 + parser = all_parsers + result = parser.read_csv( + StringIO("a0,a1,a2\nb0,b1,b2\ndata,data,data"), header=[0, 1], index_col=0 + ) + expected = DataFrame( + [["data", "data"]], + columns=MultiIndex.from_arrays( + [["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"] + ), + index=Index(["data"]), + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Empty CSV file or block +def test_infer_types_boolean_sum(all_parsers): + # GH#44079 + parser = all_parsers + result = parser.read_csv( + StringIO("0,1"), + names=["a", "b"], + index_col=["a"], + dtype={"a": "UInt8"}, + ) + expected = DataFrame( 
+ data={ + "a": [ + 0, + ], + "b": [1], + } + ).set_index("a") + # Not checking index type now, because the C parser will return a + # index column of dtype 'object', and the Python parser will return a + # index column of dtype 'int64'. + tm.assert_frame_equal(result, expected, check_index_type=False) + + +@pytest.mark.parametrize("dtype, val", [(object, "01"), ("int64", 1)]) +def test_specify_dtype_for_index_col(all_parsers, dtype, val, request): + # GH#9435 + data = "a,b\n01,2" + parser = all_parsers + if dtype == object and parser.engine == "pyarrow": + request.applymarker( + pytest.mark.xfail(reason="Cannot disable type-inference for pyarrow engine") + ) + result = parser.read_csv(StringIO(data), index_col="a", dtype={"a": dtype}) + expected = DataFrame({"b": [2]}, index=Index([val], name="a")) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_multiindex_columns_not_leading_index_col(all_parsers): + # GH#38549 + parser = all_parsers + data = """a,b,c,d +e,f,g,h +x,y,1,2 +""" + result = parser.read_csv( + StringIO(data), + header=[0, 1], + index_col=1, + ) + cols = MultiIndex.from_tuples( + [("a", "e"), ("c", "g"), ("d", "h")], names=["b", "f"] + ) + expected = DataFrame([["x", 1, 2]], columns=cols, index=["y"]) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_mangle_dupes.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_mangle_dupes.py new file mode 100644 index 0000000000000000000000000000000000000000..1d245f81f027c6d5fc86bb50f99c805724a9619a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_mangle_dupes.py @@ -0,0 +1,179 @@ +""" +Tests that duplicate columns are handled appropriately when parsed by the +CSV engine. In general, the expected result is that they are either thoroughly +de-duplicated (if mangling requested) or ignored otherwise. 
+""" +from io import StringIO + +import pytest + +from pandas import DataFrame +import pandas._testing as tm + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") + + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@xfail_pyarrow # ValueError: Found non-unique column index +def test_basic(all_parsers): + parser = all_parsers + + data = "a,a,b,b,b\n1,2,3,4,5" + result = parser.read_csv(StringIO(data), sep=",") + + expected = DataFrame([[1, 2, 3, 4, 5]], columns=["a", "a.1", "b", "b.1", "b.2"]) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: Found non-unique column index +def test_basic_names(all_parsers): + # See gh-7160 + parser = all_parsers + + data = "a,b,a\n0,1,2\n3,4,5" + expected = DataFrame([[0, 1, 2], [3, 4, 5]], columns=["a", "b", "a.1"]) + + result = parser.read_csv(StringIO(data)) + tm.assert_frame_equal(result, expected) + + +def test_basic_names_raise(all_parsers): + # See gh-7160 + parser = all_parsers + + data = "0,1,2\n3,4,5" + with pytest.raises(ValueError, match="Duplicate names"): + parser.read_csv(StringIO(data), names=["a", "b", "a"]) + + +@xfail_pyarrow # ValueError: Found non-unique column index +@pytest.mark.parametrize( + "data,expected", + [ + ("a,a,a.1\n1,2,3", DataFrame([[1, 2, 3]], columns=["a", "a.2", "a.1"])), + ( + "a,a,a.1,a.1.1,a.1.1.1,a.1.1.1.1\n1,2,3,4,5,6", + DataFrame( + [[1, 2, 3, 4, 5, 6]], + columns=["a", "a.2", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"], + ), + ), + ( + "a,a,a.3,a.1,a.2,a,a\n1,2,3,4,5,6,7", + DataFrame( + [[1, 2, 3, 4, 5, 6, 7]], + columns=["a", "a.4", "a.3", "a.1", "a.2", "a.5", "a.6"], + ), + ), + ], +) +def test_thorough_mangle_columns(all_parsers, data, expected): + # see gh-17060 + parser = all_parsers + + result = parser.read_csv(StringIO(data)) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data,names,expected", + [ + ( + "a,b,b\n1,2,3", + ["a.1", "a.1", "a.1.1"], + DataFrame( + [["a", "b", "b"], ["1", "2", "3"]], columns=["a.1", "a.1.1", "a.1.1.1"] + ), + ), + ( + "a,b,c,d,e,f\n1,2,3,4,5,6", + ["a", "a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"], + DataFrame( + [["a", "b", "c", "d", "e", "f"], ["1", "2", "3", "4", "5", "6"]], + columns=["a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1", "a.1.1.1.1.1"], + ), + ), + ( + "a,b,c,d,e,f,g\n1,2,3,4,5,6,7", + ["a", "a", "a.3", "a.1", "a.2", "a", "a"], + DataFrame( + [ + ["a", "b", "c", "d", "e", "f", "g"], + ["1", "2", "3", "4", "5", "6", "7"], + ], + columns=["a", "a.1", "a.3", "a.1.1", "a.2", "a.2.1", "a.3.1"], + ), + ), + ], +) +def test_thorough_mangle_names(all_parsers, data, names, expected): + # see gh-17095 + parser = all_parsers + + with pytest.raises(ValueError, match="Duplicate names"): + parser.read_csv(StringIO(data), names=names) + + +@xfail_pyarrow # AssertionError: DataFrame.columns are different +def test_mangled_unnamed_placeholders(all_parsers): + # xref gh-13017 + orig_key = "0" + parser = all_parsers + + orig_value = [1, 2, 3] + df = DataFrame({orig_key: orig_value}) + + # This test recursively updates `df`. 
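+ # Each df.to_csv()/read_csv() round trip writes the default index out as an
+ # unnamed leading column, so every pass adds one more "Unnamed: 0"-style placeholder.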
+ for i in range(3): + expected = DataFrame() + + for j in range(i + 1): + col_name = "Unnamed: 0" + f".{1*j}" * min(j, 1) + expected.insert(loc=0, column=col_name, value=[0, 1, 2]) + + expected[orig_key] = orig_value + df = parser.read_csv(StringIO(df.to_csv())) + + tm.assert_frame_equal(df, expected) + + +@xfail_pyarrow # ValueError: Found non-unique column index +def test_mangle_dupe_cols_already_exists(all_parsers): + # GH#14704 + parser = all_parsers + + data = "a,a,a.1,a,a.3,a.1,a.1.1\n1,2,3,4,5,6,7" + result = parser.read_csv(StringIO(data)) + expected = DataFrame( + [[1, 2, 3, 4, 5, 6, 7]], + columns=["a", "a.2", "a.1", "a.4", "a.3", "a.1.2", "a.1.1"], + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: Found non-unique column index +def test_mangle_dupe_cols_already_exists_unnamed_col(all_parsers): + # GH#14704 + parser = all_parsers + + data = ",Unnamed: 0,,Unnamed: 2\n1,2,3,4" + result = parser.read_csv(StringIO(data)) + expected = DataFrame( + [[1, 2, 3, 4]], + columns=["Unnamed: 0.1", "Unnamed: 0", "Unnamed: 2.1", "Unnamed: 2"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("usecol, engine", [([0, 1, 1], "python"), ([0, 1, 1], "c")]) +def test_mangle_cols_names(all_parsers, usecol, engine): + # GH 11823 + parser = all_parsers + data = "1,2,3" + names = ["A", "A", "B"] + with pytest.raises(ValueError, match="Duplicate names"): + parser.read_csv(StringIO(data), names=names, usecols=usecol, engine=engine) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_multi_thread.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_multi_thread.py new file mode 100644 index 0000000000000000000000000000000000000000..da9b9bddd30cdedacfa54fbeafb43b60c852f2e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_multi_thread.py @@ -0,0 +1,150 @@ +""" +Tests multithreading behaviour for reading and +parsing files for each parser defined in parsers.py +""" +from contextlib import ExitStack +from io import BytesIO +from multiprocessing.pool import ThreadPool + +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") + +# We'll probably always skip these for pyarrow +# Maybe we'll add our own tests for pyarrow too +pytestmark = [ + pytest.mark.single_cpu, + pytest.mark.slow, +] + + +@xfail_pyarrow # ValueError: Found non-unique column index +def test_multi_thread_string_io_read_csv(all_parsers): + # see gh-11786 + parser = all_parsers + max_row_range = 100 + num_files = 10 + + bytes_to_df = ( + "\n".join([f"{i:d},{i:d},{i:d}" for i in range(max_row_range)]).encode() + for _ in range(num_files) + ) + + # Read all files in many threads. + with ExitStack() as stack: + files = [stack.enter_context(BytesIO(b)) for b in bytes_to_df] + + pool = stack.enter_context(ThreadPool(8)) + + results = pool.map(parser.read_csv, files) + first_result = results[0] + + for result in results: + tm.assert_frame_equal(first_result, result) + + +def _generate_multi_thread_dataframe(parser, path, num_rows, num_tasks): + """ + Generate a DataFrame via multi-thread. + + Parameters + ---------- + parser : BaseParser + The parser object to use for reading the data. + path : str + The location of the CSV file to read. + num_rows : int + The number of rows to read per task. + num_tasks : int + The number of tasks to use for reading this DataFrame. 
+ + Returns + ------- + df : DataFrame + """ + + def reader(arg): + """ + Create a reader for part of the CSV. + + Parameters + ---------- + arg : tuple + A tuple of the following: + + * start : int + The starting row to start for parsing CSV + * nrows : int + The number of rows to read. + + Returns + ------- + df : DataFrame + """ + start, nrows = arg + + if not start: + return parser.read_csv( + path, index_col=0, header=0, nrows=nrows, parse_dates=["date"] + ) + + return parser.read_csv( + path, + index_col=0, + header=None, + skiprows=int(start) + 1, + nrows=nrows, + parse_dates=[9], + ) + + tasks = [ + (num_rows * i // num_tasks, num_rows // num_tasks) for i in range(num_tasks) + ] + + with ThreadPool(processes=num_tasks) as pool: + results = pool.map(reader, tasks) + + header = results[0].columns + + for r in results[1:]: + r.columns = header + + final_dataframe = pd.concat(results) + return final_dataframe + + +@xfail_pyarrow # ValueError: The 'nrows' option is not supported +def test_multi_thread_path_multipart_read_csv(all_parsers): + # see gh-11786 + num_tasks = 4 + num_rows = 48 + + parser = all_parsers + file_name = "__thread_pool_reader__.csv" + df = DataFrame( + { + "a": np.random.default_rng(2).random(num_rows), + "b": np.random.default_rng(2).random(num_rows), + "c": np.random.default_rng(2).random(num_rows), + "d": np.random.default_rng(2).random(num_rows), + "e": np.random.default_rng(2).random(num_rows), + "foo": ["foo"] * num_rows, + "bar": ["bar"] * num_rows, + "baz": ["baz"] * num_rows, + "date": pd.date_range("20000101 09:00:00", periods=num_rows, freq="s"), + "int": np.arange(num_rows, dtype="int64"), + } + ) + + with tm.ensure_clean(file_name) as path: + df.to_csv(path) + + final_dataframe = _generate_multi_thread_dataframe( + parser, path, num_rows, num_tasks + ) + tm.assert_frame_equal(df, final_dataframe) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_na_values.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_na_values.py new file mode 100644 index 0000000000000000000000000000000000000000..ca106fa772e822ad6611ab4468294c52918e07c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_na_values.py @@ -0,0 +1,771 @@ +""" +Tests that NA values are properly handled during +parsing for all of the parsers defined in parsers.py +""" +from io import StringIO + +import numpy as np +import pytest + +from pandas._libs.parsers import STR_NA_VALUES + +from pandas import ( + DataFrame, + Index, + MultiIndex, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +def test_string_nas(all_parsers): + parser = all_parsers + data = """A,B,C +a,b,c +d,,f +,g,h +""" + result = parser.read_csv(StringIO(data)) + expected = DataFrame( + [["a", "b", "c"], ["d", np.nan, "f"], [np.nan, "g", "h"]], + columns=["A", "B", "C"], + ) + if parser.engine == "pyarrow": + expected.loc[2, "A"] = None + expected.loc[1, "B"] = None + tm.assert_frame_equal(result, expected) + + +def test_detect_string_na(all_parsers): + parser = all_parsers + data = """A,B +foo,bar +NA,baz +NaN,nan +""" + expected = DataFrame( + [["foo", "bar"], [np.nan, "baz"], [np.nan, np.nan]], columns=["A", "B"] + ) + if parser.engine == "pyarrow": + expected.loc[[1, 2], "A"] = None + expected.loc[2, "B"] = None + result = 
parser.read_csv(StringIO(data)) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "na_values", + [ + ["-999.0", "-999"], + [-999, -999.0], + [-999.0, -999], + ["-999.0"], + ["-999"], + [-999.0], + [-999], + ], +) +@pytest.mark.parametrize( + "data", + [ + """A,B +-999,1.2 +2,-999 +3,4.5 +""", + """A,B +-999,1.200 +2,-999.000 +3,4.500 +""", + ], +) +def test_non_string_na_values(all_parsers, data, na_values, request): + # see gh-3611: with an odd float format, we can't match + # the string "999.0" exactly but still need float matching + parser = all_parsers + expected = DataFrame([[np.nan, 1.2], [2.0, np.nan], [3.0, 4.5]], columns=["A", "B"]) + + if parser.engine == "pyarrow" and not all(isinstance(x, str) for x in na_values): + msg = "The 'pyarrow' engine requires all na_values to be strings" + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO(data), na_values=na_values) + return + elif parser.engine == "pyarrow" and "-999.000" in data: + # bc the pyarrow engine does not include the float-ified version + # of "-999" -> -999, it does not match the entry with the trailing + # zeros, so "-999.000" is not treated as null. + mark = pytest.mark.xfail( + reason="pyarrow engined does not recognize equivalent floats" + ) + request.applymarker(mark) + + result = parser.read_csv(StringIO(data), na_values=na_values) + tm.assert_frame_equal(result, expected) + + +def test_default_na_values(all_parsers): + _NA_VALUES = { + "-1.#IND", + "1.#QNAN", + "1.#IND", + "-1.#QNAN", + "#N/A", + "N/A", + "n/a", + "NA", + "", + "#NA", + "NULL", + "null", + "NaN", + "nan", + "-NaN", + "-nan", + "#N/A N/A", + "", + "None", + } + assert _NA_VALUES == STR_NA_VALUES + + parser = all_parsers + nv = len(_NA_VALUES) + + def f(i, v): + if i == 0: + buf = "" + elif i > 0: + buf = "".join([","] * i) + + buf = f"{buf}{v}" + + if i < nv - 1: + joined = "".join([","] * (nv - i - 1)) + buf = f"{buf}{joined}" + + return buf + + data = StringIO("\n".join([f(i, v) for i, v in enumerate(_NA_VALUES)])) + expected = DataFrame(np.nan, columns=range(nv), index=range(nv)) + + result = parser.read_csv(data, header=None) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("na_values", ["baz", ["baz"]]) +def test_custom_na_values(all_parsers, na_values): + parser = all_parsers + data = """A,B,C +ignore,this,row +1,NA,3 +-1.#IND,5,baz +7,8,NaN +""" + expected = DataFrame( + [[1.0, np.nan, 3], [np.nan, 5, np.nan], [7, 8, np.nan]], columns=["A", "B", "C"] + ) + if parser.engine == "pyarrow": + msg = "skiprows argument must be an integer when using engine='pyarrow'" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1]) + return + + result = parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1]) + tm.assert_frame_equal(result, expected) + + +def test_bool_na_values(all_parsers): + data = """A,B,C +True,False,True +NA,True,False +False,NA,True""" + parser = all_parsers + result = parser.read_csv(StringIO(data)) + expected = DataFrame( + { + "A": np.array([True, np.nan, False], dtype=object), + "B": np.array([False, True, np.nan], dtype=object), + "C": [True, False, True], + } + ) + if parser.engine == "pyarrow": + expected.loc[1, "A"] = None + expected.loc[2, "B"] = None + tm.assert_frame_equal(result, expected) + + +def test_na_value_dict(all_parsers): + data = """A,B,C +foo,bar,NA +bar,foo,foo +foo,bar,NA +bar,foo,foo""" + parser = all_parsers + + if parser.engine == "pyarrow": + msg = "pyarrow engine doesn't 
support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]}) + return + + df = parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]}) + expected = DataFrame( + { + "A": [np.nan, "bar", np.nan, "bar"], + "B": [np.nan, "foo", np.nan, "foo"], + "C": [np.nan, "foo", np.nan, "foo"], + } + ) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "index_col,expected", + [ + ( + [0], + DataFrame({"b": [np.nan], "c": [1], "d": [5]}, index=Index([0], name="a")), + ), + ( + [0, 2], + DataFrame( + {"b": [np.nan], "d": [5]}, + index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]), + ), + ), + ( + ["a", "c"], + DataFrame( + {"b": [np.nan], "d": [5]}, + index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]), + ), + ), + ], +) +def test_na_value_dict_multi_index(all_parsers, index_col, expected): + data = """\ +a,b,c,d +0,NA,1,5 +""" + parser = all_parsers + result = parser.read_csv(StringIO(data), na_values=set(), index_col=index_col) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs,expected", + [ + ( + {}, + DataFrame( + { + "A": ["a", "b", np.nan, "d", "e", np.nan, "g"], + "B": [1, 2, 3, 4, 5, 6, 7], + "C": ["one", "two", "three", np.nan, "five", np.nan, "seven"], + } + ), + ), + ( + {"na_values": {"A": [], "C": []}, "keep_default_na": False}, + DataFrame( + { + "A": ["a", "b", "", "d", "e", "nan", "g"], + "B": [1, 2, 3, 4, 5, 6, 7], + "C": ["one", "two", "three", "nan", "five", "", "seven"], + } + ), + ), + ( + {"na_values": ["a"], "keep_default_na": False}, + DataFrame( + { + "A": [np.nan, "b", "", "d", "e", "nan", "g"], + "B": [1, 2, 3, 4, 5, 6, 7], + "C": ["one", "two", "three", "nan", "five", "", "seven"], + } + ), + ), + ( + {"na_values": {"A": [], "C": []}}, + DataFrame( + { + "A": ["a", "b", np.nan, "d", "e", np.nan, "g"], + "B": [1, 2, 3, 4, 5, 6, 7], + "C": ["one", "two", "three", np.nan, "five", np.nan, "seven"], + } + ), + ), + ], +) +def test_na_values_keep_default(all_parsers, kwargs, expected, request): + data = """\ +A,B,C +a,1,one +b,2,two +,3,three +d,4,nan +e,5,five +nan,6, +g,7,seven +""" + parser = all_parsers + if parser.engine == "pyarrow": + if "na_values" in kwargs and isinstance(kwargs["na_values"], dict): + msg = "The pyarrow engine doesn't support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), **kwargs) + return + mark = pytest.mark.xfail() + request.applymarker(mark) + + result = parser.read_csv(StringIO(data), **kwargs) + tm.assert_frame_equal(result, expected) + + +def test_no_na_values_no_keep_default(all_parsers): + # see gh-4318: passing na_values=None and + # keep_default_na=False yields 'None" as a na_value + data = """\ +A,B,C +a,1,None +b,2,two +,3,None +d,4,nan +e,5,five +nan,6, +g,7,seven +""" + parser = all_parsers + result = parser.read_csv(StringIO(data), keep_default_na=False) + + expected = DataFrame( + { + "A": ["a", "b", "", "d", "e", "nan", "g"], + "B": [1, 2, 3, 4, 5, 6, 7], + "C": ["None", "two", "None", "nan", "five", "", "seven"], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_no_keep_default_na_dict_na_values(all_parsers): + # see gh-19227 + data = "a,b\n,2" + parser = all_parsers + + if parser.engine == "pyarrow": + msg = "The pyarrow engine doesn't support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), na_values={"b": ["2"]}, 
keep_default_na=False + ) + return + + result = parser.read_csv( + StringIO(data), na_values={"b": ["2"]}, keep_default_na=False + ) + expected = DataFrame({"a": [""], "b": [np.nan]}) + tm.assert_frame_equal(result, expected) + + +def test_no_keep_default_na_dict_na_scalar_values(all_parsers): + # see gh-19227 + # + # Scalar values shouldn't cause the parsing to crash or fail. + data = "a,b\n1,2" + parser = all_parsers + + if parser.engine == "pyarrow": + msg = "The pyarrow engine doesn't support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False) + return + + df = parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False) + expected = DataFrame({"a": [1], "b": [np.nan]}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("col_zero_na_values", [113125, "113125"]) +def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_values): + # see gh-19227 + data = """\ +113125,"blah","/blaha",kjsdkj,412.166,225.874,214.008 +729639,"qwer","",asdfkj,466.681,,252.373 +""" + parser = all_parsers + expected = DataFrame( + { + 0: [np.nan, 729639.0], + 1: [np.nan, "qwer"], + 2: ["/blaha", np.nan], + 3: ["kjsdkj", "asdfkj"], + 4: [412.166, 466.681], + 5: ["225.874", ""], + 6: [np.nan, 252.373], + } + ) + + if parser.engine == "pyarrow": + msg = "The pyarrow engine doesn't support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), + header=None, + keep_default_na=False, + na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values}, + ) + return + + result = parser.read_csv( + StringIO(data), + header=None, + keep_default_na=False, + na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values}, + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # mismatched dtypes in both cases, FutureWarning in the True case +@pytest.mark.parametrize( + "na_filter,row_data", + [ + (True, [[1, "A"], [np.nan, np.nan], [3, "C"]]), + (False, [["1", "A"], ["nan", "B"], ["3", "C"]]), + ], +) +def test_na_values_na_filter_override(all_parsers, na_filter, row_data): + data = """\ +A,B +1,A +nan,B +3,C +""" + parser = all_parsers + result = parser.read_csv(StringIO(data), na_values=["B"], na_filter=na_filter) + + expected = DataFrame(row_data, columns=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Expected 8 columns, got 5: +def test_na_trailing_columns(all_parsers): + parser = all_parsers + data = """Date,Currency,Symbol,Type,Units,UnitPrice,Cost,Tax +2012-03-14,USD,AAPL,BUY,1000 +2012-05-12,USD,SBUX,SELL,500""" + + # Trailing columns should be all NaN. 
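+ # The header names eight columns but each data row only supplies the first five values.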
+ result = parser.read_csv(StringIO(data)) + expected = DataFrame( + [ + ["2012-03-14", "USD", "AAPL", "BUY", 1000, np.nan, np.nan, np.nan], + ["2012-05-12", "USD", "SBUX", "SELL", 500, np.nan, np.nan, np.nan], + ], + columns=[ + "Date", + "Currency", + "Symbol", + "Type", + "Units", + "UnitPrice", + "Cost", + "Tax", + ], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "na_values,row_data", + [ + (1, [[np.nan, 2.0], [2.0, np.nan]]), + ({"a": 2, "b": 1}, [[1.0, 2.0], [np.nan, np.nan]]), + ], +) +def test_na_values_scalar(all_parsers, na_values, row_data): + # see gh-12224 + parser = all_parsers + names = ["a", "b"] + data = "1,2\n2,1" + + if parser.engine == "pyarrow" and isinstance(na_values, dict): + if isinstance(na_values, dict): + err = ValueError + msg = "The pyarrow engine doesn't support passing a dict for na_values" + else: + err = TypeError + msg = "The 'pyarrow' engine requires all na_values to be strings" + with pytest.raises(err, match=msg): + parser.read_csv(StringIO(data), names=names, na_values=na_values) + return + elif parser.engine == "pyarrow": + msg = "The 'pyarrow' engine requires all na_values to be strings" + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO(data), names=names, na_values=na_values) + return + + result = parser.read_csv(StringIO(data), names=names, na_values=na_values) + expected = DataFrame(row_data, columns=names) + tm.assert_frame_equal(result, expected) + + +def test_na_values_dict_aliasing(all_parsers): + parser = all_parsers + na_values = {"a": 2, "b": 1} + na_values_copy = na_values.copy() + + names = ["a", "b"] + data = "1,2\n2,1" + + expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names) + + if parser.engine == "pyarrow": + msg = "The pyarrow engine doesn't support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), names=names, na_values=na_values) + return + + result = parser.read_csv(StringIO(data), names=names, na_values=na_values) + + tm.assert_frame_equal(result, expected) + tm.assert_dict_equal(na_values, na_values_copy) + + +def test_na_values_dict_col_index(all_parsers): + # see gh-14203 + data = "a\nfoo\n1" + parser = all_parsers + na_values = {0: "foo"} + + if parser.engine == "pyarrow": + msg = "The pyarrow engine doesn't support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), na_values=na_values) + return + + result = parser.read_csv(StringIO(data), na_values=na_values) + expected = DataFrame({"a": [np.nan, 1]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data,kwargs,expected", + [ + ( + str(2**63) + "\n" + str(2**63 + 1), + {"na_values": [2**63]}, + DataFrame([str(2**63), str(2**63 + 1)]), + ), + (str(2**63) + ",1" + "\n,2", {}, DataFrame([[str(2**63), 1], ["", 2]])), + (str(2**63) + "\n1", {"na_values": [2**63]}, DataFrame([np.nan, 1])), + ], +) +def test_na_values_uint64(all_parsers, data, kwargs, expected, request): + # see gh-14983 + parser = all_parsers + + if parser.engine == "pyarrow" and "na_values" in kwargs: + msg = "The 'pyarrow' engine requires all na_values to be strings" + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO(data), header=None, **kwargs) + return + elif parser.engine == "pyarrow": + mark = pytest.mark.xfail(reason="Returns float64 instead of object") + request.applymarker(mark) + + result = parser.read_csv(StringIO(data), header=None, **kwargs) + 
tm.assert_frame_equal(result, expected) + + +def test_empty_na_values_no_default_with_index(all_parsers): + # see gh-15835 + data = "a,1\nb,2" + parser = all_parsers + expected = DataFrame({"1": [2]}, index=Index(["b"], name="a")) + + result = parser.read_csv(StringIO(data), index_col=0, keep_default_na=False) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "na_filter,index_data", [(False, ["", "5"]), (True, [np.nan, 5.0])] +) +def test_no_na_filter_on_index(all_parsers, na_filter, index_data, request): + # see gh-5239 + # + # Don't parse NA-values in index unless na_filter=True + parser = all_parsers + data = "a,b,c\n1,,3\n4,5,6" + + if parser.engine == "pyarrow" and na_filter is False: + mark = pytest.mark.xfail(reason="mismatched index result") + request.applymarker(mark) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, index=Index(index_data, name="b")) + result = parser.read_csv(StringIO(data), index_col=[1], na_filter=na_filter) + tm.assert_frame_equal(result, expected) + + +def test_inf_na_values_with_int_index(all_parsers): + # see gh-17128 + parser = all_parsers + data = "idx,col1,col2\n1,3,4\n2,inf,-inf" + + # Don't fail with OverflowError with inf's and integer index column. + out = parser.read_csv(StringIO(data), index_col=[0], na_values=["inf", "-inf"]) + expected = DataFrame( + {"col1": [3, np.nan], "col2": [4, np.nan]}, index=Index([1, 2], name="idx") + ) + tm.assert_frame_equal(out, expected) + + +@xfail_pyarrow # mismatched shape +@pytest.mark.parametrize("na_filter", [True, False]) +def test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter): + # see gh-20377 + parser = all_parsers + data = "a,b,c\n1,,3\n4,5,6" + + # na_filter=True --> missing value becomes NaN. + # na_filter=False --> missing value remains empty string. 
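+ # Either way, dtype=str keeps the values that are present as strings.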
+ empty = np.nan if na_filter else "" + expected = DataFrame({"a": ["1", "4"], "b": [empty, "5"], "c": ["3", "6"]}) + + result = parser.read_csv(StringIO(data), na_filter=na_filter, dtype=str) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # mismatched exception message +@pytest.mark.parametrize( + "data, na_values", + [ + ("false,1\n,1\ntrue", None), + ("false,1\nnull,1\ntrue", None), + ("false,1\nnan,1\ntrue", None), + ("false,1\nfoo,1\ntrue", "foo"), + ("false,1\nfoo,1\ntrue", ["foo"]), + ("false,1\nfoo,1\ntrue", {"a": "foo"}), + ], +) +def test_cast_NA_to_bool_raises_error(all_parsers, data, na_values): + parser = all_parsers + msg = "|".join( + [ + "Bool column has NA values in column [0a]", + "cannot safely convert passed user dtype of " + "bool for object dtyped data in column 0", + ] + ) + + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), + header=None, + names=["a", "b"], + dtype={"a": "bool"}, + na_values=na_values, + ) + + +# TODO: this test isn't about the na_values keyword, it is about the empty entries +# being returned with NaN entries, whereas the pyarrow engine returns "nan" +@xfail_pyarrow # mismatched shapes +def test_str_nan_dropped(all_parsers): + # see gh-21131 + parser = all_parsers + + data = """File: small.csv,, +10010010233,0123,654 +foo,,bar +01001000155,4530,898""" + + result = parser.read_csv( + StringIO(data), + header=None, + names=["col1", "col2", "col3"], + dtype={"col1": str, "col2": str, "col3": str}, + ).dropna() + + expected = DataFrame( + { + "col1": ["10010010233", "01001000155"], + "col2": ["0123", "4530"], + "col3": ["654", "898"], + }, + index=[1, 3], + ) + + tm.assert_frame_equal(result, expected) + + +def test_nan_multi_index(all_parsers): + # GH 42446 + parser = all_parsers + data = "A,B,B\nX,Y,Z\n1,2,inf" + + if parser.engine == "pyarrow": + msg = "The pyarrow engine doesn't support passing a dict for na_values" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), header=list(range(2)), na_values={("B", "Z"): "inf"} + ) + return + + result = parser.read_csv( + StringIO(data), header=list(range(2)), na_values={("B", "Z"): "inf"} + ) + + expected = DataFrame( + { + ("A", "X"): [1], + ("B", "Y"): [2], + ("B", "Z"): [np.nan], + } + ) + + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # Failed: DID NOT RAISE ; it casts the NaN to False +def test_bool_and_nan_to_bool(all_parsers): + # GH#42808 + parser = all_parsers + data = """0 +NaN +True +False +""" + with pytest.raises(ValueError, match="NA values"): + parser.read_csv(StringIO(data), dtype="bool") + + +def test_bool_and_nan_to_int(all_parsers): + # GH#42808 + parser = all_parsers + data = """0 +NaN +True +False +""" + with pytest.raises(ValueError, match="convert|NoneType"): + parser.read_csv(StringIO(data), dtype="int") + + +def test_bool_and_nan_to_float(all_parsers): + # GH#42808 + parser = all_parsers + data = """0 +NaN +True +False +""" + result = parser.read_csv(StringIO(data), dtype="float") + expected = DataFrame.from_dict({"0": [np.nan, 1.0, 0.0]}) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_network.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_network.py new file mode 100644 index 0000000000000000000000000000000000000000..9351387dfc3379e6b90756a3a771ec5d46ec4065 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_network.py @@ -0,0 +1,327 @@ +""" +Tests parsers ability to read 
and parse non-local files +and hence require a network connection to be read. +""" +from io import BytesIO +import logging +import re + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.feather_format import read_feather +from pandas.io.parsers import read_csv + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.mark.network +@pytest.mark.single_cpu +@pytest.mark.parametrize("mode", ["explicit", "infer"]) +@pytest.mark.parametrize("engine", ["python", "c"]) +def test_compressed_urls( + httpserver, + datapath, + salaries_table, + mode, + engine, + compression_only, + compression_to_extension, +): + # test reading compressed urls with various engines and + # extension inference + if compression_only == "tar": + pytest.skip("TODO: Add tar salaraies.csv to pandas/io/parsers/data") + + extension = compression_to_extension[compression_only] + with open(datapath("io", "parser", "data", "salaries.csv" + extension), "rb") as f: + httpserver.serve_content(content=f.read()) + + url = httpserver.url + "/salaries.csv" + extension + + if mode != "explicit": + compression_only = mode + + url_table = read_csv(url, sep="\t", compression=compression_only, engine=engine) + tm.assert_frame_equal(url_table, salaries_table) + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_url_encoding_csv(httpserver, datapath): + """ + read_csv should honor the requested encoding for URLs. + + GH 10424 + """ + with open(datapath("io", "parser", "data", "unicode_series.csv"), "rb") as f: + httpserver.serve_content(content=f.read()) + df = read_csv(httpserver.url, encoding="latin-1", header=None) + assert df.loc[15, 1] == "Á köldum klaka (Cold Fever) (1994)" + + +@pytest.fixture +def tips_df(datapath): + """DataFrame with the tips dataset.""" + return read_csv(datapath("io", "data", "csv", "tips.csv")) + + +@pytest.mark.single_cpu +@pytest.mark.usefixtures("s3_resource") +@td.skip_if_not_us_locale() +class TestS3: + def test_parse_public_s3_bucket(self, s3_public_bucket_with_data, tips_df, s3so): + # more of an integration test due to the not-public contents portion + # can probably mock this though. 
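+ # The whole test is skipped when s3fs is not installed.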
+ pytest.importorskip("s3fs") + for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: + df = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext, + compression=comp, + storage_options=s3so, + ) + assert isinstance(df, DataFrame) + assert not df.empty + tm.assert_frame_equal(df, tips_df) + + def test_parse_private_s3_bucket(self, s3_private_bucket_with_data, tips_df, s3so): + # Read public file from bucket with not-public contents + pytest.importorskip("s3fs") + df = read_csv( + f"s3://{s3_private_bucket_with_data.name}/tips.csv", storage_options=s3so + ) + assert isinstance(df, DataFrame) + assert not df.empty + tm.assert_frame_equal(df, tips_df) + + def test_parse_public_s3n_bucket(self, s3_public_bucket_with_data, tips_df, s3so): + # Read from AWS s3 as "s3n" URL + df = read_csv( + f"s3n://{s3_public_bucket_with_data.name}/tips.csv", + nrows=10, + storage_options=s3so, + ) + assert isinstance(df, DataFrame) + assert not df.empty + tm.assert_frame_equal(tips_df.iloc[:10], df) + + def test_parse_public_s3a_bucket(self, s3_public_bucket_with_data, tips_df, s3so): + # Read from AWS s3 as "s3a" URL + df = read_csv( + f"s3a://{s3_public_bucket_with_data.name}/tips.csv", + nrows=10, + storage_options=s3so, + ) + assert isinstance(df, DataFrame) + assert not df.empty + tm.assert_frame_equal(tips_df.iloc[:10], df) + + def test_parse_public_s3_bucket_nrows( + self, s3_public_bucket_with_data, tips_df, s3so + ): + for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: + df = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext, + nrows=10, + compression=comp, + storage_options=s3so, + ) + assert isinstance(df, DataFrame) + assert not df.empty + tm.assert_frame_equal(tips_df.iloc[:10], df) + + def test_parse_public_s3_bucket_chunked( + self, s3_public_bucket_with_data, tips_df, s3so + ): + # Read with a chunksize + chunksize = 5 + for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: + with read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext, + chunksize=chunksize, + compression=comp, + storage_options=s3so, + ) as df_reader: + assert df_reader.chunksize == chunksize + for i_chunk in [0, 1, 2]: + # Read a couple of chunks and make sure we see them + # properly. + df = df_reader.get_chunk() + assert isinstance(df, DataFrame) + assert not df.empty + true_df = tips_df.iloc[ + chunksize * i_chunk : chunksize * (i_chunk + 1) + ] + tm.assert_frame_equal(true_df, df) + + def test_parse_public_s3_bucket_chunked_python( + self, s3_public_bucket_with_data, tips_df, s3so + ): + # Read with a chunksize using the Python parser + chunksize = 5 + for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: + with read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext, + chunksize=chunksize, + compression=comp, + engine="python", + storage_options=s3so, + ) as df_reader: + assert df_reader.chunksize == chunksize + for i_chunk in [0, 1, 2]: + # Read a couple of chunks and make sure we see them properly. 
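+ # Each get_chunk() call should return the next `chunksize` rows of tips_df.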
+ df = df_reader.get_chunk() + assert isinstance(df, DataFrame) + assert not df.empty + true_df = tips_df.iloc[ + chunksize * i_chunk : chunksize * (i_chunk + 1) + ] + tm.assert_frame_equal(true_df, df) + + def test_parse_public_s3_bucket_python( + self, s3_public_bucket_with_data, tips_df, s3so + ): + for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: + df = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext, + engine="python", + compression=comp, + storage_options=s3so, + ) + assert isinstance(df, DataFrame) + assert not df.empty + tm.assert_frame_equal(df, tips_df) + + def test_infer_s3_compression(self, s3_public_bucket_with_data, tips_df, s3so): + for ext in ["", ".gz", ".bz2"]: + df = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext, + engine="python", + compression="infer", + storage_options=s3so, + ) + assert isinstance(df, DataFrame) + assert not df.empty + tm.assert_frame_equal(df, tips_df) + + def test_parse_public_s3_bucket_nrows_python( + self, s3_public_bucket_with_data, tips_df, s3so + ): + for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: + df = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext, + engine="python", + nrows=10, + compression=comp, + storage_options=s3so, + ) + assert isinstance(df, DataFrame) + assert not df.empty + tm.assert_frame_equal(tips_df.iloc[:10], df) + + def test_read_s3_fails(self, s3so): + msg = "The specified bucket does not exist" + with pytest.raises(OSError, match=msg): + read_csv("s3://nyqpug/asdf.csv", storage_options=s3so) + + def test_read_s3_fails_private(self, s3_private_bucket, s3so): + msg = "The specified bucket does not exist" + # Receive a permission error when trying to read a private bucket. + # It's irrelevant here that this isn't actually a table. 
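+ # Note that the read below does not pass storage_options=s3so.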
+ with pytest.raises(OSError, match=msg): + read_csv(f"s3://{s3_private_bucket.name}/file.csv") + + @pytest.mark.xfail(reason="GH#39155 s3fs upgrade", strict=False) + def test_write_s3_csv_fails(self, tips_df, s3so): + # GH 32486 + # Attempting to write to an invalid S3 path should raise + import botocore + + # GH 34087 + # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html + # Catch a ClientError since AWS Service Errors are defined dynamically + error = (FileNotFoundError, botocore.exceptions.ClientError) + + with pytest.raises(error, match="The specified bucket does not exist"): + tips_df.to_csv( + "s3://an_s3_bucket_data_doesnt_exit/not_real.csv", storage_options=s3so + ) + + @pytest.mark.xfail(reason="GH#39155 s3fs upgrade", strict=False) + def test_write_s3_parquet_fails(self, tips_df, s3so): + # GH 27679 + # Attempting to write to an invalid S3 path should raise + pytest.importorskip("pyarrow") + import botocore + + # GH 34087 + # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html + # Catch a ClientError since AWS Service Errors are defined dynamically + error = (FileNotFoundError, botocore.exceptions.ClientError) + + with pytest.raises(error, match="The specified bucket does not exist"): + tips_df.to_parquet( + "s3://an_s3_bucket_data_doesnt_exit/not_real.parquet", + storage_options=s3so, + ) + + @pytest.mark.single_cpu + def test_read_csv_handles_boto_s3_object( + self, s3_public_bucket_with_data, tips_file + ): + # see gh-16135 + + s3_object = s3_public_bucket_with_data.Object("tips.csv") + + with BytesIO(s3_object.get()["Body"].read()) as buffer: + result = read_csv(buffer, encoding="utf8") + assert isinstance(result, DataFrame) + assert not result.empty + + expected = read_csv(tips_file) + tm.assert_frame_equal(result, expected) + + @pytest.mark.single_cpu + def test_read_csv_chunked_download(self, s3_public_bucket, caplog, s3so): + # 8 MB, S3FS uses 5MB chunks + df = DataFrame(np.zeros((100000, 4)), columns=list("abcd")) + with BytesIO(df.to_csv().encode("utf-8")) as buf: + s3_public_bucket.put_object(Key="large-file.csv", Body=buf) + uri = f"{s3_public_bucket.name}/large-file.csv" + match_re = re.compile(rf"^Fetch: {uri}, 0-(?P<stop>\d+)$") + with caplog.at_level(logging.DEBUG, logger="s3fs"): + read_csv( + f"s3://{uri}", + nrows=5, + storage_options=s3so, + ) + for log in caplog.messages: + if match := re.match(match_re, log): + # Less than 8 MB + assert int(match.group("stop")) < 8000000 + + def test_read_s3_with_hash_in_key(self, s3_public_bucket_with_data, tips_df, s3so): + # GH 25945 + result = read_csv( + f"s3://{s3_public_bucket_with_data.name}/tips#1.csv", storage_options=s3so + ) + tm.assert_frame_equal(tips_df, result) + + def test_read_feather_s3_file_path( + self, s3_public_bucket_with_data, feather_file, s3so + ): + # GH 29055 + pytest.importorskip("pyarrow") + expected = read_feather(feather_file) + res = read_feather( + f"s3://{s3_public_bucket_with_data.name}/simple_dataset.feather", + storage_options=s3so, + ) + tm.assert_frame_equal(expected, res) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_parse_dates.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_parse_dates.py new file mode 100644 index 0000000000000000000000000000000000000000..623657b412682ef82c116853eacdb550aa386fb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_parse_dates.py @@ -0,0 +1,2340 @@ +""" +Tests date parsing functionality for all of the +parsers
defined in parsers.py +""" + +from datetime import ( + date, + datetime, + timedelta, + timezone, +) +from io import StringIO + +from dateutil.parser import parse as du_parse +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs import parsing + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, +) +import pandas._testing as tm +from pandas.core.indexes.datetimes import date_range +from pandas.core.tools.datetimes import start_caching_at + +from pandas.io.parsers import read_csv + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +@xfail_pyarrow +def test_read_csv_with_custom_date_parser(all_parsers): + # GH36111 + def __custom_date_parser(time): + time = time.astype(np.float64) + time = time.astype(int) # convert float seconds to int type + return pd.to_timedelta(time, unit="s") + + testdata = StringIO( + """time e n h + 41047.00 -98573.7297 871458.0640 389.0089 + 41048.00 -98573.7299 871458.0640 389.0089 + 41049.00 -98573.7300 871458.0642 389.0088 + 41050.00 -98573.7299 871458.0643 389.0088 + 41051.00 -98573.7302 871458.0640 389.0086 + """ + ) + result = all_parsers.read_csv_check_warnings( + FutureWarning, + "Please use 'date_format' instead", + testdata, + delim_whitespace=True, + parse_dates=True, + date_parser=__custom_date_parser, + index_col="time", + ) + time = [41047, 41048, 41049, 41050, 41051] + time = pd.TimedeltaIndex([pd.to_timedelta(i, unit="s") for i in time], name="time") + expected = DataFrame( + { + "e": [-98573.7297, -98573.7299, -98573.7300, -98573.7299, -98573.7302], + "n": [871458.0640, 871458.0640, 871458.0642, 871458.0643, 871458.0640], + "h": [389.0089, 389.0089, 389.0088, 389.0088, 389.0086], + }, + index=time, + ) + + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_read_csv_with_custom_date_parser_parse_dates_false(all_parsers): + # GH44366 + def __custom_date_parser(time): + time = time.astype(np.float64) + time = time.astype(int) # convert float seconds to int type + return pd.to_timedelta(time, unit="s") + + testdata = StringIO( + """time e + 41047.00 -93.77 + 41048.00 -95.79 + 41049.00 -98.73 + 41050.00 -93.99 + 41051.00 -97.72 + """ + ) + result = all_parsers.read_csv_check_warnings( + FutureWarning, + "Please use 'date_format' instead", + testdata, + delim_whitespace=True, + parse_dates=False, + date_parser=__custom_date_parser, + index_col="time", + ) + time = Series([41047.00, 41048.00, 41049.00, 41050.00, 41051.00], name="time") + expected = DataFrame( + {"e": [-93.77, -95.79, -98.73, -93.99, -97.72]}, + index=time, + ) + + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_separator_date_conflict(all_parsers): + # Regression test for gh-4678 + # + # Make sure thousands separator and + # date parsing do not conflict. 
+ parser = all_parsers + data = "06-02-2013;13:00;1-000.215" + expected = DataFrame( + [[datetime(2013, 6, 2, 13, 0, 0), 1000.215]], columns=["Date", 2] + ) + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + df = parser.read_csv( + StringIO(data), + sep=";", + thousands="-", + parse_dates={"Date": [0, 1]}, + header=None, + ) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("keep_date_col", [True, False]) +def test_multiple_date_col_custom(all_parsers, keep_date_col, request): + data = """\ +KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 +KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 +""" + parser = all_parsers + + if keep_date_col and parser.engine == "pyarrow": + # For this to pass, we need to disable auto-inference on the date columns + # in parse_dates. We have no way of doing this though + mark = pytest.mark.xfail( + reason="pyarrow doesn't support disabling auto-inference on column numbers." + ) + request.applymarker(mark) + + def date_parser(*date_cols): + """ + Test date parser. + + Parameters + ---------- + date_cols : args + The list of data columns to parse. + + Returns + ------- + parsed : Series + """ + return parsing.try_parse_dates( + parsing.concat_date_cols(date_cols), parser=du_parse + ) + + kwds = { + "header": None, + "date_parser": date_parser, + "parse_dates": {"actual": [1, 2], "nominal": [1, 3]}, + "keep_date_col": keep_date_col, + "names": ["X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8"], + } + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + **kwds, + raise_on_extra_warnings=False, + ) + + expected = DataFrame( + [ + [ + datetime(1999, 1, 27, 19, 0), + datetime(1999, 1, 27, 18, 56), + "KORD", + "19990127", + " 19:00:00", + " 18:56:00", + 0.81, + 2.81, + 7.2, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 20, 0), + datetime(1999, 1, 27, 19, 56), + "KORD", + "19990127", + " 20:00:00", + " 19:56:00", + 0.01, + 2.21, + 7.2, + 0.0, + 260.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 20, 56), + "KORD", + "19990127", + " 21:00:00", + " 20:56:00", + -0.59, + 2.21, + 5.7, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 21, 18), + "KORD", + "19990127", + " 21:00:00", + " 21:18:00", + -0.99, + 2.01, + 3.6, + 0.0, + 270.0, + ], + [ + datetime(1999, 1, 27, 22, 0), + datetime(1999, 1, 27, 21, 56), + "KORD", + "19990127", + " 22:00:00", + " 21:56:00", + -0.59, + 1.71, + 5.1, + 0.0, + 290.0, + ], + [ + datetime(1999, 1, 27, 23, 0), + datetime(1999, 1, 27, 22, 56), + "KORD", + "19990127", + " 23:00:00", + " 22:56:00", + -0.59, + 1.71, + 4.6, + 0.0, + 280.0, + ], + ], + columns=[ + "actual", + "nominal", + "X0", + "X1", + "X2", + "X3", + "X4", + "X5", + "X6", + "X7", + "X8", + ], + ) + + if not keep_date_col: + expected = expected.drop(["X1", "X2", "X3"], axis=1) + + # Python can sometimes be flaky about how + # the aggregated columns are entered, so + # this standardizes the order. 
+ result = result[expected.columns] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("container", [list, tuple, Index, Series]) +@pytest.mark.parametrize("dim", [1, 2]) +def test_concat_date_col_fail(container, dim): + msg = "not all elements from date_cols are numpy arrays" + value = "19990127" + + date_cols = tuple(container([value]) for _ in range(dim)) + + with pytest.raises(ValueError, match=msg): + parsing.concat_date_cols(date_cols) + + +@pytest.mark.parametrize("keep_date_col", [True, False]) +def test_multiple_date_col(all_parsers, keep_date_col, request): + data = """\ +KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 +KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 +""" + parser = all_parsers + + if keep_date_col and parser.engine == "pyarrow": + # For this to pass, we need to disable auto-inference on the date columns + # in parse_dates. We have no way of doing this though + mark = pytest.mark.xfail( + reason="pyarrow doesn't support disabling auto-inference on column numbers." + ) + request.applymarker(mark) + + depr_msg = "The 'keep_date_col' keyword in pd.read_csv is deprecated" + + kwds = { + "header": None, + "parse_dates": [[1, 2], [1, 3]], + "keep_date_col": keep_date_col, + "names": ["X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8"], + } + with tm.assert_produces_warning( + (DeprecationWarning, FutureWarning), match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv(StringIO(data), **kwds) + + expected = DataFrame( + [ + [ + datetime(1999, 1, 27, 19, 0), + datetime(1999, 1, 27, 18, 56), + "KORD", + "19990127", + " 19:00:00", + " 18:56:00", + 0.81, + 2.81, + 7.2, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 20, 0), + datetime(1999, 1, 27, 19, 56), + "KORD", + "19990127", + " 20:00:00", + " 19:56:00", + 0.01, + 2.21, + 7.2, + 0.0, + 260.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 20, 56), + "KORD", + "19990127", + " 21:00:00", + " 20:56:00", + -0.59, + 2.21, + 5.7, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 21, 18), + "KORD", + "19990127", + " 21:00:00", + " 21:18:00", + -0.99, + 2.01, + 3.6, + 0.0, + 270.0, + ], + [ + datetime(1999, 1, 27, 22, 0), + datetime(1999, 1, 27, 21, 56), + "KORD", + "19990127", + " 22:00:00", + " 21:56:00", + -0.59, + 1.71, + 5.1, + 0.0, + 290.0, + ], + [ + datetime(1999, 1, 27, 23, 0), + datetime(1999, 1, 27, 22, 56), + "KORD", + "19990127", + " 23:00:00", + " 22:56:00", + -0.59, + 1.71, + 4.6, + 0.0, + 280.0, + ], + ], + columns=[ + "X1_X2", + "X1_X3", + "X0", + "X1", + "X2", + "X3", + "X4", + "X5", + "X6", + "X7", + "X8", + ], + ) + + if not keep_date_col: + expected = expected.drop(["X1", "X2", "X3"], axis=1) + + tm.assert_frame_equal(result, expected) + + +def test_date_col_as_index_col(all_parsers): + data = """\ +KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 
0.0000, 290.0000 +""" + parser = all_parsers + kwds = { + "header": None, + "parse_dates": [1], + "index_col": 1, + "names": ["X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7"], + } + result = parser.read_csv(StringIO(data), **kwds) + + index = Index( + [ + datetime(1999, 1, 27, 19, 0), + datetime(1999, 1, 27, 20, 0), + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 22, 0), + ], + name="X1", + ) + expected = DataFrame( + [ + ["KORD", " 18:56:00", 0.81, 2.81, 7.2, 0.0, 280.0], + ["KORD", " 19:56:00", 0.01, 2.21, 7.2, 0.0, 260.0], + ["KORD", " 20:56:00", -0.59, 2.21, 5.7, 0.0, 280.0], + ["KORD", " 21:18:00", -0.99, 2.01, 3.6, 0.0, 270.0], + ["KORD", " 21:56:00", -0.59, 1.71, 5.1, 0.0, 290.0], + ], + columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"], + index=index, + ) + if parser.engine == "pyarrow": + # https://github.com/pandas-dev/pandas/issues/44231 + # pyarrow 6.0 starts to infer time type + expected["X2"] = pd.to_datetime("1970-01-01" + expected["X2"]).dt.time + + tm.assert_frame_equal(result, expected) + + +def test_multiple_date_cols_int_cast(all_parsers): + data = ( + "KORD,19990127, 19:00:00, 18:56:00, 0.8100\n" + "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n" + "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n" + "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n" + "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n" + "KORD,19990127, 23:00:00, 22:56:00, -0.5900" + ) + parse_dates = {"actual": [1, 2], "nominal": [1, 3]} + parser = all_parsers + + kwds = { + "header": None, + "parse_dates": parse_dates, + "date_parser": pd.to_datetime, + } + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + **kwds, + raise_on_extra_warnings=False, + ) + + expected = DataFrame( + [ + [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56), "KORD", 0.81], + [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56), "KORD", 0.01], + [ + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 20, 56), + "KORD", + -0.59, + ], + [ + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 21, 18), + "KORD", + -0.99, + ], + [ + datetime(1999, 1, 27, 22, 0), + datetime(1999, 1, 27, 21, 56), + "KORD", + -0.59, + ], + [ + datetime(1999, 1, 27, 23, 0), + datetime(1999, 1, 27, 22, 56), + "KORD", + -0.59, + ], + ], + columns=["actual", "nominal", 0, 4], + ) + + # Python can sometimes be flaky about how + # the aggregated columns are entered, so + # this standardizes the order. 
+ result = result[expected.columns] + tm.assert_frame_equal(result, expected) + + +def test_multiple_date_col_timestamp_parse(all_parsers): + parser = all_parsers + data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25 +05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25""" + + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + parse_dates=[[0, 1]], + header=None, + date_parser=Timestamp, + raise_on_extra_warnings=False, + ) + expected = DataFrame( + [ + [ + Timestamp("05/31/2012, 15:30:00.029"), + 1306.25, + 1, + "E", + 0, + np.nan, + 1306.25, + ], + [ + Timestamp("05/31/2012, 15:30:00.029"), + 1306.25, + 8, + "E", + 0, + np.nan, + 1306.25, + ], + ], + columns=["0_1", 2, 3, 4, 5, 6, 7], + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_multiple_date_cols_with_header(all_parsers): + parser = all_parsers + data = """\ +ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir +KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 +KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]}) + expected = DataFrame( + [ + [ + datetime(1999, 1, 27, 19, 0), + "KORD", + " 18:56:00", + 0.81, + 2.81, + 7.2, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 20, 0), + "KORD", + " 19:56:00", + 0.01, + 2.21, + 7.2, + 0.0, + 260.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + "KORD", + " 20:56:00", + -0.59, + 2.21, + 5.7, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + "KORD", + " 21:18:00", + -0.99, + 2.01, + 3.6, + 0.0, + 270.0, + ], + [ + datetime(1999, 1, 27, 22, 0), + "KORD", + " 21:56:00", + -0.59, + 1.71, + 5.1, + 0.0, + 290.0, + ], + [ + datetime(1999, 1, 27, 23, 0), + "KORD", + " 22:56:00", + -0.59, + 1.71, + 4.6, + 0.0, + 280.0, + ], + ], + columns=[ + "nominal", + "ID", + "ActualTime", + "TDew", + "TAir", + "Windspeed", + "Precip", + "WindDir", + ], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data,parse_dates,msg", + [ + ( + """\ +date_NominalTime,date,NominalTime +KORD1,19990127, 19:00:00 +KORD2,19990127, 20:00:00""", + [[1, 2]], + ("New date column already in dict date_NominalTime"), + ), + ( + """\ +ID,date,nominalTime +KORD,19990127, 19:00:00 +KORD,19990127, 20:00:00""", + {"ID": [1, 2]}, + "Date column ID already in dict", + ), + ], +) +def test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg): + parser = all_parsers + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + parser.read_csv(StringIO(data), parse_dates=parse_dates) + + +def test_date_parser_int_bug(all_parsers): + # see gh-3071 + parser = all_parsers + data = ( + "posix_timestamp,elapsed,sys,user,queries,query_time,rows," + 
"accountid,userid,contactid,level,silo,method\n" + "1343103150,0.062353,0,4,6,0.01690,3," + "12345,1,-1,3,invoice_InvoiceResource,search\n" + ) + + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + index_col=0, + parse_dates=[0], + # Note: we must pass tz and then drop the tz attribute + # (if we don't CI will flake out depending on the runner's local time) + date_parser=lambda x: datetime.fromtimestamp(int(x), tz=timezone.utc).replace( + tzinfo=None + ), + raise_on_extra_warnings=False, + ) + expected = DataFrame( + [ + [ + 0.062353, + 0, + 4, + 6, + 0.01690, + 3, + 12345, + 1, + -1, + 3, + "invoice_InvoiceResource", + "search", + ] + ], + columns=[ + "elapsed", + "sys", + "user", + "queries", + "query_time", + "rows", + "accountid", + "userid", + "contactid", + "level", + "silo", + "method", + ], + index=Index([Timestamp("2012-07-24 04:12:30")], name="posix_timestamp"), + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_nat_parse(all_parsers): + # see gh-3062 + parser = all_parsers + df = DataFrame( + { + "A": np.arange(10, dtype="float64"), + "B": Timestamp("20010101").as_unit("ns"), + } + ) + df.iloc[3:6, :] = np.nan + + with tm.ensure_clean("__nat_parse_.csv") as path: + df.to_csv(path) + + result = parser.read_csv(path, index_col=0, parse_dates=["B"]) + tm.assert_frame_equal(result, df) + + +@skip_pyarrow +def test_csv_custom_parser(all_parsers): + data = """A,B,C +20090101,a,1,2 +20090102,b,3,4 +20090103,c,4,5 +""" + parser = all_parsers + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + date_parser=lambda x: datetime.strptime(x, "%Y%m%d"), + ) + expected = parser.read_csv(StringIO(data), parse_dates=True) + tm.assert_frame_equal(result, expected) + result = parser.read_csv(StringIO(data), date_format="%Y%m%d") + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow +def test_parse_dates_implicit_first_col(all_parsers): + data = """A,B,C +20090101,a,1,2 +20090102,b,3,4 +20090103,c,4,5 +""" + parser = all_parsers + result = parser.read_csv(StringIO(data), parse_dates=True) + + expected = parser.read_csv(StringIO(data), index_col=0, parse_dates=True) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_parse_dates_string(all_parsers): + data = """date,A,B,C +20090101,a,1,2 +20090102,b,3,4 +20090103,c,4,5 +""" + parser = all_parsers + result = parser.read_csv(StringIO(data), index_col="date", parse_dates=["date"]) + # freq doesn't round-trip + index = date_range("1/1/2009", periods=3, name="date")._with_freq(None) + + expected = DataFrame( + {"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]}, index=index + ) + tm.assert_frame_equal(result, expected) + + +# Bug in https://github.com/dateutil/dateutil/issues/217 +# has been addressed, but we just don't pass in the `yearfirst` +@pytest.mark.xfail(reason="yearfirst is not surfaced in read_*") +@pytest.mark.parametrize("parse_dates", [[["date", "time"]], [[0, 1]]]) +def test_yy_format_with_year_first(all_parsers, parse_dates): + data = """date,time,B,C +090131,0010,1,2 +090228,1020,3,4 +090331,0830,5,6 +""" + parser = all_parsers + result = parser.read_csv_check_warnings( + UserWarning, + "Could not infer format", + StringIO(data), + index_col=0, + parse_dates=parse_dates, + ) + index = DatetimeIndex( + [ + datetime(2009, 1, 31, 0, 10, 0), + datetime(2009, 2, 28, 10, 20, 0), + datetime(2009, 3, 31, 8, 30, 0), + ], + dtype=object, + name="date_time", + ) + expected = 
DataFrame({"B": [1, 3, 5], "C": [2, 4, 6]}, index=index) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +@pytest.mark.parametrize("parse_dates", [[0, 2], ["a", "c"]]) +def test_parse_dates_column_list(all_parsers, parse_dates): + data = "a,b,c\n01/01/2010,1,15/02/2010" + parser = all_parsers + + expected = DataFrame( + {"a": [datetime(2010, 1, 1)], "b": [1], "c": [datetime(2010, 2, 15)]} + ) + expected = expected.set_index(["a", "b"]) + + result = parser.read_csv( + StringIO(data), index_col=[0, 1], parse_dates=parse_dates, dayfirst=True + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]]) +def test_multi_index_parse_dates(all_parsers, index_col): + data = """index1,index2,A,B,C +20090101,one,a,1,2 +20090101,two,b,3,4 +20090101,three,c,4,5 +20090102,one,a,1,2 +20090102,two,b,3,4 +20090102,three,c,4,5 +20090103,one,a,1,2 +20090103,two,b,3,4 +20090103,three,c,4,5 +""" + parser = all_parsers + index = MultiIndex.from_product( + [ + (datetime(2009, 1, 1), datetime(2009, 1, 2), datetime(2009, 1, 3)), + ("one", "two", "three"), + ], + names=["index1", "index2"], + ) + + # Out of order. + if index_col == [1, 0]: + index = index.swaplevel(0, 1) + + expected = DataFrame( + [ + ["a", 1, 2], + ["b", 3, 4], + ["c", 4, 5], + ["a", 1, 2], + ["b", 3, 4], + ["c", 4, 5], + ["a", 1, 2], + ["b", 3, 4], + ["c", 4, 5], + ], + columns=["A", "B", "C"], + index=index, + ) + result = parser.read_csv_check_warnings( + UserWarning, + "Could not infer format", + StringIO(data), + index_col=index_col, + parse_dates=True, + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +@pytest.mark.parametrize("kwargs", [{"dayfirst": True}, {"day_first": True}]) +def test_parse_dates_custom_euro_format(all_parsers, kwargs): + parser = all_parsers + data = """foo,bar,baz +31/01/2010,1,2 +01/02/2010,1,NA +02/02/2010,1,2 +""" + if "dayfirst" in kwargs: + df = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + names=["time", "Q", "NTU"], + date_parser=lambda d: du_parse(d, **kwargs), + header=0, + index_col=0, + parse_dates=True, + na_values=["NA"], + ) + exp_index = Index( + [datetime(2010, 1, 31), datetime(2010, 2, 1), datetime(2010, 2, 2)], + name="time", + ) + expected = DataFrame( + {"Q": [1, 1, 1], "NTU": [2, np.nan, 2]}, + index=exp_index, + columns=["Q", "NTU"], + ) + tm.assert_frame_equal(df, expected) + else: + msg = "got an unexpected keyword argument 'day_first'" + with pytest.raises(TypeError, match=msg): + parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + names=["time", "Q", "NTU"], + date_parser=lambda d: du_parse(d, **kwargs), + skiprows=[0], + index_col=0, + parse_dates=True, + na_values=["NA"], + ) + + +def test_parse_tz_aware(all_parsers): + # See gh-1693 + parser = all_parsers + data = "Date,x\n2012-06-13T01:39:00Z,0.5" + + result = parser.read_csv(StringIO(data), index_col=0, parse_dates=True) + # TODO: make unit check more specific + if parser.engine == "pyarrow": + result.index = result.index.as_unit("ns") + expected = DataFrame( + {"x": [0.5]}, index=Index([Timestamp("2012-06-13 01:39:00+00:00")], name="Date") + ) + if parser.engine == "pyarrow": + expected_tz = pytz.utc + else: + expected_tz = timezone.utc + tm.assert_frame_equal(result, expected) + assert result.index.tz is expected_tz + + +@xfail_pyarrow +@pytest.mark.parametrize( + "parse_dates,index_col", + [({"nominal": [1, 2]}, "nominal"), ({"nominal": [1, 
2]}, 0), ([[1, 2]], 0)], +) +def test_multiple_date_cols_index(all_parsers, parse_dates, index_col): + parser = all_parsers + data = """ +ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir +KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 +KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 +""" + expected = DataFrame( + [ + [ + datetime(1999, 1, 27, 19, 0), + "KORD1", + " 18:56:00", + 0.81, + 2.81, + 7.2, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 20, 0), + "KORD2", + " 19:56:00", + 0.01, + 2.21, + 7.2, + 0.0, + 260.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + "KORD3", + " 20:56:00", + -0.59, + 2.21, + 5.7, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + "KORD4", + " 21:18:00", + -0.99, + 2.01, + 3.6, + 0.0, + 270.0, + ], + [ + datetime(1999, 1, 27, 22, 0), + "KORD5", + " 21:56:00", + -0.59, + 1.71, + 5.1, + 0.0, + 290.0, + ], + [ + datetime(1999, 1, 27, 23, 0), + "KORD6", + " 22:56:00", + -0.59, + 1.71, + 4.6, + 0.0, + 280.0, + ], + ], + columns=[ + "nominal", + "ID", + "ActualTime", + "TDew", + "TAir", + "Windspeed", + "Precip", + "WindDir", + ], + ) + expected = expected.set_index("nominal") + + if not isinstance(parse_dates, dict): + expected.index.name = "date_NominalTime" + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), parse_dates=parse_dates, index_col=index_col + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_multiple_date_cols_chunked(all_parsers): + parser = all_parsers + data = """\ +ID,date,nominalTime,actualTime,A,B,C,D,E +KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 +KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 +""" + + expected = DataFrame( + [ + [ + datetime(1999, 1, 27, 19, 0), + "KORD", + " 18:56:00", + 0.81, + 2.81, + 7.2, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 20, 0), + "KORD", + " 19:56:00", + 0.01, + 2.21, + 7.2, + 0.0, + 260.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + "KORD", + " 20:56:00", + -0.59, + 2.21, + 5.7, + 0.0, + 280.0, + ], + [ + datetime(1999, 1, 27, 21, 0), + "KORD", + " 21:18:00", + -0.99, + 2.01, + 3.6, + 0.0, + 270.0, + ], + [ + datetime(1999, 1, 27, 22, 0), + "KORD", + " 21:56:00", + -0.59, + 1.71, + 5.1, + 0.0, + 290.0, + ], + [ + datetime(1999, 1, 27, 23, 0), + "KORD", + " 22:56:00", + -0.59, + 1.71, + 4.6, + 0.0, + 280.0, + ], + ], + columns=["nominal", "ID", "actualTime", "A", "B", "C", "D", "E"], + ) + expected = expected.set_index("nominal") + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + with 
parser.read_csv( + StringIO(data), + parse_dates={"nominal": [1, 2]}, + index_col="nominal", + chunksize=2, + ) as reader: + chunks = list(reader) + + tm.assert_frame_equal(chunks[0], expected[:2]) + tm.assert_frame_equal(chunks[1], expected[2:4]) + tm.assert_frame_equal(chunks[2], expected[4:]) + + +def test_multiple_date_col_named_index_compat(all_parsers): + parser = all_parsers + data = """\ +ID,date,nominalTime,actualTime,A,B,C,D,E +KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 +KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 +""" + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + with_indices = parser.read_csv( + StringIO(data), parse_dates={"nominal": [1, 2]}, index_col="nominal" + ) + + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + with_names = parser.read_csv( + StringIO(data), + index_col="nominal", + parse_dates={"nominal": ["date", "nominalTime"]}, + ) + tm.assert_frame_equal(with_indices, with_names) + + +def test_multiple_date_col_multiple_index_compat(all_parsers): + parser = all_parsers + data = """\ +ID,date,nominalTime,actualTime,A,B,C,D,E +KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 +KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 +KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 +KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 +KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 +KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000 +""" + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), index_col=["nominal", "ID"], parse_dates={"nominal": [1, 2]} + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + expected = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]}) + + expected = expected.set_index(["nominal", "ID"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("kwargs", [{}, {"index_col": "C"}]) +def test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs): + # see gh-5636 + parser = all_parsers + msg = ( + "Only booleans, lists, and dictionaries " + "are accepted for the 'parse_dates' parameter" + ) + data = """A,B,C + 1,2,2003-11-1""" + + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO(data), parse_dates="C", **kwargs) + + +@pytest.mark.parametrize("parse_dates", [(1,), np.array([4, 5]), {1, 3}]) +def test_read_with_parse_dates_invalid_type(all_parsers, parse_dates): + parser = all_parsers + msg = ( + "Only booleans, lists, and dictionaries " + "are accepted for the 'parse_dates' parameter" + ) + data = """A,B,C + 1,2,2003-11-1""" + + with pytest.raises(TypeError, 
match=msg): + parser.read_csv(StringIO(data), parse_dates=(1,)) + + +@pytest.mark.parametrize("cache_dates", [True, False]) +@pytest.mark.parametrize("value", ["nan", ""]) +def test_bad_date_parse(all_parsers, cache_dates, value): + # if we have an invalid date make sure that we handle this with + # and w/o the cache properly + parser = all_parsers + s = StringIO((f"{value},\n") * (start_caching_at + 1)) + + parser.read_csv( + s, + header=None, + names=["foo", "bar"], + parse_dates=["foo"], + cache_dates=cache_dates, + ) + + +@pytest.mark.parametrize("cache_dates", [True, False]) +@pytest.mark.parametrize("value", ["0"]) +def test_bad_date_parse_with_warning(all_parsers, cache_dates, value): + # if we have an invalid date make sure that we handle this with + # and w/o the cache properly. + parser = all_parsers + s = StringIO((f"{value},\n") * 50000) + + if parser.engine == "pyarrow": + # pyarrow reads "0" as 0 (of type int64), and so + # pandas doesn't try to guess the datetime format + # TODO: parse dates directly in pyarrow, see + # https://github.com/pandas-dev/pandas/issues/48017 + warn = None + elif cache_dates: + # Note: warning is not raised if 'cache_dates', because here there is only a + # single unique date and hence no risk of inconsistent parsing. + warn = None + else: + warn = UserWarning + parser.read_csv_check_warnings( + warn, + "Could not infer format", + s, + header=None, + names=["foo", "bar"], + parse_dates=["foo"], + cache_dates=cache_dates, + raise_on_extra_warnings=False, + ) + + +@xfail_pyarrow +def test_parse_dates_empty_string(all_parsers): + # see gh-2263 + parser = all_parsers + data = "Date,test\n2012-01-01,1\n,2" + result = parser.read_csv(StringIO(data), parse_dates=["Date"], na_filter=False) + + expected = DataFrame( + [[datetime(2012, 1, 1), 1], [pd.NaT, 2]], columns=["Date", "test"] + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "reader", ["read_csv_check_warnings", "read_table_check_warnings"] +) +def test_parse_dates_infer_datetime_format_warning(all_parsers, reader): + # GH 49024, 51017 + parser = all_parsers + data = "Date,test\n2012-01-01,1\n,2" + + getattr(parser, reader)( + FutureWarning, + "The argument 'infer_datetime_format' is deprecated", + StringIO(data), + parse_dates=["Date"], + infer_datetime_format=True, + sep=",", + raise_on_extra_warnings=False, + ) + + +@pytest.mark.parametrize( + "reader", ["read_csv_check_warnings", "read_table_check_warnings"] +) +def test_parse_dates_date_parser_and_date_format(all_parsers, reader): + # GH 50601 + parser = all_parsers + data = "Date,test\n2012-01-01,1\n,2" + msg = "Cannot use both 'date_parser' and 'date_format'" + with pytest.raises(TypeError, match=msg): + getattr(parser, reader)( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + parse_dates=["Date"], + date_parser=pd.to_datetime, + date_format="ISO8601", + sep=",", + ) + + +@xfail_pyarrow +@pytest.mark.parametrize( + "data,kwargs,expected", + [ + ( + "a\n04.15.2016", + {"parse_dates": ["a"]}, + DataFrame([datetime(2016, 4, 15)], columns=["a"]), + ), + ( + "a\n04.15.2016", + {"parse_dates": True, "index_col": 0}, + DataFrame(index=DatetimeIndex(["2016-04-15"], name="a"), columns=[]), + ), + ( + "a,b\n04.15.2016,09.16.2013", + {"parse_dates": ["a", "b"]}, + DataFrame( + [[datetime(2016, 4, 15), datetime(2013, 9, 16)]], columns=["a", "b"] + ), + ), + ( + "a,b\n04.15.2016,09.16.2013", + {"parse_dates": True, "index_col": [0, 1]}, + DataFrame( + index=MultiIndex.from_tuples( + [(datetime(2016, 4, 15), 
datetime(2013, 9, 16))], names=["a", "b"] + ), + columns=[], + ), + ), + ], +) +def test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected): + # see gh-14066 + parser = all_parsers + + result = parser.read_csv(StringIO(data), thousands=".", **kwargs) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_parse_date_time_multi_level_column_name(all_parsers): + data = """\ +D,T,A,B +date, time,a,b +2001-01-05, 09:00:00, 0.0, 10. +2001-01-06, 00:00:00, 1.0, 11. +""" + parser = all_parsers + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + header=[0, 1], + parse_dates={"date_time": [0, 1]}, + date_parser=pd.to_datetime, + ) + + expected_data = [ + [datetime(2001, 1, 5, 9, 0, 0), 0.0, 10.0], + [datetime(2001, 1, 6, 0, 0, 0), 1.0, 11.0], + ] + expected = DataFrame(expected_data, columns=["date_time", ("A", "a"), ("B", "b")]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data,kwargs,expected", + [ + ( + """\ +date,time,a,b +2001-01-05, 10:00:00, 0.0, 10. +2001-01-05, 00:00:00, 1., 11. +""", + {"header": 0, "parse_dates": {"date_time": [0, 1]}}, + DataFrame( + [ + [datetime(2001, 1, 5, 10, 0, 0), 0.0, 10], + [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0], + ], + columns=["date_time", "a", "b"], + ), + ), + ( + ( + "KORD,19990127, 19:00:00, 18:56:00, 0.8100\n" + "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n" + "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n" + "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n" + "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n" + "KORD,19990127, 23:00:00, 22:56:00, -0.5900" + ), + {"header": None, "parse_dates": {"actual": [1, 2], "nominal": [1, 3]}}, + DataFrame( + [ + [ + datetime(1999, 1, 27, 19, 0), + datetime(1999, 1, 27, 18, 56), + "KORD", + 0.81, + ], + [ + datetime(1999, 1, 27, 20, 0), + datetime(1999, 1, 27, 19, 56), + "KORD", + 0.01, + ], + [ + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 20, 56), + "KORD", + -0.59, + ], + [ + datetime(1999, 1, 27, 21, 0), + datetime(1999, 1, 27, 21, 18), + "KORD", + -0.99, + ], + [ + datetime(1999, 1, 27, 22, 0), + datetime(1999, 1, 27, 21, 56), + "KORD", + -0.59, + ], + [ + datetime(1999, 1, 27, 23, 0), + datetime(1999, 1, 27, 22, 56), + "KORD", + -0.59, + ], + ], + columns=["actual", "nominal", 0, 4], + ), + ), + ], +) +def test_parse_date_time(all_parsers, data, kwargs, expected): + parser = all_parsers + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + date_parser=pd.to_datetime, + **kwargs, + raise_on_extra_warnings=False, + ) + + # Python can sometimes be flaky about how + # the aggregated columns are entered, so + # this standardizes the order. + result = result[expected.columns] + tm.assert_frame_equal(result, expected) + + +def test_parse_date_fields(all_parsers): + parser = all_parsers + data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11." 
+ result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + header=0, + parse_dates={"ymd": [0, 1, 2]}, + date_parser=lambda x: x, + raise_on_extra_warnings=False, + ) + + expected = DataFrame( + [[datetime(2001, 1, 10), 10.0], [datetime(2001, 2, 1), 11.0]], + columns=["ymd", "a"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + ("key", "value", "warn"), + [ + ( + "date_parser", + lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S"), + FutureWarning, + ), + ("date_format", "%Y %m %d %H %M %S", None), + ], +) +def test_parse_date_all_fields(all_parsers, key, value, warn): + parser = all_parsers + data = """\ +year,month,day,hour,minute,second,a,b +2001,01,05,10,00,0,0.0,10. +2001,01,5,10,0,00,1.,11. +""" + result = parser.read_csv_check_warnings( + warn, + "use 'date_format' instead", + StringIO(data), + header=0, + parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, + **{key: value}, + raise_on_extra_warnings=False, + ) + expected = DataFrame( + [ + [datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0], + [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0], + ], + columns=["ymdHMS", "a", "b"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + ("key", "value", "warn"), + [ + ( + "date_parser", + lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S.%f"), + FutureWarning, + ), + ("date_format", "%Y %m %d %H %M %S.%f", None), + ], +) +def test_datetime_fractional_seconds(all_parsers, key, value, warn): + parser = all_parsers + data = """\ +year,month,day,hour,minute,second,a,b +2001,01,05,10,00,0.123456,0.0,10. +2001,01,5,10,0,0.500000,1.,11. +""" + result = parser.read_csv_check_warnings( + warn, + "use 'date_format' instead", + StringIO(data), + header=0, + parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, + **{key: value}, + raise_on_extra_warnings=False, + ) + expected = DataFrame( + [ + [datetime(2001, 1, 5, 10, 0, 0, microsecond=123456), 0.0, 10.0], + [datetime(2001, 1, 5, 10, 0, 0, microsecond=500000), 1.0, 11.0], + ], + columns=["ymdHMS", "a", "b"], + ) + tm.assert_frame_equal(result, expected) + + +def test_generic(all_parsers): + parser = all_parsers + data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11." 
+ + def parse_function(yy, mm): + return [date(year=int(y), month=int(m), day=1) for y, m in zip(yy, mm)] + + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + header=0, + parse_dates={"ym": [0, 1]}, + date_parser=parse_function, + raise_on_extra_warnings=False, + ) + expected = DataFrame( + [[date(2001, 1, 1), 10, 10.0], [date(2001, 2, 1), 1, 11.0]], + columns=["ym", "day", "a"], + ) + expected["ym"] = expected["ym"].astype("datetime64[ns]") + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_date_parser_resolution_if_not_ns(all_parsers): + # see gh-10245 + parser = all_parsers + data = """\ +date,time,prn,rxstatus +2013-11-03,19:00:00,126,00E80000 +2013-11-03,19:00:00,23,00E80000 +2013-11-03,19:00:00,13,00E80000 +""" + + def date_parser(dt, time): + try: + arr = dt + "T" + time + except TypeError: + # dt & time are date/time objects + arr = [datetime.combine(d, t) for d, t in zip(dt, time)] + return np.array(arr, dtype="datetime64[s]") + + result = parser.read_csv_check_warnings( + FutureWarning, + "use 'date_format' instead", + StringIO(data), + date_parser=date_parser, + parse_dates={"datetime": ["date", "time"]}, + index_col=["datetime", "prn"], + ) + + datetimes = np.array(["2013-11-03T19:00:00"] * 3, dtype="datetime64[s]") + expected = DataFrame( + data={"rxstatus": ["00E80000"] * 3}, + index=MultiIndex.from_arrays( + [datetimes, [126, 23, 13]], + names=["datetime", "prn"], + ), + ) + tm.assert_frame_equal(result, expected) + + +def test_parse_date_column_with_empty_string(all_parsers): + # see gh-6428 + parser = all_parsers + data = "case,opdate\n7,10/18/2006\n7,10/18/2008\n621, " + result = parser.read_csv(StringIO(data), parse_dates=["opdate"]) + + expected_data = [[7, "10/18/2006"], [7, "10/18/2008"], [621, " "]] + expected = DataFrame(expected_data, columns=["case", "opdate"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data,expected", + [ + ( + "a\n135217135789158401\n1352171357E+5", + DataFrame({"a": [135217135789158401, 135217135700000]}, dtype="float64"), + ), + ( + "a\n99999999999\n123456789012345\n1234E+0", + DataFrame({"a": [99999999999, 123456789012345, 1234]}, dtype="float64"), + ), + ], +) +@pytest.mark.parametrize("parse_dates", [True, False]) +def test_parse_date_float(all_parsers, data, expected, parse_dates): + # see gh-2697 + # + # Date parsing should fail, so we leave the data untouched + # (i.e. float precision should remain unchanged). 
+ parser = all_parsers + + result = parser.read_csv(StringIO(data), parse_dates=parse_dates) + tm.assert_frame_equal(result, expected) + + +def test_parse_timezone(all_parsers): + # see gh-22256 + parser = all_parsers + data = """dt,val + 2018-01-04 09:01:00+09:00,23350 + 2018-01-04 09:02:00+09:00,23400 + 2018-01-04 09:03:00+09:00,23400 + 2018-01-04 09:04:00+09:00,23400 + 2018-01-04 09:05:00+09:00,23400""" + result = parser.read_csv(StringIO(data), parse_dates=["dt"]) + + dti = date_range( + start="2018-01-04 09:01:00", + end="2018-01-04 09:05:00", + freq="1min", + tz=timezone(timedelta(minutes=540)), + )._with_freq(None) + expected_data = {"dt": dti, "val": [23350, 23400, 23400, 23400, 23400]} + + expected = DataFrame(expected_data) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # pandas.errors.ParserError: CSV parse error +@pytest.mark.parametrize( + "date_string", + ["32/32/2019", "02/30/2019", "13/13/2019", "13/2019", "a3/11/2018", "10/11/2o17"], +) +def test_invalid_parse_delimited_date(all_parsers, date_string): + parser = all_parsers + expected = DataFrame({0: [date_string]}, dtype="object") + result = parser.read_csv( + StringIO(date_string), + header=None, + parse_dates=[0], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "date_string,dayfirst,expected", + [ + # %d/%m/%Y; month > 12 thus replacement + ("13/02/2019", True, datetime(2019, 2, 13)), + # %m/%d/%Y; day > 12 thus there will be no replacement + ("02/13/2019", False, datetime(2019, 2, 13)), + # %d/%m/%Y; dayfirst==True thus replacement + ("04/02/2019", True, datetime(2019, 2, 4)), + ], +) +def test_parse_delimited_date_swap_no_warning( + all_parsers, date_string, dayfirst, expected, request +): + parser = all_parsers + expected = DataFrame({0: [expected]}, dtype="datetime64[ns]") + if parser.engine == "pyarrow": + if not dayfirst: + # "CSV parse error: Empty CSV file or block" + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + msg = "The 'dayfirst' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(date_string), header=None, dayfirst=dayfirst, parse_dates=[0] + ) + return + + result = parser.read_csv( + StringIO(date_string), header=None, dayfirst=dayfirst, parse_dates=[0] + ) + tm.assert_frame_equal(result, expected) + + +# ArrowInvalid: CSV parse error: Empty CSV file or block: cannot infer number of columns +@skip_pyarrow +@pytest.mark.parametrize( + "date_string,dayfirst,expected", + [ + # %d/%m/%Y; month > 12 + ("13/02/2019", False, datetime(2019, 2, 13)), + # %m/%d/%Y; day > 12 + ("02/13/2019", True, datetime(2019, 2, 13)), + ], +) +def test_parse_delimited_date_swap_with_warning( + all_parsers, date_string, dayfirst, expected +): + parser = all_parsers + expected = DataFrame({0: [expected]}, dtype="datetime64[ns]") + warning_msg = ( + "Parsing dates in .* format when dayfirst=.* was specified. " + "Pass `dayfirst=.*` or specify a format to silence this warning." + ) + result = parser.read_csv_check_warnings( + UserWarning, + warning_msg, + StringIO(date_string), + header=None, + dayfirst=dayfirst, + parse_dates=[0], + ) + tm.assert_frame_equal(result, expected) + + +def test_parse_multiple_delimited_dates_with_swap_warnings(): + # GH46210 + with pytest.raises( + ValueError, + match=( + r'^time data "31/05/2000" doesn\'t match format "%m/%d/%Y", ' + r"at position 1. 
You might want to try:" + ), + ): + pd.to_datetime(["01/01/2000", "31/05/2000", "31/05/2001", "01/02/2000"]) + + +# ArrowKeyError: Column 'fdate1' in include_columns does not exist in CSV file +@skip_pyarrow +@pytest.mark.parametrize( + "names, usecols, parse_dates, missing_cols", + [ + (None, ["val"], ["date", "time"], "date, time"), + (None, ["val"], [0, "time"], "time"), + (None, ["val"], [["date", "time"]], "date, time"), + (None, ["val"], [[0, "time"]], "time"), + (None, ["val"], {"date": [0, "time"]}, "time"), + (None, ["val"], {"date": ["date", "time"]}, "date, time"), + (None, ["val"], [["date", "time"], "date"], "date, time"), + (["date1", "time1", "temperature"], None, ["date", "time"], "date, time"), + ( + ["date1", "time1", "temperature"], + ["date1", "temperature"], + ["date1", "time"], + "time", + ), + ], +) +def test_missing_parse_dates_column_raises( + all_parsers, names, usecols, parse_dates, missing_cols +): + # gh-31251 column names provided in parse_dates could be missing. + parser = all_parsers + content = StringIO("date,time,val\n2020-01-31,04:20:32,32\n") + msg = f"Missing column provided to 'parse_dates': '{missing_cols}'" + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + warn = FutureWarning + if isinstance(parse_dates, list) and all( + isinstance(x, (int, str)) for x in parse_dates + ): + warn = None + + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False): + parser.read_csv( + content, sep=",", names=names, usecols=usecols, parse_dates=parse_dates + ) + + +@xfail_pyarrow # mismatched shape +def test_date_parser_and_names(all_parsers): + # GH#33699 + parser = all_parsers + data = StringIO("""x,y\n1,2""") + warn = UserWarning + if parser.engine == "pyarrow": + # DeprecationWarning for passing a Manager object + warn = (UserWarning, DeprecationWarning) + result = parser.read_csv_check_warnings( + warn, + "Could not infer format", + data, + parse_dates=["B"], + names=["B"], + ) + expected = DataFrame({"B": ["y", "2"]}, index=["x", "1"]) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +def test_date_parser_multiindex_columns(all_parsers): + parser = all_parsers + data = """a,b +1,2 +2019-12-31,6""" + result = parser.read_csv(StringIO(data), parse_dates=[("a", "1")], header=[0, 1]) + expected = DataFrame( + {("a", "1"): Timestamp("2019-12-31").as_unit("ns"), ("b", "2"): [6]} + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # TypeError: an integer is required +@pytest.mark.parametrize( + "parse_spec, col_name", + [ + ([[("a", "1"), ("b", "2")]], ("a_b", "1_2")), + ({("foo", "1"): [("a", "1"), ("b", "2")]}, ("foo", "1")), + ], +) +def test_date_parser_multiindex_columns_combine_cols(all_parsers, parse_spec, col_name): + parser = all_parsers + data = """a,b,c +1,2,3 +2019-12,-31,6""" + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), + parse_dates=parse_spec, + header=[0, 1], + ) + expected = DataFrame( + {col_name: Timestamp("2019-12-31").as_unit("ns"), ("c", "3"): [6]} + ) + tm.assert_frame_equal(result, expected) + + +def test_date_parser_usecols_thousands(all_parsers): + # GH#39365 + data = """A,B,C + 1,3,20-09-01-01 + 2,4,20-09-01-01 + """ + + parser = all_parsers + + if parser.engine == 
"pyarrow": + # DeprecationWarning for passing a Manager object + msg = "The 'thousands' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), + parse_dates=[1], + usecols=[1, 2], + thousands="-", + ) + return + + result = parser.read_csv_check_warnings( + UserWarning, + "Could not infer format", + StringIO(data), + parse_dates=[1], + usecols=[1, 2], + thousands="-", + ) + expected = DataFrame({"B": [3, 4], "C": [Timestamp("20-09-2001 01:00:00")] * 2}) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # mismatched shape +def test_parse_dates_and_keep_original_column(all_parsers): + # GH#13378 + parser = all_parsers + data = """A +20150908 +20150909 +""" + depr_msg = "The 'keep_date_col' keyword in pd.read_csv is deprecated" + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), parse_dates={"date": ["A"]}, keep_date_col=True + ) + expected_data = [Timestamp("2015-09-08"), Timestamp("2015-09-09")] + expected = DataFrame({"date": expected_data, "A": expected_data}) + tm.assert_frame_equal(result, expected) + + +def test_dayfirst_warnings(): + # GH 12585 + + # CASE 1: valid input + input = "date\n31/12/2014\n10/03/2011" + expected = DatetimeIndex( + ["2014-12-31", "2011-03-10"], dtype="datetime64[ns]", freq=None, name="date" + ) + warning_msg = ( + "Parsing dates in .* format when dayfirst=.* was specified. " + "Pass `dayfirst=.*` or specify a format to silence this warning." + ) + + # A. dayfirst arg correct, no warning + res1 = read_csv( + StringIO(input), parse_dates=["date"], dayfirst=True, index_col="date" + ).index + tm.assert_index_equal(expected, res1) + + # B. dayfirst arg incorrect, warning + with tm.assert_produces_warning(UserWarning, match=warning_msg): + res2 = read_csv( + StringIO(input), parse_dates=["date"], dayfirst=False, index_col="date" + ).index + tm.assert_index_equal(expected, res2) + + # CASE 2: invalid input + # cannot consistently process with single format + # return to user unaltered + + # first in DD/MM/YYYY, second in MM/DD/YYYY + input = "date\n31/12/2014\n03/30/2011" + expected = Index(["31/12/2014", "03/30/2011"], dtype="object", name="date") + + # A. use dayfirst=True + res5 = read_csv( + StringIO(input), parse_dates=["date"], dayfirst=True, index_col="date" + ).index + tm.assert_index_equal(expected, res5) + + # B. use dayfirst=False + with tm.assert_produces_warning(UserWarning, match=warning_msg): + res6 = read_csv( + StringIO(input), parse_dates=["date"], dayfirst=False, index_col="date" + ).index + tm.assert_index_equal(expected, res6) + + +@pytest.mark.parametrize( + "date_string, dayfirst", + [ + pytest.param( + "31/1/2014", + False, + id="second date is single-digit", + ), + pytest.param( + "1/31/2014", + True, + id="first date is single-digit", + ), + ], +) +def test_dayfirst_warnings_no_leading_zero(date_string, dayfirst): + # GH47880 + initial_value = f"date\n{date_string}" + expected = DatetimeIndex( + ["2014-01-31"], dtype="datetime64[ns]", freq=None, name="date" + ) + warning_msg = ( + "Parsing dates in .* format when dayfirst=.* was specified. " + "Pass `dayfirst=.*` or specify a format to silence this warning." 
+ ) + with tm.assert_produces_warning(UserWarning, match=warning_msg): + res = read_csv( + StringIO(initial_value), + parse_dates=["date"], + index_col="date", + dayfirst=dayfirst, + ).index + tm.assert_index_equal(expected, res) + + +@skip_pyarrow # CSV parse error: Expected 3 columns, got 4 +def test_infer_first_column_as_index(all_parsers): + # GH#11019 + parser = all_parsers + data = "a,b,c\n1970-01-01,2,3,4" + result = parser.read_csv( + StringIO(data), + parse_dates=["a"], + ) + expected = DataFrame({"a": "2", "b": 3, "c": 4}, index=["1970-01-01"]) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # pyarrow engine doesn't support passing a dict for na_values +@pytest.mark.parametrize( + ("key", "value", "warn"), + [ + ("date_parser", lambda x: pd.to_datetime(x, format="%Y-%m-%d"), FutureWarning), + ("date_format", "%Y-%m-%d", None), + ], +) +def test_replace_nans_before_parsing_dates(all_parsers, key, value, warn): + # GH#26203 + parser = all_parsers + data = """Test +2012-10-01 +0 +2015-05-15 +# +2017-09-09 +""" + result = parser.read_csv_check_warnings( + warn, + "use 'date_format' instead", + StringIO(data), + na_values={"Test": ["#", "0"]}, + parse_dates=["Test"], + **{key: value}, + ) + expected = DataFrame( + { + "Test": [ + Timestamp("2012-10-01"), + pd.NaT, + Timestamp("2015-05-15"), + pd.NaT, + Timestamp("2017-09-09"), + ] + } + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # string[python] instead of dt64[ns] +def test_parse_dates_and_string_dtype(all_parsers): + # GH#34066 + parser = all_parsers + data = """a,b +1,2019-12-31 +""" + result = parser.read_csv(StringIO(data), dtype="string", parse_dates=["b"]) + expected = DataFrame({"a": ["1"], "b": [Timestamp("2019-12-31")]}) + expected["a"] = expected["a"].astype("string") + tm.assert_frame_equal(result, expected) + + +def test_parse_dot_separated_dates(all_parsers): + # https://github.com/pandas-dev/pandas/issues/2586 + parser = all_parsers + data = """a,b +27.03.2003 14:55:00.000,1 +03.08.2003 15:20:00.000,2""" + if parser.engine == "pyarrow": + expected_index = Index( + ["27.03.2003 14:55:00.000", "03.08.2003 15:20:00.000"], + dtype="object", + name="a", + ) + warn = None + else: + expected_index = DatetimeIndex( + ["2003-03-27 14:55:00", "2003-08-03 15:20:00"], + dtype="datetime64[ns]", + name="a", + ) + warn = UserWarning + msg = r"when dayfirst=False \(the default\) was specified" + result = parser.read_csv_check_warnings( + warn, + msg, + StringIO(data), + parse_dates=True, + index_col=0, + raise_on_extra_warnings=False, + ) + expected = DataFrame({"b": [1, 2]}, index=expected_index) + tm.assert_frame_equal(result, expected) + + +def test_parse_dates_dict_format(all_parsers): + # GH#51240 + parser = all_parsers + data = """a,b +2019-12-31,31-12-2019 +2020-12-31,31-12-2020""" + + result = parser.read_csv( + StringIO(data), + date_format={"a": "%Y-%m-%d", "b": "%d-%m-%Y"}, + parse_dates=["a", "b"], + ) + expected = DataFrame( + { + "a": [Timestamp("2019-12-31"), Timestamp("2020-12-31")], + "b": [Timestamp("2019-12-31"), Timestamp("2020-12-31")], + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "key, parse_dates", [("a_b", [[0, 1]]), ("foo", {"foo": [0, 1]})] +) +def test_parse_dates_dict_format_two_columns(all_parsers, key, parse_dates): + # GH#51240 + parser = all_parsers + data = """a,b +31-,12-2019 +31-,12-2020""" + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + 
(FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), date_format={key: "%d- %m-%Y"}, parse_dates=parse_dates + ) + expected = DataFrame( + { + key: [Timestamp("2019-12-31"), Timestamp("2020-12-31")], + } + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # object dtype index +def test_parse_dates_dict_format_index(all_parsers): + # GH#51240 + parser = all_parsers + data = """a,b +2019-12-31,31-12-2019 +2020-12-31,31-12-2020""" + + result = parser.read_csv( + StringIO(data), date_format={"a": "%Y-%m-%d"}, parse_dates=True, index_col=0 + ) + expected = DataFrame( + { + "b": ["31-12-2019", "31-12-2020"], + }, + index=Index([Timestamp("2019-12-31"), Timestamp("2020-12-31")], name="a"), + ) + tm.assert_frame_equal(result, expected) + + +def test_parse_dates_arrow_engine(all_parsers): + # GH#53295 + parser = all_parsers + data = """a,b +2000-01-01 00:00:00,1 +2000-01-01 00:00:01,1""" + + result = parser.read_csv(StringIO(data), parse_dates=["a"]) + # TODO: make unit check more specific + if parser.engine == "pyarrow": + result["a"] = result["a"].dt.as_unit("ns") + expected = DataFrame( + { + "a": [ + Timestamp("2000-01-01 00:00:00"), + Timestamp("2000-01-01 00:00:01"), + ], + "b": 1, + } + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # object dtype index +def test_from_csv_with_mixed_offsets(all_parsers): + parser = all_parsers + data = "a\n2020-01-01T00:00:00+01:00\n2020-01-01T00:00:00+00:00" + result = parser.read_csv(StringIO(data), parse_dates=["a"])["a"] + expected = Series( + [ + Timestamp("2020-01-01 00:00:00+01:00"), + Timestamp("2020-01-01 00:00:00+00:00"), + ], + name="a", + index=[0, 1], + ) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_python_parser_only.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_python_parser_only.py new file mode 100644 index 0000000000000000000000000000000000000000..dbd474c6ae0b994911316517277f9c60d74bf862 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_python_parser_only.py @@ -0,0 +1,564 @@ +""" +Tests that apply specifically to the Python parser. Unless specifically +stated as a Python-specific issue, the goal is to eventually move as many of +these tests out of this module as soon as the C parser can accept further +arguments when parsing. +""" +from __future__ import annotations + +import csv +from io import ( + BytesIO, + StringIO, + TextIOWrapper, +) +from typing import TYPE_CHECKING + +import numpy as np +import pytest + +from pandas.errors import ( + ParserError, + ParserWarning, +) + +from pandas import ( + DataFrame, + Index, + MultiIndex, +) +import pandas._testing as tm + +if TYPE_CHECKING: + from collections.abc import Iterator + + +def test_default_separator(python_parser_only): + # see gh-17333 + # + # csv.Sniffer in Python treats "o" as separator. 
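+ # With sep=None the Python engine falls back to delimiter sniffing via the
+ # standard-library csv.Sniffer before parsing. A rough sketch of what that
+ # sniffing step does for this sample (illustrative only, not the exact
+ # pandas internals):
+ #
+ #     import csv
+ #     dialect = csv.Sniffer().sniff("aob\n1o2\n3o4")
+ #     dialect.delimiter  # -> "o", hence the columns "a" and "b" below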
+ data = "aob\n1o2\n3o4" + parser = python_parser_only + expected = DataFrame({"a": [1, 3], "b": [2, 4]}) + + result = parser.read_csv(StringIO(data), sep=None) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("skipfooter", ["foo", 1.5, True]) +def test_invalid_skipfooter_non_int(python_parser_only, skipfooter): + # see gh-15925 (comment) + data = "a\n1\n2" + parser = python_parser_only + msg = "skipfooter must be an integer" + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), skipfooter=skipfooter) + + +def test_invalid_skipfooter_negative(python_parser_only): + # see gh-15925 (comment) + data = "a\n1\n2" + parser = python_parser_only + msg = "skipfooter cannot be negative" + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), skipfooter=-1) + + +@pytest.mark.parametrize("kwargs", [{"sep": None}, {"delimiter": "|"}]) +def test_sniff_delimiter(python_parser_only, kwargs): + data = """index|A|B|C +foo|1|2|3 +bar|4|5|6 +baz|7|8|9 +""" + parser = python_parser_only + result = parser.read_csv(StringIO(data), index_col=0, **kwargs) + expected = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + columns=["A", "B", "C"], + index=Index(["foo", "bar", "baz"], name="index"), + ) + tm.assert_frame_equal(result, expected) + + +def test_sniff_delimiter_comment(python_parser_only): + data = """# comment line +index|A|B|C +# comment line +foo|1|2|3 # ignore | this +bar|4|5|6 +baz|7|8|9 +""" + parser = python_parser_only + result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#") + expected = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + columns=["A", "B", "C"], + index=Index(["foo", "bar", "baz"], name="index"), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("encoding", [None, "utf-8"]) +def test_sniff_delimiter_encoding(python_parser_only, encoding): + parser = python_parser_only + data = """ignore this +ignore this too +index|A|B|C +foo|1|2|3 +bar|4|5|6 +baz|7|8|9 +""" + + if encoding is not None: + data = data.encode(encoding) + data = BytesIO(data) + data = TextIOWrapper(data, encoding=encoding) + else: + data = StringIO(data) + + result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding) + expected = DataFrame( + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + columns=["A", "B", "C"], + index=Index(["foo", "bar", "baz"], name="index"), + ) + tm.assert_frame_equal(result, expected) + + +def test_single_line(python_parser_only): + # see gh-6607: sniff separator + parser = python_parser_only + result = parser.read_csv(StringIO("1,2"), names=["a", "b"], header=None, sep=None) + + expected = DataFrame({"a": [1], "b": [2]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("kwargs", [{"skipfooter": 2}, {"nrows": 3}]) +def test_skipfooter(python_parser_only, kwargs): + # see gh-6607 + data = """A,B,C +1,2,3 +4,5,6 +7,8,9 +want to skip this +also also skip this +""" + parser = python_parser_only + result = parser.read_csv(StringIO(data), **kwargs) + + expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")] +) +def test_decompression_regex_sep(python_parser_only, csv1, compression, klass): + # see gh-6607 + parser = python_parser_only + + with open(csv1, "rb") as f: + data = f.read() + + data = data.replace(b",", b"::") + expected = parser.read_csv(csv1) + + module = 
pytest.importorskip(compression) + klass = getattr(module, klass) + + with tm.ensure_clean() as path: + with klass(path, mode="wb") as tmp: + tmp.write(data) + + result = parser.read_csv(path, sep="::", compression=compression) + tm.assert_frame_equal(result, expected) + + +def test_read_csv_buglet_4x_multi_index(python_parser_only): + # see gh-6607 + data = """ A B C D E +one two three four +a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 +a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 +x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" + parser = python_parser_only + + expected = DataFrame( + [ + [-0.5109, -2.3358, -0.4645, 0.05076, 0.3640], + [0.4473, 1.4152, 0.2834, 1.00661, 0.1744], + [-0.6662, -0.5243, -0.3580, 0.89145, 2.5838], + ], + columns=["A", "B", "C", "D", "E"], + index=MultiIndex.from_tuples( + [("a", "b", 10.0032, 5), ("a", "q", 20, 4), ("x", "q", 30, 3)], + names=["one", "two", "three", "four"], + ), + ) + result = parser.read_csv(StringIO(data), sep=r"\s+") + tm.assert_frame_equal(result, expected) + + +def test_read_csv_buglet_4x_multi_index2(python_parser_only): + # see gh-6893 + data = " A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9" + parser = python_parser_only + + expected = DataFrame.from_records( + [(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)], + columns=list("abcABC"), + index=list("abc"), + ) + result = parser.read_csv(StringIO(data), sep=r"\s+") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("add_footer", [True, False]) +def test_skipfooter_with_decimal(python_parser_only, add_footer): + # see gh-6971 + data = "1#2\n3#4" + parser = python_parser_only + expected = DataFrame({"a": [1.2, 3.4]}) + + if add_footer: + # The stray footer line should not mess with the + # casting of the first two lines if we skip it. + kwargs = {"skipfooter": 1} + data += "\nFooter" + else: + kwargs = {} + + result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "sep", ["::", "#####", "!!!", "123", "#1!c5", "%!c!d", "@@#4:2", "_!pd#_"] +) +@pytest.mark.parametrize( + "encoding", ["utf-16", "utf-16-be", "utf-16-le", "utf-32", "cp037"] +) +def test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding): + # see gh-3404 + expected = DataFrame({"a": [1], "b": [2]}) + parser = python_parser_only + + data = "1" + sep + "2" + encoded_data = data.encode(encoding) + + result = parser.read_csv( + BytesIO(encoded_data), sep=sep, names=["a", "b"], encoding=encoding + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE]) +def test_multi_char_sep_quotes(python_parser_only, quoting): + # see gh-13374 + kwargs = {"sep": ",,"} + parser = python_parser_only + + data = 'a,,b\n1,,a\n2,,"2,,b"' + + if quoting == csv.QUOTE_NONE: + msg = "Expected 2 fields in line 3, saw 3" + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), quoting=quoting, **kwargs) + else: + msg = "ignored when a multi-char delimiter is used" + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), quoting=quoting, **kwargs) + + +def test_none_delimiter(python_parser_only): + # see gh-13374 and gh-17465 + parser = python_parser_only + data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9" + expected = DataFrame({"a": [0, 7], "b": [1, 8], "c": [2, 9]}) + + # We expect the third line in the data to be + # skipped because it is malformed, but we do + # not expect any errors to occur. 
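+ # Put differently: on_bad_lines="warn" emits a ParserWarning for the
+ # offending row and drops it, on_bad_lines="error" (the default) would
+ # raise ParserError instead, and on_bad_lines="skip" would drop the row
+ # silently. A minimal sketch of the same data under the other modes
+ # (assumed behaviour, mirroring the warn case exercised below):
+ #
+ #     parser.read_csv(StringIO(data), sep=None, on_bad_lines="skip")   # no warning, row dropped
+ #     parser.read_csv(StringIO(data), sep=None, on_bad_lines="error")  # raises ParserError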
+ with tm.assert_produces_warning( + ParserWarning, match="Skipping line 3", check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), header=0, sep=None, on_bad_lines="warn" + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("data", ['a\n1\n"b"a', 'a,b,c\ncat,foo,bar\ndog,foo,"baz']) +@pytest.mark.parametrize("skipfooter", [0, 1]) +def test_skipfooter_bad_row(python_parser_only, data, skipfooter): + # see gh-13879 and gh-15910 + parser = python_parser_only + if skipfooter: + msg = "parsing errors in the skipped footer rows" + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), skipfooter=skipfooter) + else: + msg = "unexpected end of data|expected after" + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), skipfooter=skipfooter) + + +def test_malformed_skipfooter(python_parser_only): + parser = python_parser_only + data = """ignore +A,B,C +1,2,3 # comment +1,2,3,4,5 +2,3,4 +footer +""" + msg = "Expected 3 fields in line 4, saw 5" + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1) + + +def test_python_engine_file_no_next(python_parser_only): + parser = python_parser_only + + class NoNextBuffer: + def __init__(self, csv_data) -> None: + self.data = csv_data + + def __iter__(self) -> Iterator: + return self.data.__iter__() + + def read(self): + return self.data + + def readline(self): + return self.data + + parser.read_csv(NoNextBuffer("a\n1")) + + +@pytest.mark.parametrize("bad_line_func", [lambda x: ["2", "3"], lambda x: x[:2]]) +def test_on_bad_lines_callable(python_parser_only, bad_line_func): + # GH 5686 + parser = python_parser_only + data = """a,b +1,2 +2,3,4,5,6 +3,4 +""" + bad_sio = StringIO(data) + result = parser.read_csv(bad_sio, on_bad_lines=bad_line_func) + expected = DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}) + tm.assert_frame_equal(result, expected) + + +def test_on_bad_lines_callable_write_to_external_list(python_parser_only): + # GH 5686 + parser = python_parser_only + data = """a,b +1,2 +2,3,4,5,6 +3,4 +""" + bad_sio = StringIO(data) + lst = [] + + def bad_line_func(bad_line: list[str]) -> list[str]: + lst.append(bad_line) + return ["2", "3"] + + result = parser.read_csv(bad_sio, on_bad_lines=bad_line_func) + expected = DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}) + tm.assert_frame_equal(result, expected) + assert lst == [["2", "3", "4", "5", "6"]] + + +@pytest.mark.parametrize("bad_line_func", [lambda x: ["foo", "bar"], lambda x: x[:2]]) +@pytest.mark.parametrize("sep", [",", "111"]) +def test_on_bad_lines_callable_iterator_true(python_parser_only, bad_line_func, sep): + # GH 5686 + # iterator=True has a separate code path than iterator=False + parser = python_parser_only + data = f""" +0{sep}1 +hi{sep}there +foo{sep}bar{sep}baz +good{sep}bye +""" + bad_sio = StringIO(data) + result_iter = parser.read_csv( + bad_sio, on_bad_lines=bad_line_func, chunksize=1, iterator=True, sep=sep + ) + expecteds = [ + {"0": "hi", "1": "there"}, + {"0": "foo", "1": "bar"}, + {"0": "good", "1": "bye"}, + ] + for i, (result, expected) in enumerate(zip(result_iter, expecteds)): + expected = DataFrame(expected, index=range(i, i + 1)) + tm.assert_frame_equal(result, expected) + + +def test_on_bad_lines_callable_dont_swallow_errors(python_parser_only): + # GH 5686 + parser = python_parser_only + data = """a,b +1,2 +2,3,4,5,6 +3,4 +""" + bad_sio = StringIO(data) + msg = "This function is buggy." 
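+ # The callable form of on_bad_lines receives the offending row as a list of
+ # strings; any exception the callable raises should propagate to the caller
+ # rather than being swallowed by the parser, which is what this test checks.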
+ + def bad_line_func(bad_line): + raise ValueError(msg) + + with pytest.raises(ValueError, match=msg): + parser.read_csv(bad_sio, on_bad_lines=bad_line_func) + + +def test_on_bad_lines_callable_not_expected_length(python_parser_only): + # GH 5686 + parser = python_parser_only + data = """a,b +1,2 +2,3,4,5,6 +3,4 +""" + bad_sio = StringIO(data) + + result = parser.read_csv_check_warnings( + ParserWarning, "Length of header or names", bad_sio, on_bad_lines=lambda x: x + ) + expected = DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}) + tm.assert_frame_equal(result, expected) + + +def test_on_bad_lines_callable_returns_none(python_parser_only): + # GH 5686 + parser = python_parser_only + data = """a,b +1,2 +2,3,4,5,6 +3,4 +""" + bad_sio = StringIO(data) + + result = parser.read_csv(bad_sio, on_bad_lines=lambda x: None) + expected = DataFrame({"a": [1, 3], "b": [2, 4]}) + tm.assert_frame_equal(result, expected) + + +def test_on_bad_lines_index_col_inferred(python_parser_only): + # GH 5686 + parser = python_parser_only + data = """a,b +1,2,3 +4,5,6 +""" + bad_sio = StringIO(data) + + result = parser.read_csv(bad_sio, on_bad_lines=lambda x: ["99", "99"]) + expected = DataFrame({"a": [2, 5], "b": [3, 6]}, index=[1, 4]) + tm.assert_frame_equal(result, expected) + + +def test_index_col_false_and_header_none(python_parser_only): + # GH#46955 + parser = python_parser_only + data = """ +0.5,0.03 +0.1,0.2,0.3,2 +""" + result = parser.read_csv_check_warnings( + ParserWarning, + "Length of header", + StringIO(data), + sep=",", + header=None, + index_col=False, + ) + expected = DataFrame({0: [0.5, 0.1], 1: [0.03, 0.2]}) + tm.assert_frame_equal(result, expected) + + +def test_header_int_do_not_infer_multiindex_names_on_different_line(python_parser_only): + # GH#46569 + parser = python_parser_only + data = StringIO("a\na,b\nc,d,e\nf,g,h") + result = parser.read_csv_check_warnings( + ParserWarning, "Length of header", data, engine="python", index_col=False + ) + expected = DataFrame({"a": ["a", "c", "f"]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", [{"a": object}, {"a": str, "b": np.int64, "c": np.int64}] +) +def test_no_thousand_convert_with_dot_for_non_numeric_cols(python_parser_only, dtype): + # GH#50270 + parser = python_parser_only + data = """\ +a;b;c +0000.7995;16.000;0 +3.03.001.00514;0;4.000 +4923.600.041;23.000;131""" + result = parser.read_csv( + StringIO(data), + sep=";", + dtype=dtype, + thousands=".", + ) + expected = DataFrame( + { + "a": ["0000.7995", "3.03.001.00514", "4923.600.041"], + "b": [16000, 0, 23000], + "c": [0, 4000, 131], + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype,expected", + [ + ( + {"a": str, "b": np.float64, "c": np.int64}, + DataFrame( + { + "b": [16000.1, 0, 23000], + "c": [0, 4001, 131], + } + ), + ), + ( + str, + DataFrame( + { + "b": ["16,000.1", "0", "23,000"], + "c": ["0", "4,001", "131"], + } + ), + ), + ], +) +def test_no_thousand_convert_for_non_numeric_cols(python_parser_only, dtype, expected): + # GH#50270 + parser = python_parser_only + data = """a;b;c +0000,7995;16,000.1;0 +3,03,001,00514;0;4,001 +4923,600,041;23,000;131 +""" + result = parser.read_csv( + StringIO(data), + sep=";", + dtype=dtype, + thousands=",", + ) + expected.insert(0, "a", ["0000,7995", "3,03,001,00514", "4923,600,041"]) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_quoting.py 
b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_quoting.py new file mode 100644 index 0000000000000000000000000000000000000000..a70b7e3389c1ba5ebc6e2a8fadd54e13999e12f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_quoting.py @@ -0,0 +1,183 @@ +""" +Tests that quoting specifications are properly handled +during parsing for all of the parsers defined in parsers.py +""" + +import csv +from io import StringIO + +import pytest + +from pandas.compat import PY311 +from pandas.errors import ParserError + +from pandas import DataFrame +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + + +@pytest.mark.parametrize( + "kwargs,msg", + [ + ({"quotechar": "foo"}, '"quotechar" must be a(n)? 1-character string'), + ( + {"quotechar": None, "quoting": csv.QUOTE_MINIMAL}, + "quotechar must be set if quoting enabled", + ), + ({"quotechar": 2}, '"quotechar" must be string( or None)?, not int'), + ], +) +@skip_pyarrow # ParserError: CSV parse error: Empty CSV file or block +def test_bad_quote_char(all_parsers, kwargs, msg): + data = "1,2,3" + parser = all_parsers + + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO(data), **kwargs) + + +@pytest.mark.parametrize( + "quoting,msg", + [ + ("foo", '"quoting" must be an integer|Argument'), + (10, 'bad "quoting" value'), # quoting must be in the range [0, 3] + ], +) +@xfail_pyarrow # ValueError: The 'quoting' option is not supported +def test_bad_quoting(all_parsers, quoting, msg): + data = "1,2,3" + parser = all_parsers + + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO(data), quoting=quoting) + + +def test_quote_char_basic(all_parsers): + parser = all_parsers + data = 'a,b,c\n1,2,"cat"' + expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"]) + + result = parser.read_csv(StringIO(data), quotechar='"') + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("quote_char", ["~", "*", "%", "$", "@", "P"]) +def test_quote_char_various(all_parsers, quote_char): + parser = all_parsers + expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"]) + + data = 'a,b,c\n1,2,"cat"' + new_data = data.replace('"', quote_char) + + result = parser.read_csv(StringIO(new_data), quotechar=quote_char) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: The 'quoting' option is not supported +@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE]) +@pytest.mark.parametrize("quote_char", ["", None]) +def test_null_quote_char(all_parsers, quoting, quote_char): + kwargs = {"quotechar": quote_char, "quoting": quoting} + data = "a,b,c\n1,2,3" + parser = all_parsers + + if quoting != csv.QUOTE_NONE: + # Sanity checking. 
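+ # Any quoting mode other than csv.QUOTE_NONE requires a real one-character
+ # quotechar, so an empty string or None is rejected up front; the expected
+ # message differs between Python 3.11+'s csv module and pandas' own
+ # validation, hence the branch below.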
+ msg = ( + '"quotechar" must be a 1-character string' + if PY311 and all_parsers.engine == "python" and quote_char == "" + else "quotechar must be set if quoting enabled" + ) + + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO(data), **kwargs) + elif not (PY311 and all_parsers.engine == "python"): + # Python 3.11+ doesn't support null/blank quote chars in their csv parsers + expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"]) + result = parser.read_csv(StringIO(data), **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs,exp_data", + [ + ({}, [[1, 2, "foo"]]), # Test default. + # QUOTE_MINIMAL only applies to CSV writing, so no effect on reading. + ({"quotechar": '"', "quoting": csv.QUOTE_MINIMAL}, [[1, 2, "foo"]]), + # QUOTE_MINIMAL only applies to CSV writing, so no effect on reading. + ({"quotechar": '"', "quoting": csv.QUOTE_ALL}, [[1, 2, "foo"]]), + # QUOTE_NONE tells the reader to do no special handling + # of quote characters and leave them alone. + ({"quotechar": '"', "quoting": csv.QUOTE_NONE}, [[1, 2, '"foo"']]), + # QUOTE_NONNUMERIC tells the reader to cast + # all non-quoted fields to float + ({"quotechar": '"', "quoting": csv.QUOTE_NONNUMERIC}, [[1.0, 2.0, "foo"]]), + ], +) +@xfail_pyarrow # ValueError: The 'quoting' option is not supported +def test_quoting_various(all_parsers, kwargs, exp_data): + data = '1,2,"foo"' + parser = all_parsers + columns = ["a", "b", "c"] + + result = parser.read_csv(StringIO(data), names=columns, **kwargs) + expected = DataFrame(exp_data, columns=columns) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "doublequote,exp_data", [(True, [[3, '4 " 5']]), (False, [[3, '4 " 5"']])] +) +def test_double_quote(all_parsers, doublequote, exp_data, request): + parser = all_parsers + data = 'a,b\n3,"4 "" 5"' + + if parser.engine == "pyarrow" and not doublequote: + mark = pytest.mark.xfail(reason="Mismatched result") + request.applymarker(mark) + + result = parser.read_csv(StringIO(data), quotechar='"', doublequote=doublequote) + expected = DataFrame(exp_data, columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("quotechar", ['"', "\u0001"]) +def test_quotechar_unicode(all_parsers, quotechar): + # see gh-14477 + data = "a\n1" + parser = all_parsers + expected = DataFrame({"a": [1]}) + + result = parser.read_csv(StringIO(data), quotechar=quotechar) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("balanced", [True, False]) +def test_unbalanced_quoting(all_parsers, balanced, request): + # see gh-22789. + parser = all_parsers + data = 'a,b,c\n1,2,"3' + + if parser.engine == "pyarrow" and not balanced: + mark = pytest.mark.xfail(reason="Mismatched result") + request.applymarker(mark) + + if balanced: + # Re-balance the quoting and read in without errors. 
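+ # Appending the missing closing quote turns '1,2,"3' into '1,2,"3"', so the
+ # last field parses cleanly as 3; without it both engines treat the open
+ # quote as an unterminated string and raise ParserError with the
+ # engine-specific messages matched in the else branch.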
+ expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"]) + result = parser.read_csv(StringIO(data + '"')) + tm.assert_frame_equal(result, expected) + else: + msg = ( + "EOF inside string starting at row 1" + if parser.engine == "c" + else "unexpected end of data" + ) + + with pytest.raises(ParserError, match=msg): + parser.read_csv(StringIO(data)) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_read_fwf.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_read_fwf.py new file mode 100644 index 0000000000000000000000000000000000000000..bed2b5e10a6f79cd3393be9859d652720800aadd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_read_fwf.py @@ -0,0 +1,1044 @@ +""" +Tests the 'read_fwf' function in parsers.py. This +test suite is independent of the others because the +engine is set to 'python-fwf' internally. +""" + +from datetime import datetime +from io import ( + BytesIO, + StringIO, +) +from pathlib import Path + +import numpy as np +import pytest + +from pandas.errors import EmptyDataError + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +from pandas.io.common import urlopen +from pandas.io.parsers import ( + read_csv, + read_fwf, +) + + +def test_basic(): + data = """\ +A B C D +201158 360.242940 149.910199 11950.7 +201159 444.953632 166.985655 11788.4 +201160 364.136849 183.628767 11806.2 +201161 413.836124 184.375703 11916.8 +201162 502.953953 173.237159 12468.3 +""" + result = read_fwf(StringIO(data)) + expected = DataFrame( + [ + [201158, 360.242940, 149.910199, 11950.7], + [201159, 444.953632, 166.985655, 11788.4], + [201160, 364.136849, 183.628767, 11806.2], + [201161, 413.836124, 184.375703, 11916.8], + [201162, 502.953953, 173.237159, 12468.3], + ], + columns=["A", "B", "C", "D"], + ) + tm.assert_frame_equal(result, expected) + + +def test_colspecs(): + data = """\ +A B C D E +201158 360.242940 149.910199 11950.7 +201159 444.953632 166.985655 11788.4 +201160 364.136849 183.628767 11806.2 +201161 413.836124 184.375703 11916.8 +201162 502.953953 173.237159 12468.3 +""" + colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] + result = read_fwf(StringIO(data), colspecs=colspecs) + + expected = DataFrame( + [ + [2011, 58, 360.242940, 149.910199, 11950.7], + [2011, 59, 444.953632, 166.985655, 11788.4], + [2011, 60, 364.136849, 183.628767, 11806.2], + [2011, 61, 413.836124, 184.375703, 11916.8], + [2011, 62, 502.953953, 173.237159, 12468.3], + ], + columns=["A", "B", "C", "D", "E"], + ) + tm.assert_frame_equal(result, expected) + + +def test_widths(): + data = """\ +A B C D E +2011 58 360.242940 149.910199 11950.7 +2011 59 444.953632 166.985655 11788.4 +2011 60 364.136849 183.628767 11806.2 +2011 61 413.836124 184.375703 11916.8 +2011 62 502.953953 173.237159 12468.3 +""" + result = read_fwf(StringIO(data), widths=[5, 5, 13, 13, 7]) + + expected = DataFrame( + [ + [2011, 58, 360.242940, 149.910199, 11950.7], + [2011, 59, 444.953632, 166.985655, 11788.4], + [2011, 60, 364.136849, 183.628767, 11806.2], + [2011, 61, 413.836124, 184.375703, 11916.8], + [2011, 62, 502.953953, 173.237159, 12468.3], + ], + columns=["A", "B", "C", "D", "E"], + ) + tm.assert_frame_equal(result, expected) + + +def test_non_space_filler(): + # From Thomas Kluyver: + # + # Apparently, some non-space filler characters can be seen, this is + # supported by specifying the 'delimiter' character: + # + # 
http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html + data = """\ +A~~~~B~~~~C~~~~~~~~~~~~D~~~~~~~~~~~~E +201158~~~~360.242940~~~149.910199~~~11950.7 +201159~~~~444.953632~~~166.985655~~~11788.4 +201160~~~~364.136849~~~183.628767~~~11806.2 +201161~~~~413.836124~~~184.375703~~~11916.8 +201162~~~~502.953953~~~173.237159~~~12468.3 +""" + colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] + result = read_fwf(StringIO(data), colspecs=colspecs, delimiter="~") + + expected = DataFrame( + [ + [2011, 58, 360.242940, 149.910199, 11950.7], + [2011, 59, 444.953632, 166.985655, 11788.4], + [2011, 60, 364.136849, 183.628767, 11806.2], + [2011, 61, 413.836124, 184.375703, 11916.8], + [2011, 62, 502.953953, 173.237159, 12468.3], + ], + columns=["A", "B", "C", "D", "E"], + ) + tm.assert_frame_equal(result, expected) + + +def test_over_specified(): + data = """\ +A B C D E +201158 360.242940 149.910199 11950.7 +201159 444.953632 166.985655 11788.4 +201160 364.136849 183.628767 11806.2 +201161 413.836124 184.375703 11916.8 +201162 502.953953 173.237159 12468.3 +""" + colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] + + with pytest.raises(ValueError, match="must specify only one of"): + read_fwf(StringIO(data), colspecs=colspecs, widths=[6, 10, 10, 7]) + + +def test_under_specified(): + data = """\ +A B C D E +201158 360.242940 149.910199 11950.7 +201159 444.953632 166.985655 11788.4 +201160 364.136849 183.628767 11806.2 +201161 413.836124 184.375703 11916.8 +201162 502.953953 173.237159 12468.3 +""" + with pytest.raises(ValueError, match="Must specify either"): + read_fwf(StringIO(data), colspecs=None, widths=None) + + +def test_read_csv_compat(): + csv_data = """\ +A,B,C,D,E +2011,58,360.242940,149.910199,11950.7 +2011,59,444.953632,166.985655,11788.4 +2011,60,364.136849,183.628767,11806.2 +2011,61,413.836124,184.375703,11916.8 +2011,62,502.953953,173.237159,12468.3 +""" + expected = read_csv(StringIO(csv_data), engine="python") + + fwf_data = """\ +A B C D E +201158 360.242940 149.910199 11950.7 +201159 444.953632 166.985655 11788.4 +201160 364.136849 183.628767 11806.2 +201161 413.836124 184.375703 11916.8 +201162 502.953953 173.237159 12468.3 +""" + colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] + result = read_fwf(StringIO(fwf_data), colspecs=colspecs) + tm.assert_frame_equal(result, expected) + + +def test_bytes_io_input(): + data = BytesIO("שלום\nשלום".encode()) # noqa: RUF001 + result = read_fwf(data, widths=[2, 2], encoding="utf8") + expected = DataFrame([["של", "ום"]], columns=["של", "ום"]) + tm.assert_frame_equal(result, expected) + + +def test_fwf_colspecs_is_list_or_tuple(): + data = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +qux,12,13,14,15 +foo2,12,13,14,15 +bar2,12,13,14,15 +""" + + msg = "column specifications must be a list or tuple.+" + + with pytest.raises(TypeError, match=msg): + read_fwf(StringIO(data), colspecs={"a": 1}, delimiter=",") + + +def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(): + data = """index,A,B,C,D +foo,2,3,4,5 +bar,7,8,9,10 +baz,12,13,14,15 +qux,12,13,14,15 +foo2,12,13,14,15 +bar2,12,13,14,15 +""" + + msg = "Each column specification must be.+" + + with pytest.raises(TypeError, match=msg): + read_fwf(StringIO(data), colspecs=[("a", 1)]) + + +@pytest.mark.parametrize( + "colspecs,exp_data", + [ + ([(0, 3), (3, None)], [[123, 456], [456, 789]]), + ([(None, 3), (3, 6)], [[123, 456], [456, 789]]), + ([(0, None), (3, None)], [[123456, 456], 
[456789, 789]]), + ([(None, None), (3, 6)], [[123456, 456], [456789, 789]]), + ], +) +def test_fwf_colspecs_none(colspecs, exp_data): + # see gh-7079 + data = """\ +123456 +456789 +""" + expected = DataFrame(exp_data) + + result = read_fwf(StringIO(data), colspecs=colspecs, header=None) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "infer_nrows,exp_data", + [ + # infer_nrows --> colspec == [(2, 3), (5, 6)] + (1, [[1, 2], [3, 8]]), + # infer_nrows > number of rows + (10, [[1, 2], [123, 98]]), + ], +) +def test_fwf_colspecs_infer_nrows(infer_nrows, exp_data): + # see gh-15138 + data = """\ + 1 2 +123 98 +""" + expected = DataFrame(exp_data) + + result = read_fwf(StringIO(data), infer_nrows=infer_nrows, header=None) + tm.assert_frame_equal(result, expected) + + +def test_fwf_regression(): + # see gh-3594 + # + # Turns out "T060" is parsable as a datetime slice! + tz_list = [1, 10, 20, 30, 60, 80, 100] + widths = [16] + [8] * len(tz_list) + names = ["SST"] + [f"T{z:03d}" for z in tz_list[1:]] + + data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192 +2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869 +2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657 +2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379 +2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039 +""" + + with tm.assert_produces_warning(FutureWarning, match="use 'date_format' instead"): + result = read_fwf( + StringIO(data), + index_col=0, + header=None, + names=names, + widths=widths, + parse_dates=True, + date_parser=lambda s: datetime.strptime(s, "%Y%j%H%M%S"), + ) + expected = DataFrame( + [ + [9.5403, 9.4105, 8.6571, 7.8372, 6.0612, 5.8843, 5.5192], + [9.5435, 9.2010, 8.6167, 7.8176, 6.0804, 5.8728, 5.4869], + [9.5873, 9.1326, 8.4694, 7.5889, 6.0422, 5.8526, 5.4657], + [9.5810, 9.0896, 8.4009, 7.4652, 6.0322, 5.8189, 5.4379], + [9.6034, 9.0897, 8.3822, 7.4905, 6.0908, 5.7904, 5.4039], + ], + index=DatetimeIndex( + [ + "2009-06-13 20:20:00", + "2009-06-13 20:30:00", + "2009-06-13 20:40:00", + "2009-06-13 20:50:00", + "2009-06-13 21:00:00", + ] + ), + columns=["SST", "T010", "T020", "T030", "T060", "T080", "T100"], + ) + tm.assert_frame_equal(result, expected) + result = read_fwf( + StringIO(data), + index_col=0, + header=None, + names=names, + widths=widths, + parse_dates=True, + date_format="%Y%j%H%M%S", + ) + tm.assert_frame_equal(result, expected) + + +def test_fwf_for_uint8(): + data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127 +1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa: E501 + df = read_fwf( + StringIO(data), + colspecs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)], + names=["time", "pri", "pgn", "dst", "src", "data"], + converters={ + "pgn": lambda x: int(x, 16), + "src": lambda x: int(x, 16), + "dst": lambda x: int(x, 16), + "data": lambda x: len(x.split(" ")), + }, + ) + + expected = DataFrame( + [ + [1421302965.213420, 3, 61184, 23, 40, 8], + [1421302964.226776, 6, 61442, None, 71, 8], + ], + columns=["time", "pri", "pgn", "dst", "src", "data"], + ) + expected["dst"] = expected["dst"].astype(object) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("comment", ["#", "~", "!"]) +def test_fwf_comment(comment): + data = """\ + 1 2. 
4 #hello world + 5 NaN 10.0 +""" + data = data.replace("#", comment) + + colspecs = [(0, 3), (4, 9), (9, 25)] + expected = DataFrame([[1, 2.0, 4], [5, np.nan, 10.0]]) + + result = read_fwf(StringIO(data), colspecs=colspecs, header=None, comment=comment) + tm.assert_almost_equal(result, expected) + + +def test_fwf_skip_blank_lines(): + data = """ + +A B C D + +201158 360.242940 149.910199 11950.7 +201159 444.953632 166.985655 11788.4 + + +201162 502.953953 173.237159 12468.3 + +""" + result = read_fwf(StringIO(data), skip_blank_lines=True) + expected = DataFrame( + [ + [201158, 360.242940, 149.910199, 11950.7], + [201159, 444.953632, 166.985655, 11788.4], + [201162, 502.953953, 173.237159, 12468.3], + ], + columns=["A", "B", "C", "D"], + ) + tm.assert_frame_equal(result, expected) + + data = """\ +A B C D +201158 360.242940 149.910199 11950.7 +201159 444.953632 166.985655 11788.4 + + +201162 502.953953 173.237159 12468.3 +""" + result = read_fwf(StringIO(data), skip_blank_lines=False) + expected = DataFrame( + [ + [201158, 360.242940, 149.910199, 11950.7], + [201159, 444.953632, 166.985655, 11788.4], + [np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan], + [201162, 502.953953, 173.237159, 12468.3], + ], + columns=["A", "B", "C", "D"], + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("thousands", [",", "#", "~"]) +def test_fwf_thousands(thousands): + data = """\ + 1 2,334.0 5 +10 13 10. +""" + data = data.replace(",", thousands) + + colspecs = [(0, 3), (3, 11), (12, 16)] + expected = DataFrame([[1, 2334.0, 5], [10, 13, 10.0]]) + + result = read_fwf( + StringIO(data), header=None, colspecs=colspecs, thousands=thousands + ) + tm.assert_almost_equal(result, expected) + + +@pytest.mark.parametrize("header", [True, False]) +def test_bool_header_arg(header): + # see gh-6114 + data = """\ +MyColumn + a + b + a + b""" + + msg = "Passing a bool to header is invalid" + with pytest.raises(TypeError, match=msg): + read_fwf(StringIO(data), header=header) + + +def test_full_file(): + # File with all values. + test = """index A B C +2000-01-03T00:00:00 0.980268513777 3 foo +2000-01-04T00:00:00 1.04791624281 -4 bar +2000-01-05T00:00:00 0.498580885705 73 baz +2000-01-06T00:00:00 1.12020151869 1 foo +2000-01-07T00:00:00 0.487094399463 0 bar +2000-01-10T00:00:00 0.836648671666 2 baz +2000-01-11T00:00:00 0.157160753327 34 foo""" + colspecs = ((0, 19), (21, 35), (38, 40), (42, 45)) + expected = read_fwf(StringIO(test), colspecs=colspecs) + + result = read_fwf(StringIO(test)) + tm.assert_frame_equal(result, expected) + + +def test_full_file_with_missing(): + # File with missing values. + test = """index A B C +2000-01-03T00:00:00 0.980268513777 3 foo +2000-01-04T00:00:00 1.04791624281 -4 bar + 0.498580885705 73 baz +2000-01-06T00:00:00 1.12020151869 1 foo +2000-01-07T00:00:00 0 bar +2000-01-10T00:00:00 0.836648671666 2 baz + 34""" + colspecs = ((0, 19), (21, 35), (38, 40), (42, 45)) + expected = read_fwf(StringIO(test), colspecs=colspecs) + + result = read_fwf(StringIO(test)) + tm.assert_frame_equal(result, expected) + + +def test_full_file_with_spaces(): + # File with spaces in columns. 
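+ # When neither colspecs nor widths is given, read_fwf infers the column
+ # boundaries from runs of whitespace shared across the first infer_nrows
+ # sampled rows (100 by default), so a space inside a field such as
+ # "Jennifer Love Hewitt" should not introduce a split; the explicitly
+ # specified colspecs serve as the reference result for the inferred ones.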
+ test = """ +Account Name Balance CreditLimit AccountCreated +101 Keanu Reeves 9315.45 10000.00 1/17/1998 +312 Gerard Butler 90.00 1000.00 8/6/2003 +868 Jennifer Love Hewitt 0 17000.00 5/25/1985 +761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006 +317 Bill Murray 789.65 5000.00 2/5/2007 +""".strip( + "\r\n" + ) + colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70)) + expected = read_fwf(StringIO(test), colspecs=colspecs) + + result = read_fwf(StringIO(test)) + tm.assert_frame_equal(result, expected) + + +def test_full_file_with_spaces_and_missing(): + # File with spaces and missing values in columns. + test = """ +Account Name Balance CreditLimit AccountCreated +101 10000.00 1/17/1998 +312 Gerard Butler 90.00 1000.00 8/6/2003 +868 5/25/1985 +761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006 +317 Bill Murray 789.65 +""".strip( + "\r\n" + ) + colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70)) + expected = read_fwf(StringIO(test), colspecs=colspecs) + + result = read_fwf(StringIO(test)) + tm.assert_frame_equal(result, expected) + + +def test_messed_up_data(): + # Completely messed up file. + test = """ + Account Name Balance Credit Limit Account Created + 101 10000.00 1/17/1998 + 312 Gerard Butler 90.00 1000.00 + + 761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006 + 317 Bill Murray 789.65 +""".strip( + "\r\n" + ) + colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79)) + expected = read_fwf(StringIO(test), colspecs=colspecs) + + result = read_fwf(StringIO(test)) + tm.assert_frame_equal(result, expected) + + +def test_multiple_delimiters(): + test = r""" +col1~~~~~col2 col3++++++++++++++++++col4 +~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves + 33+++122.33\\\bar.........Gerard Butler +++44~~~~12.01 baz~~Jennifer Love Hewitt +~~55 11+++foo++++Jada Pinkett-Smith +..66++++++.03~~~bar Bill Murray +""".strip( + "\r\n" + ) + delimiter = " +~.\\" + colspecs = ((0, 4), (7, 13), (15, 19), (21, 41)) + expected = read_fwf(StringIO(test), colspecs=colspecs, delimiter=delimiter) + + result = read_fwf(StringIO(test), delimiter=delimiter) + tm.assert_frame_equal(result, expected) + + +def test_variable_width_unicode(): + data = """ +שלום שלום +ום שלל +של ום +""".strip( + "\r\n" + ) + encoding = "utf8" + kwargs = {"header": None, "encoding": encoding} + + expected = read_fwf( + BytesIO(data.encode(encoding)), colspecs=[(0, 4), (5, 9)], **kwargs + ) + result = read_fwf(BytesIO(data.encode(encoding)), **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", [{}, {"a": "float64", "b": str, "c": "int32"}]) +def test_dtype(dtype): + data = """ a b c +1 2 3.2 +3 4 5.2 +""" + colspecs = [(0, 5), (5, 10), (10, None)] + result = read_fwf(StringIO(data), colspecs=colspecs, dtype=dtype) + + expected = DataFrame( + {"a": [1, 3], "b": [2, 4], "c": [3.2, 5.2]}, columns=["a", "b", "c"] + ) + + for col, dt in dtype.items(): + expected[col] = expected[col].astype(dt) + + tm.assert_frame_equal(result, expected) + + +def test_skiprows_inference(): + # see gh-11256 + data = """ +Text contained in the file header + +DataCol1 DataCol2 + 0.0 1.0 + 101.6 956.1 +""".strip() + skiprows = 2 + + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + expected = read_csv(StringIO(data), skiprows=skiprows, delim_whitespace=True) + + result = read_fwf(StringIO(data), skiprows=skiprows) + tm.assert_frame_equal(result, expected) + + +def test_skiprows_by_index_inference(): + data = """ +To be skipped 
+Not To Be Skipped +Once more to be skipped +123 34 8 123 +456 78 9 456 +""".strip() + skiprows = [0, 2] + + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + expected = read_csv(StringIO(data), skiprows=skiprows, delim_whitespace=True) + + result = read_fwf(StringIO(data), skiprows=skiprows) + tm.assert_frame_equal(result, expected) + + +def test_skiprows_inference_empty(): + data = """ +AA BBB C +12 345 6 +78 901 2 +""".strip() + + msg = "No rows from which to infer column width" + with pytest.raises(EmptyDataError, match=msg): + read_fwf(StringIO(data), skiprows=3) + + +def test_whitespace_preservation(): + # see gh-16772 + header = None + csv_data = """ + a ,bbb + cc,dd """ + + fwf_data = """ + a bbb + ccdd """ + result = read_fwf( + StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0], delimiter="\n\t" + ) + expected = read_csv(StringIO(csv_data), header=header) + tm.assert_frame_equal(result, expected) + + +def test_default_delimiter(): + header = None + csv_data = """ +a,bbb +cc,dd""" + + fwf_data = """ +a \tbbb +cc\tdd """ + result = read_fwf(StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0]) + expected = read_csv(StringIO(csv_data), header=header) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("infer", [True, False]) +def test_fwf_compression(compression_only, infer, compression_to_extension): + data = """1111111111 + 2222222222 + 3333333333""".strip() + + compression = compression_only + extension = compression_to_extension[compression] + + kwargs = {"widths": [5, 5], "names": ["one", "two"]} + expected = read_fwf(StringIO(data), **kwargs) + + data = bytes(data, encoding="utf-8") + + with tm.ensure_clean(filename="tmp." + extension) as path: + tm.write_to_compressed(compression, path, data) + + if infer is not None: + kwargs["compression"] = "infer" if infer else compression + + result = read_fwf(path, **kwargs) + tm.assert_frame_equal(result, expected) + + +def test_binary_mode(): + """ + read_fwf supports opening files in binary mode. + + GH 18035. + """ + data = """aaa aaa aaa +bba bab b a""" + df_reference = DataFrame( + [["bba", "bab", "b a"]], columns=["aaa", "aaa.1", "aaa.2"], index=[0] + ) + with tm.ensure_clean() as path: + Path(path).write_text(data, encoding="utf-8") + with open(path, "rb") as file: + df = read_fwf(file) + file.seek(0) + tm.assert_frame_equal(df, df_reference) + + +@pytest.mark.parametrize("memory_map", [True, False]) +def test_encoding_mmap(memory_map): + """ + encoding should be working, even when using a memory-mapped file. + + GH 23254. 
+ """ + encoding = "iso8859_1" + with tm.ensure_clean() as path: + Path(path).write_bytes(" 1 A Ä 2\n".encode(encoding)) + df = read_fwf( + path, + header=None, + widths=[2, 2, 2, 2], + encoding=encoding, + memory_map=memory_map, + ) + df_reference = DataFrame([[1, "A", "Ä", 2]]) + tm.assert_frame_equal(df, df_reference) + + +@pytest.mark.parametrize( + "colspecs, names, widths, index_col", + [ + ( + [(0, 6), (6, 12), (12, 18), (18, None)], + list("abcde"), + None, + None, + ), + ( + None, + list("abcde"), + [6] * 4, + None, + ), + ( + [(0, 6), (6, 12), (12, 18), (18, None)], + list("abcde"), + None, + True, + ), + ( + None, + list("abcde"), + [6] * 4, + False, + ), + ( + None, + list("abcde"), + [6] * 4, + True, + ), + ( + [(0, 6), (6, 12), (12, 18), (18, None)], + list("abcde"), + None, + False, + ), + ], +) +def test_len_colspecs_len_names(colspecs, names, widths, index_col): + # GH#40830 + data = """col1 col2 col3 col4 + bab ba 2""" + msg = "Length of colspecs must match length of names" + with pytest.raises(ValueError, match=msg): + read_fwf( + StringIO(data), + colspecs=colspecs, + names=names, + widths=widths, + index_col=index_col, + ) + + +@pytest.mark.parametrize( + "colspecs, names, widths, index_col, expected", + [ + ( + [(0, 6), (6, 12), (12, 18), (18, None)], + list("abc"), + None, + 0, + DataFrame( + index=["col1", "ba"], + columns=["a", "b", "c"], + data=[["col2", "col3", "col4"], ["b ba", "2", np.nan]], + ), + ), + ( + [(0, 6), (6, 12), (12, 18), (18, None)], + list("ab"), + None, + [0, 1], + DataFrame( + index=[["col1", "ba"], ["col2", "b ba"]], + columns=["a", "b"], + data=[["col3", "col4"], ["2", np.nan]], + ), + ), + ( + [(0, 6), (6, 12), (12, 18), (18, None)], + list("a"), + None, + [0, 1, 2], + DataFrame( + index=[["col1", "ba"], ["col2", "b ba"], ["col3", "2"]], + columns=["a"], + data=[["col4"], [np.nan]], + ), + ), + ( + None, + list("abc"), + [6] * 4, + 0, + DataFrame( + index=["col1", "ba"], + columns=["a", "b", "c"], + data=[["col2", "col3", "col4"], ["b ba", "2", np.nan]], + ), + ), + ( + None, + list("ab"), + [6] * 4, + [0, 1], + DataFrame( + index=[["col1", "ba"], ["col2", "b ba"]], + columns=["a", "b"], + data=[["col3", "col4"], ["2", np.nan]], + ), + ), + ( + None, + list("a"), + [6] * 4, + [0, 1, 2], + DataFrame( + index=[["col1", "ba"], ["col2", "b ba"], ["col3", "2"]], + columns=["a"], + data=[["col4"], [np.nan]], + ), + ), + ], +) +def test_len_colspecs_len_names_with_index_col( + colspecs, names, widths, index_col, expected +): + # GH#40830 + data = """col1 col2 col3 col4 + bab ba 2""" + result = read_fwf( + StringIO(data), + colspecs=colspecs, + names=names, + widths=widths, + index_col=index_col, + ) + tm.assert_frame_equal(result, expected) + + +def test_colspecs_with_comment(): + # GH 14135 + result = read_fwf( + StringIO("#\nA1K\n"), colspecs=[(1, 2), (2, 3)], comment="#", header=None + ) + expected = DataFrame([[1, "K"]], columns=[0, 1]) + tm.assert_frame_equal(result, expected) + + +def test_skip_rows_and_n_rows(): + # GH#44021 + data = """a\tb +1\t a +2\t b +3\t c +4\t d +5\t e +6\t f + """ + result = read_fwf(StringIO(data), nrows=4, skiprows=[2, 4]) + expected = DataFrame({"a": [1, 3, 5, 6], "b": ["a", "c", "e", "f"]}) + tm.assert_frame_equal(result, expected) + + +def test_skiprows_with_iterator(): + # GH#10261, GH#56323 + data = """0 +1 +2 +3 +4 +5 +6 +7 +8 +9 + """ + df_iter = read_fwf( + StringIO(data), + colspecs=[(0, 2)], + names=["a"], + iterator=True, + chunksize=2, + skiprows=[0, 1, 2, 6, 9], + ) + expected_frames = [ + 
DataFrame({"a": [3, 4]}), + DataFrame({"a": [5, 7]}, index=[2, 3]), + DataFrame({"a": [8]}, index=[4]), + ] + for i, result in enumerate(df_iter): + tm.assert_frame_equal(result, expected_frames[i]) + + +def test_names_and_infer_colspecs(): + # GH#45337 + data = """X Y Z + 959.0 345 22.2 + """ + result = read_fwf(StringIO(data), skiprows=1, usecols=[0, 2], names=["a", "b"]) + expected = DataFrame({"a": [959.0], "b": 22.2}) + tm.assert_frame_equal(result, expected) + + +def test_widths_and_usecols(): + # GH#46580 + data = """0 1 n -0.4100.1 +0 2 p 0.2 90.1 +0 3 n -0.3140.4""" + result = read_fwf( + StringIO(data), + header=None, + usecols=(0, 1, 3), + widths=(3, 5, 1, 5, 5), + index_col=False, + names=("c0", "c1", "c3"), + ) + expected = DataFrame( + { + "c0": 0, + "c1": [1, 2, 3], + "c3": [-0.4, 0.2, -0.3], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_dtype_backend(string_storage, dtype_backend): + # GH#50289 + if string_storage == "python": + arr = StringArray(np.array(["a", "b"], dtype=np.object_)) + arr_na = StringArray(np.array([pd.NA, "a"], dtype=np.object_)) + elif dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + arr = ArrowExtensionArray(pa.array(["a", "b"])) + arr_na = ArrowExtensionArray(pa.array([None, "a"])) + else: + pa = pytest.importorskip("pyarrow") + arr = ArrowStringArray(pa.array(["a", "b"])) + arr_na = ArrowStringArray(pa.array([None, "a"])) + + data = """a b c d e f g h i +1 2.5 True a +3 4.5 False b True 6 7.5 a""" + with pd.option_context("mode.string_storage", string_storage): + result = read_fwf(StringIO(data), dtype_backend=dtype_backend) + + expected = DataFrame( + { + "a": pd.Series([1, 3], dtype="Int64"), + "b": pd.Series([2.5, 4.5], dtype="Float64"), + "c": pd.Series([True, False], dtype="boolean"), + "d": arr, + "e": pd.Series([pd.NA, True], dtype="boolean"), + "f": pd.Series([pd.NA, 6], dtype="Int64"), + "g": pd.Series([pd.NA, 7.5], dtype="Float64"), + "h": arr_na, + "i": pd.Series([pd.NA, pd.NA], dtype="Int64"), + } + ) + if dtype_backend == "pyarrow": + pa = pytest.importorskip("pyarrow") + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True)) + for col in expected.columns + } + ) + expected["i"] = ArrowExtensionArray(pa.array([None, None])) + + tm.assert_frame_equal(result, expected) + + +def test_invalid_dtype_backend(): + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + with pytest.raises(ValueError, match=msg): + read_fwf("test", dtype_backend="numpy") + + +@pytest.mark.network +@pytest.mark.single_cpu +def test_url_urlopen(httpserver): + data = """\ +A B C D +201158 360.242940 149.910199 11950.7 +201159 444.953632 166.985655 11788.4 +201160 364.136849 183.628767 11806.2 +201161 413.836124 184.375703 11916.8 +201162 502.953953 173.237159 12468.3 +""" + httpserver.serve_content(content=data) + expected = pd.Index(list("ABCD")) + with urlopen(httpserver.url) as f: + result = read_fwf(f).columns + + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_skiprows.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_skiprows.py new file mode 100644 index 0000000000000000000000000000000000000000..2d50916228f1482ec0648e678143c80dbc727ee4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_skiprows.py @@ -0,0 +1,334 @@ +""" +Tests that skipped rows are properly handled during +parsing for all of the parsers defined in parsers.py +""" + +from datetime import datetime +from io import StringIO + +import numpy as np +import pytest + +from pandas.errors import EmptyDataError + +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@xfail_pyarrow # ValueError: skiprows argument must be an integer +@pytest.mark.parametrize("skiprows", [list(range(6)), 6]) +def test_skip_rows_bug(all_parsers, skiprows): + # see gh-505 + parser = all_parsers + text = """#foo,a,b,c +#foo,a,b,c +#foo,a,b,c +#foo,a,b,c +#foo,a,b,c +#foo,a,b,c +1/1/2000,1.,2.,3. +1/2/2000,4,5,6 +1/3/2000,7,8,9 +""" + result = parser.read_csv( + StringIO(text), skiprows=skiprows, header=None, index_col=0, parse_dates=True + ) + index = Index( + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0 + ) + + expected = DataFrame( + np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: skiprows argument must be an integer +def test_deep_skip_rows(all_parsers): + # see gh-4382 + parser = all_parsers + data = "a,b,c\n" + "\n".join( + [",".join([str(i), str(i + 1), str(i + 2)]) for i in range(10)] + ) + condensed_data = "a,b,c\n" + "\n".join( + [",".join([str(i), str(i + 1), str(i + 2)]) for i in [0, 1, 2, 3, 4, 6, 8, 9]] + ) + + result = parser.read_csv(StringIO(data), skiprows=[6, 8]) + condensed_result = parser.read_csv(StringIO(condensed_data)) + tm.assert_frame_equal(result, condensed_result) + + +@xfail_pyarrow # AssertionError: DataFrame are different +def test_skip_rows_blank(all_parsers): + # see gh-9832 + parser = all_parsers + text = """#foo,a,b,c +#foo,a,b,c + +#foo,a,b,c +#foo,a,b,c + +1/1/2000,1.,2.,3. 
+1/2/2000,4,5,6 +1/3/2000,7,8,9 +""" + data = parser.read_csv( + StringIO(text), skiprows=6, header=None, index_col=0, parse_dates=True + ) + index = Index( + [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0 + ) + + expected = DataFrame( + np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index + ) + tm.assert_frame_equal(data, expected) + + +@pytest.mark.parametrize( + "data,kwargs,expected", + [ + ( + """id,text,num_lines +1,"line 11 +line 12",2 +2,"line 21 +line 22",2 +3,"line 31",1""", + {"skiprows": [1]}, + DataFrame( + [[2, "line 21\nline 22", 2], [3, "line 31", 1]], + columns=["id", "text", "num_lines"], + ), + ), + ( + "a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~", + {"quotechar": "~", "skiprows": [2]}, + DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"]), + ), + ( + ( + "Text,url\n~example\n " + "sentence\n one~,url1\n~" + "example\n sentence\n two~,url2\n~" + "example\n sentence\n three~,url3" + ), + {"quotechar": "~", "skiprows": [1, 3]}, + DataFrame([["example\n sentence\n two", "url2"]], columns=["Text", "url"]), + ), + ], +) +@xfail_pyarrow # ValueError: skiprows argument must be an integer +def test_skip_row_with_newline(all_parsers, data, kwargs, expected): + # see gh-12775 and gh-10911 + parser = all_parsers + result = parser.read_csv(StringIO(data), **kwargs) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: skiprows argument must be an integer +def test_skip_row_with_quote(all_parsers): + # see gh-12775 and gh-10911 + parser = all_parsers + data = """id,text,num_lines +1,"line '11' line 12",2 +2,"line '21' line 22",2 +3,"line '31' line 32",1""" + + exp_data = [[2, "line '21' line 22", 2], [3, "line '31' line 32", 1]] + expected = DataFrame(exp_data, columns=["id", "text", "num_lines"]) + + result = parser.read_csv(StringIO(data), skiprows=[1]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data,exp_data", + [ + ( + """id,text,num_lines +1,"line \n'11' line 12",2 +2,"line \n'21' line 22",2 +3,"line \n'31' line 32",1""", + [[2, "line \n'21' line 22", 2], [3, "line \n'31' line 32", 1]], + ), + ( + """id,text,num_lines +1,"line '11\n' line 12",2 +2,"line '21\n' line 22",2 +3,"line '31\n' line 32",1""", + [[2, "line '21\n' line 22", 2], [3, "line '31\n' line 32", 1]], + ), + ( + """id,text,num_lines +1,"line '11\n' \r\tline 12",2 +2,"line '21\n' \r\tline 22",2 +3,"line '31\n' \r\tline 32",1""", + [[2, "line '21\n' \r\tline 22", 2], [3, "line '31\n' \r\tline 32", 1]], + ), + ], +) +@xfail_pyarrow # ValueError: skiprows argument must be an integer +def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data): + # see gh-12775 and gh-10911 + parser = all_parsers + result = parser.read_csv(StringIO(data), skiprows=[1]) + + expected = DataFrame(exp_data, columns=["id", "text", "num_lines"]) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: The 'delim_whitespace' option is not supported +@pytest.mark.parametrize( + "lineterminator", ["\n", "\r\n", "\r"] # "LF" # "CRLF" # "CR" +) +def test_skiprows_lineterminator(all_parsers, lineterminator, request): + # see gh-9079 + parser = all_parsers + data = "\n".join( + [ + "SMOSMANIA ThetaProbe-ML2X ", + "2007/01/01 01:00 0.2140 U M ", + "2007/01/01 02:00 0.2141 M O ", + "2007/01/01 04:00 0.2142 D M ", + ] + ) + expected = DataFrame( + [ + ["2007/01/01", "01:00", 0.2140, "U", "M"], + ["2007/01/01", "02:00", 0.2141, "M", "O"], + ["2007/01/01", "04:00", 0.2142, "D", "M"], + ], + 
columns=["date", "time", "var", "flag", "oflag"], + ) + + if parser.engine == "python" and lineterminator == "\r": + mark = pytest.mark.xfail(reason="'CR' not respect with the Python parser yet") + request.applymarker(mark) + + data = data.replace("\n", lineterminator) + + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), + skiprows=1, + delim_whitespace=True, + names=["date", "time", "var", "flag", "oflag"], + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # AssertionError: DataFrame are different +def test_skiprows_infield_quote(all_parsers): + # see gh-14459 + parser = all_parsers + data = 'a"\nb"\na\n1' + expected = DataFrame({"a": [1]}) + + result = parser.read_csv(StringIO(data), skiprows=2) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: skiprows argument must be an integer +@pytest.mark.parametrize( + "kwargs,expected", + [ + ({}, DataFrame({"1": [3, 5]})), + ({"header": 0, "names": ["foo"]}, DataFrame({"foo": [3, 5]})), + ], +) +def test_skip_rows_callable(all_parsers, kwargs, expected): + parser = all_parsers + data = "a\n1\n2\n3\n4\n5" + + result = parser.read_csv(StringIO(data), skiprows=lambda x: x % 2 == 0, **kwargs) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: skiprows argument must be an integer +def test_skip_rows_callable_not_in(all_parsers): + parser = all_parsers + data = "0,a\n1,b\n2,c\n3,d\n4,e" + expected = DataFrame([[1, "b"], [3, "d"]]) + + result = parser.read_csv( + StringIO(data), header=None, skiprows=lambda x: x not in [1, 3] + ) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # ValueError: skiprows argument must be an integer +def test_skip_rows_skip_all(all_parsers): + parser = all_parsers + data = "a\n1\n2\n3\n4\n5" + msg = "No columns to parse from file" + + with pytest.raises(EmptyDataError, match=msg): + parser.read_csv(StringIO(data), skiprows=lambda x: True) + + +@xfail_pyarrow # ValueError: skiprows argument must be an integer +def test_skip_rows_bad_callable(all_parsers): + msg = "by zero" + parser = all_parsers + data = "a\n1\n2\n3\n4\n5" + + with pytest.raises(ZeroDivisionError, match=msg): + parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0) + + +@xfail_pyarrow # ValueError: skiprows argument must be an integer +def test_skip_rows_and_n_rows(all_parsers): + # GH#44021 + data = """a,b +1,a +2,b +3,c +4,d +5,e +6,f +7,g +8,h +""" + parser = all_parsers + result = parser.read_csv(StringIO(data), nrows=5, skiprows=[2, 4, 6]) + expected = DataFrame({"a": [1, 3, 5, 7, 8], "b": ["a", "c", "e", "g", "h"]}) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow +def test_skip_rows_with_chunks(all_parsers): + # GH 55677 + data = """col_a +10 +20 +30 +40 +50 +60 +70 +80 +90 +100 +""" + parser = all_parsers + reader = parser.read_csv( + StringIO(data), engine=parser, skiprows=lambda x: x in [1, 4, 5], chunksize=4 + ) + df1 = next(reader) + df2 = next(reader) + + tm.assert_frame_equal(df1, DataFrame({"col_a": [20, 30, 60, 70]})) + tm.assert_frame_equal(df2, DataFrame({"col_a": [80, 90, 100]}, index=[4, 5, 6])) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_textreader.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_textreader.py new file mode 100644 index 0000000000000000000000000000000000000000..fef5414e85e52749faab254c2336d6707a10347e --- /dev/null 
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_textreader.py @@ -0,0 +1,342 @@ +""" +Tests the TextReader class in parsers.pyx, which +is integral to the C engine in parsers.py +""" +from io import ( + BytesIO, + StringIO, +) + +import numpy as np +import pytest + +import pandas._libs.parsers as parser +from pandas._libs.parsers import TextReader +from pandas.errors import ParserWarning + +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.parsers import ( + TextFileReader, + read_csv, +) +from pandas.io.parsers.c_parser_wrapper import ensure_dtype_objs + + +class TestTextReader: + @pytest.fixture + def csv_path(self, datapath): + return datapath("io", "data", "csv", "test1.csv") + + def test_file_handle(self, csv_path): + with open(csv_path, "rb") as f: + reader = TextReader(f) + reader.read() + + def test_file_handle_mmap(self, csv_path): + # this was never using memory_map=True + with open(csv_path, "rb") as f: + reader = TextReader(f, header=None) + reader.read() + + def test_StringIO(self, csv_path): + with open(csv_path, "rb") as f: + text = f.read() + src = BytesIO(text) + reader = TextReader(src, header=None) + reader.read() + + def test_string_factorize(self): + # should this be optional? + data = "a\nb\na\nb\na" + reader = TextReader(StringIO(data), header=None) + result = reader.read() + assert len(set(map(id, result[0]))) == 2 + + def test_skipinitialspace(self): + data = "a, b\na, b\na, b\na, b" + + reader = TextReader(StringIO(data), skipinitialspace=True, header=None) + result = reader.read() + + tm.assert_numpy_array_equal( + result[0], np.array(["a", "a", "a", "a"], dtype=np.object_) + ) + tm.assert_numpy_array_equal( + result[1], np.array(["b", "b", "b", "b"], dtype=np.object_) + ) + + def test_parse_booleans(self): + data = "True\nFalse\nTrue\nTrue" + + reader = TextReader(StringIO(data), header=None) + result = reader.read() + + assert result[0].dtype == np.bool_ + + def test_delimit_whitespace(self): + data = 'a b\na\t\t "b"\n"a"\t \t b' + + reader = TextReader(StringIO(data), delim_whitespace=True, header=None) + result = reader.read() + + tm.assert_numpy_array_equal( + result[0], np.array(["a", "a", "a"], dtype=np.object_) + ) + tm.assert_numpy_array_equal( + result[1], np.array(["b", "b", "b"], dtype=np.object_) + ) + + def test_embedded_newline(self): + data = 'a\n"hello\nthere"\nthis' + + reader = TextReader(StringIO(data), header=None) + result = reader.read() + + expected = np.array(["a", "hello\nthere", "this"], dtype=np.object_) + tm.assert_numpy_array_equal(result[0], expected) + + def test_euro_decimal(self): + data = "12345,67\n345,678" + + reader = TextReader(StringIO(data), delimiter=":", decimal=",", header=None) + result = reader.read() + + expected = np.array([12345.67, 345.678]) + tm.assert_almost_equal(result[0], expected) + + def test_integer_thousands(self): + data = "123,456\n12,500" + + reader = TextReader(StringIO(data), delimiter=":", thousands=",", header=None) + result = reader.read() + + expected = np.array([123456, 12500], dtype=np.int64) + tm.assert_almost_equal(result[0], expected) + + def test_integer_thousands_alt(self): + data = "123.456\n12.500" + + reader = TextFileReader( + StringIO(data), delimiter=":", thousands=".", header=None + ) + result = reader.read() + + expected = DataFrame([123456, 12500]) + tm.assert_frame_equal(result, expected) + + def test_skip_bad_lines(self): + # too many lines, see #2430 for why + data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r" + + reader = 
TextReader(StringIO(data), delimiter=":", header=None) + msg = r"Error tokenizing data\. C error: Expected 3 fields in line 4, saw 4" + with pytest.raises(parser.ParserError, match=msg): + reader.read() + + reader = TextReader( + StringIO(data), delimiter=":", header=None, on_bad_lines=2 # Skip + ) + result = reader.read() + expected = { + 0: np.array(["a", "d", "g", "l"], dtype=object), + 1: np.array(["b", "e", "h", "m"], dtype=object), + 2: np.array(["c", "f", "i", "n"], dtype=object), + } + assert_array_dicts_equal(result, expected) + + with tm.assert_produces_warning(ParserWarning, match="Skipping line"): + reader = TextReader( + StringIO(data), delimiter=":", header=None, on_bad_lines=1 # Warn + ) + reader.read() + + def test_header_not_enough_lines(self): + data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6" + + reader = TextReader(StringIO(data), delimiter=",", header=2) + header = reader.header + expected = [["a", "b", "c"]] + assert header == expected + + recs = reader.read() + expected = { + 0: np.array([1, 4], dtype=np.int64), + 1: np.array([2, 5], dtype=np.int64), + 2: np.array([3, 6], dtype=np.int64), + } + assert_array_dicts_equal(recs, expected) + + def test_escapechar(self): + data = '\\"hello world"\n\\"hello world"\n\\"hello world"' + + reader = TextReader(StringIO(data), delimiter=",", header=None, escapechar="\\") + result = reader.read() + expected = {0: np.array(['"hello world"'] * 3, dtype=object)} + assert_array_dicts_equal(result, expected) + + def test_eof_has_eol(self): + # handling of new line at EOF + pass + + def test_na_substitution(self): + pass + + def test_numpy_string_dtype(self): + data = """\ +a,1 +aa,2 +aaa,3 +aaaa,4 +aaaaa,5""" + + def _make_reader(**kwds): + if "dtype" in kwds: + kwds["dtype"] = ensure_dtype_objs(kwds["dtype"]) + return TextReader(StringIO(data), delimiter=",", header=None, **kwds) + + reader = _make_reader(dtype="S5,i4") + result = reader.read() + + assert result[0].dtype == "S5" + + ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaaa"], dtype="S5") + assert (result[0] == ex_values).all() + assert result[1].dtype == "i4" + + reader = _make_reader(dtype="S4") + result = reader.read() + assert result[0].dtype == "S4" + ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaa"], dtype="S4") + assert (result[0] == ex_values).all() + assert result[1].dtype == "S4" + + def test_pass_dtype(self): + data = """\ +one,two +1,a +2,b +3,c +4,d""" + + def _make_reader(**kwds): + if "dtype" in kwds: + kwds["dtype"] = ensure_dtype_objs(kwds["dtype"]) + return TextReader(StringIO(data), delimiter=",", **kwds) + + reader = _make_reader(dtype={"one": "u1", 1: "S1"}) + result = reader.read() + assert result[0].dtype == "u1" + assert result[1].dtype == "S1" + + reader = _make_reader(dtype={"one": np.uint8, 1: object}) + result = reader.read() + assert result[0].dtype == "u1" + assert result[1].dtype == "O" + + reader = _make_reader(dtype={"one": np.dtype("u1"), 1: np.dtype("O")}) + result = reader.read() + assert result[0].dtype == "u1" + assert result[1].dtype == "O" + + def test_usecols(self): + data = """\ +a,b,c +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + + def _make_reader(**kwds): + return TextReader(StringIO(data), delimiter=",", **kwds) + + reader = _make_reader(usecols=(1, 2)) + result = reader.read() + + exp = _make_reader().read() + assert len(result) == 2 + assert (result[1] == exp[1]).all() + assert (result[2] == exp[2]).all() + + @pytest.mark.parametrize( + "text, kwargs", + [ + ("a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12", {"delimiter": ","}), + ( + 
"a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12", + {"delim_whitespace": True}, + ), + ("a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12", {"delimiter": ","}), + ( + ( + "A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r" + "AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r" + ",BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0" + ), + {"delimiter": ","}, + ), + ("A B C\r 2 3\r4 5 6", {"delim_whitespace": True}), + ("A B C\r2 3\r4 5 6", {"delim_whitespace": True}), + ], + ) + def test_cr_delimited(self, text, kwargs): + nice_text = text.replace("\r", "\r\n") + result = TextReader(StringIO(text), **kwargs).read() + expected = TextReader(StringIO(nice_text), **kwargs).read() + assert_array_dicts_equal(result, expected) + + def test_empty_field_eof(self): + data = "a,b,c\n1,2,3\n4,," + + result = TextReader(StringIO(data), delimiter=",").read() + + expected = { + 0: np.array([1, 4], dtype=np.int64), + 1: np.array(["2", ""], dtype=object), + 2: np.array(["3", ""], dtype=object), + } + assert_array_dicts_equal(result, expected) + + @pytest.mark.parametrize("repeat", range(10)) + def test_empty_field_eof_mem_access_bug(self, repeat): + # GH5664 + a = DataFrame([["b"], [np.nan]], columns=["a"], index=["a", "c"]) + b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list("abcd"), index=[1, 1]) + c = DataFrame( + [ + [1, 2, 3, 4], + [6, np.nan, np.nan, np.nan], + [8, 9, 10, 11], + [13, 14, np.nan, np.nan], + ], + columns=list("abcd"), + index=[0, 5, 7, 12], + ) + + df = read_csv(StringIO("a,b\nc\n"), skiprows=0, names=["a"], engine="c") + tm.assert_frame_equal(df, a) + + df = read_csv( + StringIO("1,1,1,1,0\n" * 2 + "\n" * 2), names=list("abcd"), engine="c" + ) + tm.assert_frame_equal(df, b) + + df = read_csv( + StringIO("0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14"), + names=list("abcd"), + engine="c", + ) + tm.assert_frame_equal(df, c) + + def test_empty_csv_input(self): + # GH14867 + with read_csv( + StringIO(), chunksize=20, header=None, names=["a", "b", "c"] + ) as df: + assert isinstance(df, TextFileReader) + + +def assert_array_dicts_equal(left, right): + for k, v in left.items(): + tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k])) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_unsupported.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_unsupported.py new file mode 100644 index 0000000000000000000000000000000000000000..f8790bdb5fa426252f85f472f1249e75dae42dcd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_unsupported.py @@ -0,0 +1,226 @@ +""" +Tests that features that are currently unsupported in +either the Python or C parser are actually enforced +and are clearly communicated to the user. + +Ultimately, the goal is to remove test cases from this +test suite as new feature support is added to the parsers. 
+""" +from io import StringIO +import os +from pathlib import Path + +import pytest + +from pandas.errors import ParserError + +import pandas._testing as tm + +from pandas.io.parsers import read_csv +import pandas.io.parsers.readers as parsers + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@pytest.fixture(params=["python", "python-fwf"], ids=lambda val: val) +def python_engine(request): + return request.param + + +class TestUnsupportedFeatures: + def test_mangle_dupe_cols_false(self): + # see gh-12935 + data = "a b c\n1 2 3" + + for engine in ("c", "python"): + with pytest.raises(TypeError, match="unexpected keyword"): + read_csv(StringIO(data), engine=engine, mangle_dupe_cols=True) + + def test_c_engine(self): + # see gh-6607 + data = "a b c\n1 2 3" + msg = "does not support" + + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + + # specify C engine with unsupported options (raise) + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + read_csv(StringIO(data), engine="c", sep=None, delim_whitespace=False) + with pytest.raises(ValueError, match=msg): + read_csv(StringIO(data), engine="c", sep=r"\s") + with pytest.raises(ValueError, match=msg): + read_csv(StringIO(data), engine="c", sep="\t", quotechar=chr(128)) + with pytest.raises(ValueError, match=msg): + read_csv(StringIO(data), engine="c", skipfooter=1) + + # specify C-unsupported options without python-unsupported options + with tm.assert_produces_warning((parsers.ParserWarning, FutureWarning)): + read_csv(StringIO(data), sep=None, delim_whitespace=False) + with tm.assert_produces_warning(parsers.ParserWarning): + read_csv(StringIO(data), sep=r"\s") + with tm.assert_produces_warning(parsers.ParserWarning): + read_csv(StringIO(data), sep="\t", quotechar=chr(128)) + with tm.assert_produces_warning(parsers.ParserWarning): + read_csv(StringIO(data), skipfooter=1) + + text = """ A B C D E +one two three four +a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 +a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 +x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" + msg = "Error tokenizing data" + + with pytest.raises(ParserError, match=msg): + read_csv(StringIO(text), sep="\\s+") + with pytest.raises(ParserError, match=msg): + read_csv(StringIO(text), engine="c", sep="\\s+") + + msg = "Only length-1 thousands markers supported" + data = """A|B|C +1|2,334|5 +10|13|10. 
+""" + with pytest.raises(ValueError, match=msg): + read_csv(StringIO(data), thousands=",,") + with pytest.raises(ValueError, match=msg): + read_csv(StringIO(data), thousands="") + + msg = "Only length-1 line terminators supported" + data = "a,b,c~~1,2,3~~4,5,6" + with pytest.raises(ValueError, match=msg): + read_csv(StringIO(data), lineterminator="~~") + + def test_python_engine(self, python_engine): + from pandas.io.parsers.readers import _python_unsupported as py_unsupported + + data = """1,2,3,, +1,2,3,4, +1,2,3,4,5 +1,2,,, +1,2,3,4,""" + + for default in py_unsupported: + msg = ( + f"The {repr(default)} option is not " + f"supported with the {repr(python_engine)} engine" + ) + + kwargs = {default: object()} + with pytest.raises(ValueError, match=msg): + read_csv(StringIO(data), engine=python_engine, **kwargs) + + def test_python_engine_file_no_iter(self, python_engine): + # see gh-16530 + class NoNextBuffer: + def __init__(self, csv_data) -> None: + self.data = csv_data + + def __next__(self): + return self.data.__next__() + + def read(self): + return self.data + + def readline(self): + return self.data + + data = "a\n1" + msg = "'NoNextBuffer' object is not iterable|argument 1 must be an iterator" + + with pytest.raises(TypeError, match=msg): + read_csv(NoNextBuffer(data), engine=python_engine) + + def test_pyarrow_engine(self): + from pandas.io.parsers.readers import _pyarrow_unsupported as pa_unsupported + + data = """1,2,3,, + 1,2,3,4, + 1,2,3,4,5 + 1,2,,, + 1,2,3,4,""" + + for default in pa_unsupported: + msg = ( + f"The {repr(default)} option is not " + f"supported with the 'pyarrow' engine" + ) + kwargs = {default: object()} + default_needs_bool = {"warn_bad_lines", "error_bad_lines"} + if default == "dialect": + kwargs[default] = "excel" # test a random dialect + elif default in default_needs_bool: + kwargs[default] = True + elif default == "on_bad_lines": + kwargs[default] = "warn" + + warn = None + depr_msg = None + if "delim_whitespace" in kwargs: + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + warn = FutureWarning + if "verbose" in kwargs: + depr_msg = "The 'verbose' keyword in pd.read_csv is deprecated" + warn = FutureWarning + + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(warn, match=depr_msg): + read_csv(StringIO(data), engine="pyarrow", **kwargs) + + def test_on_bad_lines_callable_python_or_pyarrow(self, all_parsers): + # GH 5686 + # GH 54643 + sio = StringIO("a,b\n1,2") + bad_lines_func = lambda x: x + parser = all_parsers + if all_parsers.engine not in ["python", "pyarrow"]: + msg = ( + "on_bad_line can only be a callable " + "function if engine='python' or 'pyarrow'" + ) + with pytest.raises(ValueError, match=msg): + parser.read_csv(sio, on_bad_lines=bad_lines_func) + else: + parser.read_csv(sio, on_bad_lines=bad_lines_func) + + +def test_close_file_handle_on_invalid_usecols(all_parsers): + # GH 45384 + parser = all_parsers + + error = ValueError + if parser.engine == "pyarrow": + # Raises pyarrow.lib.ArrowKeyError + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + with tm.ensure_clean("test.csv") as fname: + Path(fname).write_text("col1,col2\na,b\n1,2", encoding="utf-8") + with tm.assert_produces_warning(False): + with pytest.raises(error, match="col3"): + parser.read_csv(fname, usecols=["col1", "col2", "col3"]) + # unlink fails on windows if file handles still point to it + os.unlink(fname) + + +def test_invalid_file_inputs(request, all_parsers): + # GH#45957 + parser = all_parsers 
+ if parser.engine == "python": + request.applymarker( + pytest.mark.xfail(reason=f"{parser.engine} engine supports lists.") + ) + + with pytest.raises(ValueError, match="Invalid"): + parser.read_csv([]) + + +def test_invalid_dtype_backend(all_parsers): + parser = all_parsers + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." + ) + with pytest.raises(ValueError, match=msg): + parser.read_csv("test", dtype_backend="numpy") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_upcast.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_upcast.py new file mode 100644 index 0000000000000000000000000000000000000000..bc4c4c2e24e9caf8d4ac118b5053fe03d97aafb0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_upcast.py @@ -0,0 +1,102 @@ +import numpy as np +import pytest + +from pandas._libs.parsers import ( + _maybe_upcast, + na_values, +) + +import pandas as pd +from pandas import NA +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + BooleanArray, + FloatingArray, + IntegerArray, + StringArray, +) + + +def test_maybe_upcast(any_real_numpy_dtype): + # GH#36712 + + dtype = np.dtype(any_real_numpy_dtype) + na_value = na_values[dtype] + arr = np.array([1, 2, na_value], dtype=dtype) + result = _maybe_upcast(arr, use_dtype_backend=True) + + expected_mask = np.array([False, False, True]) + if issubclass(dtype.type, np.integer): + expected = IntegerArray(arr, mask=expected_mask) + else: + expected = FloatingArray(arr, mask=expected_mask) + + tm.assert_extension_array_equal(result, expected) + + +def test_maybe_upcast_no_na(any_real_numpy_dtype): + # GH#36712 + arr = np.array([1, 2, 3], dtype=any_real_numpy_dtype) + result = _maybe_upcast(arr, use_dtype_backend=True) + + expected_mask = np.array([False, False, False]) + if issubclass(np.dtype(any_real_numpy_dtype).type, np.integer): + expected = IntegerArray(arr, mask=expected_mask) + else: + expected = FloatingArray(arr, mask=expected_mask) + + tm.assert_extension_array_equal(result, expected) + + +def test_maybe_upcaste_bool(): + # GH#36712 + dtype = np.bool_ + na_value = na_values[dtype] + arr = np.array([True, False, na_value], dtype="uint8").view(dtype) + result = _maybe_upcast(arr, use_dtype_backend=True) + + expected_mask = np.array([False, False, True]) + expected = BooleanArray(arr, mask=expected_mask) + tm.assert_extension_array_equal(result, expected) + + +def test_maybe_upcaste_bool_no_nan(): + # GH#36712 + dtype = np.bool_ + arr = np.array([True, False, False], dtype="uint8").view(dtype) + result = _maybe_upcast(arr, use_dtype_backend=True) + + expected_mask = np.array([False, False, False]) + expected = BooleanArray(arr, mask=expected_mask) + tm.assert_extension_array_equal(result, expected) + + +def test_maybe_upcaste_all_nan(): + # GH#36712 + dtype = np.int64 + na_value = na_values[dtype] + arr = np.array([na_value, na_value], dtype=dtype) + result = _maybe_upcast(arr, use_dtype_backend=True) + + expected_mask = np.array([True, True]) + expected = IntegerArray(arr, mask=expected_mask) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("val", [na_values[np.object_], "c"]) +def test_maybe_upcast_object(val, string_storage): + # GH#36712 + pa = pytest.importorskip("pyarrow") + + with pd.option_context("mode.string_storage", string_storage): + arr = np.array(["a", "b", val], dtype=np.object_) + result = _maybe_upcast(arr, use_dtype_backend=True) + + if 
string_storage == "python": + exp_val = "c" if val == "c" else NA + expected = StringArray(np.array(["a", "b", exp_val], dtype=np.object_)) + else: + exp_val = "c" if val == "c" else None + expected = ArrowStringArray(pa.array(["a", "b", exp_val])) + tm.assert_extension_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51e54f8f67cd3cf1d0b3df799ab84b17d7748453 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32d20d14223ddca8b6aef01cf81cc8df72bfe806 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a6180e2db3c6d66e6dc0ec3d7dcb480d2c96aa0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11d9932bc0ffa7ab6e564d0979da04e63e4e94c8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py new file mode 100644 index 0000000000000000000000000000000000000000..bc66189ca064e5f0cc474cbd072747978f60e2c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -0,0 +1,194 @@ +""" +Tests the usecols functionality during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO + +import pytest + +from pandas import ( + DataFrame, + Index, + Timestamp, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + +_msg_pyarrow_requires_names = ( + "The pyarrow engine does not allow 'usecols' to be integer column " + "positions. 
Pass a list of string column names instead." +) + + +@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]]) +def test_usecols_with_parse_dates(all_parsers, usecols): + # see gh-9755 + data = """a,b,c,d,e +0,1,2014-01-01,09:00,4 +0,1,2014-01-02,10:00,4""" + parser = all_parsers + parse_dates = [[1, 2]] + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + + cols = { + "a": [0, 0], + "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], + } + expected = DataFrame(cols, columns=["c_d", "a"]) + if parser.engine == "pyarrow": + with pytest.raises(ValueError, match=_msg_pyarrow_requires_names): + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + parser.read_csv( + StringIO(data), usecols=usecols, parse_dates=parse_dates + ) + return + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), usecols=usecols, parse_dates=parse_dates + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # pyarrow.lib.ArrowKeyError: Column 'fdate' in include_columns +def test_usecols_with_parse_dates2(all_parsers): + # see gh-13604 + parser = all_parsers + data = """2008-02-07 09:40,1032.43 +2008-02-07 09:50,1042.54 +2008-02-07 10:00,1051.65""" + + names = ["date", "values"] + usecols = names[:] + parse_dates = [0] + + index = Index( + [ + Timestamp("2008-02-07 09:40"), + Timestamp("2008-02-07 09:50"), + Timestamp("2008-02-07 10:00"), + ], + name="date", + ) + cols = {"values": [1032.43, 1042.54, 1051.65]} + expected = DataFrame(cols, index=index) + + result = parser.read_csv( + StringIO(data), + parse_dates=parse_dates, + index_col=0, + usecols=usecols, + header=None, + names=names, + ) + tm.assert_frame_equal(result, expected) + + +def test_usecols_with_parse_dates3(all_parsers): + # see gh-14792 + parser = all_parsers + data = """a,b,c,d,e,f,g,h,i,j +2016/09/21,1,1,2,3,4,5,6,7,8""" + + usecols = list("abcdefghij") + parse_dates = [0] + + cols = { + "a": Timestamp("2016-09-21").as_unit("ns"), + "b": [1], + "c": [1], + "d": [2], + "e": [3], + "f": [4], + "g": [5], + "h": [6], + "i": [7], + "j": [8], + } + expected = DataFrame(cols, columns=usecols) + + result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates) + tm.assert_frame_equal(result, expected) + + +def test_usecols_with_parse_dates4(all_parsers): + data = "a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8" + usecols = list("abcdefghij") + parse_dates = [[0, 1]] + parser = all_parsers + + cols = { + "a_b": "2016/09/21 1", + "c": [1], + "d": [2], + "e": [3], + "f": [4], + "g": [5], + "h": [6], + "i": [7], + "j": [8], + } + expected = DataFrame(cols, columns=["a_b"] + list("cdefghij")) + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), + usecols=usecols, + parse_dates=parse_dates, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]]) +@pytest.mark.parametrize( + "names", + [ + list("abcde"), # Names span all columns in original data. + list("acd"), # Names span only the selected columns. 
+ ], +) +def test_usecols_with_parse_dates_and_names(all_parsers, usecols, names, request): + # see gh-9755 + s = """0,1,2014-01-01,09:00,4 +0,1,2014-01-02,10:00,4""" + parse_dates = [[1, 2]] + parser = all_parsers + + if parser.engine == "pyarrow" and not (len(names) == 3 and usecols[0] == 0): + mark = pytest.mark.xfail( + reason="Length mismatch in some cases, UserWarning in other" + ) + request.applymarker(mark) + + cols = { + "a": [0, 0], + "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")], + } + expected = DataFrame(cols, columns=["c_d", "a"]) + + depr_msg = ( + "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated" + ) + with tm.assert_produces_warning( + (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(s), names=names, parse_dates=parse_dates, usecols=usecols + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_strings.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_strings.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ade41d384659ae5571742b7d22620727365ad3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_strings.py @@ -0,0 +1,96 @@ +""" +Tests the usecols functionality during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO + +import pytest + +from pandas import DataFrame +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +def test_usecols_with_unicode_strings(all_parsers): + # see gh-13219 + data = """AAA,BBB,CCC,DDD +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + + exp_data = { + "AAA": { + 0: 0.056674972999999997, + 1: 2.6132309819999997, + 2: 3.5689350380000002, + }, + "BBB": {0: 8, 1: 2, 2: 7}, + } + expected = DataFrame(exp_data) + + result = parser.read_csv(StringIO(data), usecols=["AAA", "BBB"]) + tm.assert_frame_equal(result, expected) + + +def test_usecols_with_single_byte_unicode_strings(all_parsers): + # see gh-13219 + data = """A,B,C,D +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + + exp_data = { + "A": { + 0: 0.056674972999999997, + 1: 2.6132309819999997, + 2: 3.5689350380000002, + }, + "B": {0: 8, 1: 2, 2: 7}, + } + expected = DataFrame(exp_data) + + result = parser.read_csv(StringIO(data), usecols=["A", "B"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("usecols", [["AAA", b"BBB"], [b"AAA", "BBB"]]) +def test_usecols_with_mixed_encoding_strings(all_parsers, usecols): + data = """AAA,BBB,CCC,DDD +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + _msg_validate_usecols_arg = ( + "'usecols' must either be list-like " + "of all strings, all unicode, all " + "integers or a callable." 
+ ) + with pytest.raises(ValueError, match=_msg_validate_usecols_arg): + parser.read_csv(StringIO(data), usecols=usecols) + + +@pytest.mark.parametrize("usecols", [["あああ", "いい"], ["あああ", "いい"]]) +def test_usecols_with_multi_byte_characters(all_parsers, usecols): + data = """あああ,いい,ううう,ええええ +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + + exp_data = { + "あああ": { + 0: 0.056674972999999997, + 1: 2.6132309819999997, + 2: 3.5689350380000002, + }, + "いい": {0: 8, 1: 2, 2: 7}, + } + expected = DataFrame(exp_data) + + result = parser.read_csv(StringIO(data), usecols=usecols) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..767fba666e41769a2fa1c756a5e93b5e1720cd9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py @@ -0,0 +1,563 @@ +""" +Tests the usecols functionality during parsing +for all of the parsers defined in parsers.py +""" +from io import StringIO + +import numpy as np +import pytest + +from pandas.errors import ParserError + +from pandas import ( + DataFrame, + Index, + array, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +_msg_validate_usecols_arg = ( + "'usecols' must either be list-like " + "of all strings, all unicode, all " + "integers or a callable." +) +_msg_validate_usecols_names = ( + "Usecols do not match columns, columns expected but not found: {0}" +) +_msg_pyarrow_requires_names = ( + "The pyarrow engine does not allow 'usecols' to be integer column " + "positions. Pass a list of string column names instead." 
+) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame is deprecated:DeprecationWarning" +) + + +def test_raise_on_mixed_dtype_usecols(all_parsers): + # See gh-12678 + data = """a,b,c + 1000,2000,3000 + 4000,5000,6000 + """ + usecols = [0, "b", 2] + parser = all_parsers + + with pytest.raises(ValueError, match=_msg_validate_usecols_arg): + parser.read_csv(StringIO(data), usecols=usecols) + + +@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")]) +def test_usecols(all_parsers, usecols, request): + data = """\ +a,b,c +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + parser = all_parsers + if parser.engine == "pyarrow" and isinstance(usecols[0], int): + with pytest.raises(ValueError, match=_msg_pyarrow_requires_names): + parser.read_csv(StringIO(data), usecols=usecols) + return + + result = parser.read_csv(StringIO(data), usecols=usecols) + + expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"]) + tm.assert_frame_equal(result, expected) + + +def test_usecols_with_names(all_parsers): + data = """\ +a,b,c +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + parser = all_parsers + names = ["foo", "bar"] + + if parser.engine == "pyarrow": + with pytest.raises(ValueError, match=_msg_pyarrow_requires_names): + parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0) + return + + result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0) + + expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])] +) +def test_usecols_relative_to_names(all_parsers, names, usecols): + data = """\ +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + parser = all_parsers + if parser.engine == "pyarrow" and not isinstance(usecols[0], int): + # ArrowKeyError: Column 'fb' in include_columns does not exist + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols) + + expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"]) + tm.assert_frame_equal(result, expected) + + +def test_usecols_relative_to_names2(all_parsers): + # see gh-5766 + data = """\ +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + parser = all_parsers + + result = parser.read_csv( + StringIO(data), names=["a", "b"], header=None, usecols=[0, 1] + ) + + expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + +# regex mismatch: "Length mismatch: Expected axis has 1 elements" +@xfail_pyarrow +def test_usecols_name_length_conflict(all_parsers): + data = """\ +1,2,3 +4,5,6 +7,8,9 +10,11,12""" + parser = all_parsers + msg = "Number of passed names did not match number of header fields in the file" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1]) + + +def test_usecols_single_string(all_parsers): + # see gh-20558 + parser = all_parsers + data = """foo, bar, baz +1000, 2000, 3000 +4000, 5000, 6000""" + + with pytest.raises(ValueError, match=_msg_validate_usecols_arg): + parser.read_csv(StringIO(data), usecols="foo") + + +@skip_pyarrow # CSV parse error in one case, AttributeError in another +@pytest.mark.parametrize( + "data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"] +) +def 
test_usecols_index_col_false(all_parsers, data): + # see gh-9082 + parser = all_parsers + usecols = ["a", "c", "d"] + expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]}) + + result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("index_col", ["b", 0]) +@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]]) +def test_usecols_index_col_conflict(all_parsers, usecols, index_col, request): + # see gh-4201: test that index_col as integer reflects usecols + parser = all_parsers + data = "a,b,c,d\nA,a,1,one\nB,b,2,two" + + if parser.engine == "pyarrow" and isinstance(usecols[0], int): + with pytest.raises(ValueError, match=_msg_pyarrow_requires_names): + parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col) + return + + expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b")) + + result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col) + tm.assert_frame_equal(result, expected) + + +def test_usecols_index_col_conflict2(all_parsers): + # see gh-4201: test that index_col as integer reflects usecols + parser = all_parsers + data = "a,b,c,d\nA,a,1,one\nB,b,2,two" + + expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")}) + expected = expected.set_index(["b", "c"]) + + result = parser.read_csv( + StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"] + ) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Expected 3 columns, got 4 +def test_usecols_implicit_index_col(all_parsers): + # see gh-2654 + parser = all_parsers + data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10" + + result = parser.read_csv(StringIO(data), usecols=["a", "b"]) + expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8]) + tm.assert_frame_equal(result, expected) + + +def test_usecols_index_col_middle(all_parsers): + # GH#9098 + parser = all_parsers + data = """a,b,c,d +1,2,3,4 +""" + result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="c") + expected = DataFrame({"b": [2], "d": [4]}, index=Index([3], name="c")) + tm.assert_frame_equal(result, expected) + + +def test_usecols_index_col_end(all_parsers): + # GH#9098 + parser = all_parsers + data = """a,b,c,d +1,2,3,4 +""" + result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="d") + expected = DataFrame({"b": [2], "c": [3]}, index=Index([4], name="d")) + tm.assert_frame_equal(result, expected) + + +def test_usecols_regex_sep(all_parsers): + # see gh-2733 + parser = all_parsers + data = "a b c\n4 apple bat 5.7\n8 orange cow 10" + + if parser.engine == "pyarrow": + msg = "the 'pyarrow' engine does not support regex separators" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b")) + return + + result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b")) + + expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8]) + tm.assert_frame_equal(result, expected) + + +def test_usecols_with_whitespace(all_parsers): + parser = all_parsers + data = "a b c\n4 apple bat 5.7\n8 orange cow 10" + + depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated" + + if parser.engine == "pyarrow": + msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + 
parser.read_csv( + StringIO(data), delim_whitespace=True, usecols=("a", "b") + ) + return + + with tm.assert_produces_warning( + FutureWarning, match=depr_msg, check_stacklevel=False + ): + result = parser.read_csv( + StringIO(data), delim_whitespace=True, usecols=("a", "b") + ) + expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "usecols,expected", + [ + # Column selection by index. + ([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])), + # Column selection by name. + ( + ["0", "1"], + DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"]), + ), + ], +) +def test_usecols_with_integer_like_header(all_parsers, usecols, expected, request): + parser = all_parsers + data = """2,0,1 +1000,2000,3000 +4000,5000,6000""" + + if parser.engine == "pyarrow" and isinstance(usecols[0], int): + with pytest.raises(ValueError, match=_msg_pyarrow_requires_names): + parser.read_csv(StringIO(data), usecols=usecols) + return + + result = parser.read_csv(StringIO(data), usecols=usecols) + tm.assert_frame_equal(result, expected) + + +@xfail_pyarrow # mismatched shape +def test_empty_usecols(all_parsers): + data = "a,b,c\n1,2,3\n4,5,6" + expected = DataFrame(columns=Index([])) + parser = all_parsers + + result = parser.read_csv(StringIO(data), usecols=set()) + tm.assert_frame_equal(result, expected) + + +def test_np_array_usecols(all_parsers): + # see gh-12546 + parser = all_parsers + data = "a,b,c\n1,2,3" + usecols = np.array(["a", "b"]) + + expected = DataFrame([[1, 2]], columns=usecols) + result = parser.read_csv(StringIO(data), usecols=usecols) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "usecols,expected", + [ + ( + lambda x: x.upper() in ["AAA", "BBB", "DDD"], + DataFrame( + { + "AaA": { + 0: 0.056674972999999997, + 1: 2.6132309819999997, + 2: 3.5689350380000002, + }, + "bBb": {0: 8, 1: 2, 2: 7}, + "ddd": {0: "a", 1: "b", 2: "a"}, + } + ), + ), + (lambda x: False, DataFrame(columns=Index([]))), + ], +) +def test_callable_usecols(all_parsers, usecols, expected): + # see gh-14154 + data = """AaA,bBb,CCC,ddd +0.056674973,8,True,a +2.613230982,2,False,b +3.568935038,7,False,a""" + parser = all_parsers + + if parser.engine == "pyarrow": + msg = "The pyarrow engine does not allow 'usecols' to be a callable" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), usecols=usecols) + return + + result = parser.read_csv(StringIO(data), usecols=usecols) + tm.assert_frame_equal(result, expected) + + +# ArrowKeyError: Column 'fa' in include_columns does not exist in CSV file +@skip_pyarrow +@pytest.mark.parametrize("usecols", [["a", "c"], lambda x: x in ["a", "c"]]) +def test_incomplete_first_row(all_parsers, usecols): + # see gh-6710 + data = "1,2\n1,2,3" + parser = all_parsers + names = ["a", "b", "c"] + expected = DataFrame({"a": [1, 1], "c": [np.nan, 3]}) + + result = parser.read_csv(StringIO(data), names=names, usecols=usecols) + tm.assert_frame_equal(result, expected) + + +@skip_pyarrow # CSV parse error: Expected 3 columns, got 4 +@pytest.mark.parametrize( + "data,usecols,kwargs,expected", + [ + # see gh-8985 + ( + "19,29,39\n" * 2 + "10,20,30,40", + [0, 1, 2], + {"header": None}, + DataFrame([[19, 29, 39], [19, 29, 39], [10, 20, 30]]), + ), + # see gh-9549 + ( + ("A,B,C\n1,2,3\n3,4,5\n1,2,4,5,1,6\n1,2,3,,,1,\n1,2,3\n5,6,7"), + ["A", "B", "C"], + {}, + DataFrame( + { + "A": [1, 3, 1, 1, 1, 5], + "B": [2, 4, 2, 2, 2, 6], 
+ "C": [3, 5, 4, 3, 3, 7], + } + ), + ), + ], +) +def test_uneven_length_cols(all_parsers, data, usecols, kwargs, expected): + # see gh-8985 + parser = all_parsers + result = parser.read_csv(StringIO(data), usecols=usecols, **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "usecols,kwargs,expected,msg", + [ + ( + ["a", "b", "c", "d"], + {}, + DataFrame({"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}), + None, + ), + ( + ["a", "b", "c", "f"], + {}, + None, + _msg_validate_usecols_names.format(r"\['f'\]"), + ), + (["a", "b", "f"], {}, None, _msg_validate_usecols_names.format(r"\['f'\]")), + ( + ["a", "b", "f", "g"], + {}, + None, + _msg_validate_usecols_names.format(r"\[('f', 'g'|'g', 'f')\]"), + ), + # see gh-14671 + ( + None, + {"header": 0, "names": ["A", "B", "C", "D"]}, + DataFrame({"A": [1, 5], "B": [2, 6], "C": [3, 7], "D": [4, 8]}), + None, + ), + ( + ["A", "B", "C", "f"], + {"header": 0, "names": ["A", "B", "C", "D"]}, + None, + _msg_validate_usecols_names.format(r"\['f'\]"), + ), + ( + ["A", "B", "f"], + {"names": ["A", "B", "C", "D"]}, + None, + _msg_validate_usecols_names.format(r"\['f'\]"), + ), + ], +) +def test_raises_on_usecols_names_mismatch( + all_parsers, usecols, kwargs, expected, msg, request +): + data = "a,b,c,d\n1,2,3,4\n5,6,7,8" + kwargs.update(usecols=usecols) + parser = all_parsers + + if parser.engine == "pyarrow" and not ( + usecols is not None and expected is not None + ): + # everything but the first case + # ArrowKeyError: Column 'f' in include_columns does not exist in CSV file + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + if expected is None: + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), **kwargs) + else: + result = parser.read_csv(StringIO(data), **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("usecols", [["A", "C"], [0, 2]]) +def test_usecols_subset_names_mismatch_orig_columns(all_parsers, usecols, request): + data = "a,b,c,d\n1,2,3,4\n5,6,7,8" + names = ["A", "B", "C", "D"] + parser = all_parsers + + if parser.engine == "pyarrow": + if isinstance(usecols[0], int): + with pytest.raises(ValueError, match=_msg_pyarrow_requires_names): + parser.read_csv(StringIO(data), header=0, names=names, usecols=usecols) + return + # "pyarrow.lib.ArrowKeyError: Column 'A' in include_columns does not exist" + pytest.skip(reason="https://github.com/apache/arrow/issues/38676") + + result = parser.read_csv(StringIO(data), header=0, names=names, usecols=usecols) + expected = DataFrame({"A": [1, 5], "C": [3, 7]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("names", [None, ["a", "b"]]) +def test_usecols_indices_out_of_bounds(all_parsers, names): + # GH#25623 & GH 41130; enforced in 2.0 + parser = all_parsers + data = """ +a,b +1,2 + """ + + err = ParserError + msg = "Defining usecols with out-of-bounds" + if parser.engine == "pyarrow": + err = ValueError + msg = _msg_pyarrow_requires_names + + with pytest.raises(err, match=msg): + parser.read_csv(StringIO(data), usecols=[0, 2], names=names, header=0) + + +def test_usecols_additional_columns(all_parsers): + # GH#46997 + parser = all_parsers + usecols = lambda header: header.strip() in ["a", "b", "c"] + + if parser.engine == "pyarrow": + msg = "The pyarrow engine does not allow 'usecols' to be a callable" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO("a,b\nx,y,z"), index_col=False, usecols=usecols) + return + result = 
parser.read_csv(StringIO("a,b\nx,y,z"), index_col=False, usecols=usecols) + expected = DataFrame({"a": ["x"], "b": "y"}) + tm.assert_frame_equal(result, expected) + + +def test_usecols_additional_columns_integer_columns(all_parsers): + # GH#46997 + parser = all_parsers + usecols = lambda header: header.strip() in ["0", "1"] + if parser.engine == "pyarrow": + msg = "The pyarrow engine does not allow 'usecols' to be a callable" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO("0,1\nx,y,z"), index_col=False, usecols=usecols) + return + result = parser.read_csv(StringIO("0,1\nx,y,z"), index_col=False, usecols=usecols) + expected = DataFrame({"0": ["x"], "1": "y"}) + tm.assert_frame_equal(result, expected) + + +def test_usecols_dtype(all_parsers): + parser = all_parsers + data = """ +col1,col2,col3 +a,1,x +b,2,y +""" + result = parser.read_csv( + StringIO(data), + usecols=["col1", "col2"], + dtype={"col1": "string", "col2": "uint8", "col3": "string"}, + ) + expected = DataFrame( + {"col1": array(["a", "b"]), "col2": np.array([1, 2], dtype="uint8")} + ) + tm.assert_frame_equal(result, expected)
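A minimal usage sketch of the public read_csv behaviors that the skiprows and usecols tests above exercise. This is illustrative only and not part of the vendored pandas sources; it assumes nothing beyond documented pandas API (read_csv with a skiprows callable, and usecols given as labels or as a callable over header names).

# Illustrative sketch, separate from the diff above.
from io import StringIO

import pandas as pd

data = "a,b,c\n1,2,3\n4,5,6\n7,8,9\n10,11,12"

# skiprows accepts a callable over 0-based row indices (row 0 is the header here);
# skipping the even-numbered data rows keeps the header plus rows 1 and 3.
df_skip = pd.read_csv(StringIO(data), skiprows=lambda i: i != 0 and i % 2 == 0)

# usecols can be a list of column labels or a callable applied to each header name.
df_cols = pd.read_csv(StringIO(data), usecols=["b", "c"])
df_call = pd.read_csv(StringIO(data), usecols=lambda name: name in {"a", "c"})

print(df_skip)   # rows [1, 2, 3] and [7, 8, 9]
print(df_cols)   # columns b, c only
print(df_call)   # columns a, c only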