applied-ai-018 committed
Commit 8972ff3 · verified · 1 parent: bac2ee3

Add files using upload-large-folder tool
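
For context, a minimal sketch of how a commit like this is typically produced, assuming the tool named above is huggingface_hub's HfApi.upload_large_folder (the repo id and repo type below are illustrative, not taken from this page):

from huggingface_hub import HfApi

api = HfApi()
# upload_large_folder chunks a large local folder into resumable upload
# jobs and commits the results in batches, which is how a single commit
# can touch far more files than the viewer will display.
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",  # hypothetical repo id
    repo_type="model",                      # assumed repo type
    folder_path="llmeval-env",
)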

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__init__.py +0 -0
  2. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py +77 -0
  3. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py +106 -0
  4. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py +432 -0
  5. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py +1751 -0
  6. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py +1511 -0
  7. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py +76 -0
  8. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py +86 -0
  9. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/conftest.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_c_parser_only.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_comment.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_compression.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_concatenate_chunks.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_converters.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_encoding.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_header.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_mangle_dupes.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_multi_thread.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_na_values.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_parse_dates.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_python_parser_only.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_quoting.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_read_fwf.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_upcast.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_chunksize.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_data_list.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_file_buffer_url.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_float.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_inf.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_ints.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_read_errors.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_chunksize.py +378 -0
  40. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_common_basic.py +979 -0
  41. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_decimal.py +72 -0
  42. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_file_buffer_url.py +478 -0
  43. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_index.py +302 -0
  44. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_inf.py +78 -0
  45. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_iterator.py +134 -0
  46. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_read_errors.py +320 -0
  47. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_verbose.py +81 -0
  48. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__init__.py +0 -0
  49. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py ADDED
@@ -0,0 +1,77 @@
+import functools
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+import pandas._testing as tm
+
+pytest.importorskip("odf")
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+@pytest.fixture(autouse=True)
+def cd_and_set_engine(monkeypatch, datapath):
+    func = functools.partial(pd.read_excel, engine="odf")
+    monkeypatch.setattr(pd, "read_excel", func)
+    monkeypatch.chdir(datapath("io", "data", "excel"))
+
+
+def test_read_invalid_types_raises():
+    # the invalid_value_type.ods file required manual editing
+    # of the included content.xml file
+    with pytest.raises(ValueError, match="Unrecognized type awesome_new_type"):
+        pd.read_excel("invalid_value_type.ods")
+
+
+def test_read_writer_table():
+    # Also test reading tables from a text OpenDocument file
+    # (.odt)
+    index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header")
+    expected = pd.DataFrame(
+        [[1, np.nan, 7], [2, np.nan, 8], [3, np.nan, 9]],
+        index=index,
+        columns=["Column 1", "Unnamed: 2", "Column 3"],
+    )
+
+    result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0)
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_newlines_between_xml_elements_table():
+    # GH#45598
+    expected = pd.DataFrame(
+        [[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]],
+        columns=["Column 1", "Column 2", "Column 3"],
+    )
+
+    result = pd.read_excel("test_newlines.ods")
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_unempty_cells():
+    expected = pd.DataFrame(
+        [1, np.nan, 3, np.nan, 5],
+        columns=["Column 1"],
+    )
+
+    result = pd.read_excel("test_unempty_cells.ods")
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_cell_annotation():
+    expected = pd.DataFrame(
+        ["test", np.nan, "test 3"],
+        columns=["Column 1"],
+    )
+
+    result = pd.read_excel("test_cell_annotation.ods")
+
+    tm.assert_frame_equal(result, expected)
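
The tests above drive pandas' ODF reader through pd.read_excel. A minimal usage sketch of the same API (the file name is illustrative; the odfpy package must be installed for engine="odf"):

import pandas as pd

# engine="odf" routes the read through odfpy; as test_read_writer_table
# shows, tables embedded in .odt documents can be read the same way.
df = pd.read_excel("spreadsheet.ods", engine="odf")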
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py ADDED
@@ -0,0 +1,106 @@
+from datetime import (
+    date,
+    datetime,
+)
+import re
+
+import pytest
+
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+import pandas._testing as tm
+
+from pandas.io.excel import ExcelWriter
+
+odf = pytest.importorskip("odf")
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+@pytest.fixture
+def ext():
+    return ".ods"
+
+
+def test_write_append_mode_raises(ext):
+    msg = "Append mode is not supported with odf!"
+
+    with tm.ensure_clean(ext) as f:
+        with pytest.raises(ValueError, match=msg):
+            ExcelWriter(f, engine="odf", mode="a")
+
+
+@pytest.mark.parametrize("engine_kwargs", [None, {"kwarg": 1}])
+def test_engine_kwargs(ext, engine_kwargs):
+    # GH 42286
+    # GH 43445
+    # test for error: OpenDocumentSpreadsheet does not accept any arguments
+    with tm.ensure_clean(ext) as f:
+        if engine_kwargs is not None:
+            error = re.escape(
+                "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'"
+            )
+            with pytest.raises(
+                TypeError,
+                match=error,
+            ):
+                ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs)
+        else:
+            with ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) as _:
+                pass
+
+
+def test_book_and_sheets_consistent(ext):
+    # GH#45687 - Ensure sheets is updated if user modifies book
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f) as writer:
+            assert writer.sheets == {}
+            table = odf.table.Table(name="test_name")
+            writer.book.spreadsheet.addElement(table)
+            assert writer.sheets == {"test_name": table}
+
+
+@pytest.mark.parametrize(
+    ["value", "cell_value_type", "cell_value_attribute", "cell_value"],
+    argvalues=[
+        (True, "boolean", "boolean-value", "true"),
+        ("test string", "string", "string-value", "test string"),
+        (1, "float", "value", "1"),
+        (1.5, "float", "value", "1.5"),
+        (
+            datetime(2010, 10, 10, 10, 10, 10),
+            "date",
+            "date-value",
+            "2010-10-10T10:10:10",
+        ),
+        (date(2010, 10, 10), "date", "date-value", "2010-10-10"),
+    ],
+)
+def test_cell_value_type(ext, value, cell_value_type, cell_value_attribute, cell_value):
+    # GH#54994 ODS: cell attributes should follow specification
+    # http://docs.oasis-open.org/office/v1.2/os/OpenDocument-v1.2-os-part1.html#refTable13
+    from odf.namespaces import OFFICENS
+    from odf.table import (
+        TableCell,
+        TableRow,
+    )
+
+    table_cell_name = TableCell().qname
+
+    with tm.ensure_clean(ext) as f:
+        pd.DataFrame([[value]]).to_excel(f, header=False, index=False)
+
+        with pd.ExcelFile(f) as wb:
+            sheet = wb._reader.get_sheet_by_index(0)
+            sheet_rows = sheet.getElementsByType(TableRow)
+            sheet_cells = [
+                x
+                for x in sheet_rows[0].childNodes
+                if hasattr(x, "qname") and x.qname == table_cell_name
+            ]
+
+            cell = sheet_cells[0]
+            assert cell.attributes.get((OFFICENS, "value-type")) == cell_value_type
+            assert cell.attributes.get((OFFICENS, cell_value_attribute)) == cell_value
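
For reference, the writer these tests exercise is reached through the same public API; a minimal sketch (output path illustrative):

import pandas as pd

# .ods output is handled by the odf engine; append mode ("a") is rejected,
# as test_write_append_mode_raises above asserts.
pd.DataFrame({"A": [1, 2]}).to_excel("out.ods", engine="odf", index=False)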
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py ADDED
@@ -0,0 +1,432 @@
+import contextlib
+from pathlib import Path
+import re
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.excel import (
+    ExcelWriter,
+    _OpenpyxlWriter,
+)
+from pandas.io.excel._openpyxl import OpenpyxlReader
+
+openpyxl = pytest.importorskip("openpyxl")
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+@pytest.fixture
+def ext():
+    return ".xlsx"
+
+
+def test_to_excel_styleconverter():
+    from openpyxl import styles
+
+    hstyle = {
+        "font": {"color": "00FF0000", "bold": True},
+        "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"},
+        "alignment": {"horizontal": "center", "vertical": "top"},
+        "fill": {"patternType": "solid", "fgColor": {"rgb": "006666FF", "tint": 0.3}},
+        "number_format": {"format_code": "0.00"},
+        "protection": {"locked": True, "hidden": False},
+    }
+
+    font_color = styles.Color("00FF0000")
+    font = styles.Font(bold=True, color=font_color)
+    side = styles.Side(style=styles.borders.BORDER_THIN)
+    border = styles.Border(top=side, right=side, bottom=side, left=side)
+    alignment = styles.Alignment(horizontal="center", vertical="top")
+    fill_color = styles.Color(rgb="006666FF", tint=0.3)
+    fill = styles.PatternFill(patternType="solid", fgColor=fill_color)
+
+    number_format = "0.00"
+
+    protection = styles.Protection(locked=True, hidden=False)
+
+    kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
+    assert kw["font"] == font
+    assert kw["border"] == border
+    assert kw["alignment"] == alignment
+    assert kw["fill"] == fill
+    assert kw["number_format"] == number_format
+    assert kw["protection"] == protection
+
+
+def test_write_cells_merge_styled(ext):
+    from pandas.io.formats.excel import ExcelCell
+
+    sheet_name = "merge_styled"
+
+    sty_b1 = {"font": {"color": "00FF0000"}}
+    sty_a2 = {"font": {"color": "0000FF00"}}
+
+    initial_cells = [
+        ExcelCell(col=1, row=0, val=42, style=sty_b1),
+        ExcelCell(col=0, row=1, val=99, style=sty_a2),
+    ]
+
+    sty_merged = {"font": {"color": "000000FF", "bold": True}}
+    sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
+    openpyxl_sty_merged = sty_kwargs["font"]
+    merge_cells = [
+        ExcelCell(
+            col=0, row=0, val="pandas", mergestart=1, mergeend=1, style=sty_merged
+        )
+    ]
+
+    with tm.ensure_clean(ext) as path:
+        with _OpenpyxlWriter(path) as writer:
+            writer._write_cells(initial_cells, sheet_name=sheet_name)
+            writer._write_cells(merge_cells, sheet_name=sheet_name)
+
+            wks = writer.sheets[sheet_name]
+        xcell_b1 = wks["B1"]
+        xcell_a2 = wks["A2"]
+        assert xcell_b1.font == openpyxl_sty_merged
+        assert xcell_a2.font == openpyxl_sty_merged
+
+
+@pytest.mark.parametrize("iso_dates", [True, False])
+def test_engine_kwargs_write(ext, iso_dates):
+    # GH 42286 GH 43445
+    engine_kwargs = {"iso_dates": iso_dates}
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer:
+            assert writer.book.iso_dates == iso_dates
+            # ExcelWriter won't allow us to close without writing something
+            DataFrame().to_excel(writer)
+
+
+def test_engine_kwargs_append_invalid(ext):
+    # GH 43445
+    # test whether an invalid engine kwarg actually raises
+    with tm.ensure_clean(ext) as f:
+        DataFrame(["hello", "world"]).to_excel(f)
+        with pytest.raises(
+            TypeError,
+            match=re.escape(
+                "load_workbook() got an unexpected keyword argument 'apple_banana'"
+            ),
+        ):
+            with ExcelWriter(
+                f, engine="openpyxl", mode="a", engine_kwargs={"apple_banana": "fruit"}
+            ) as writer:
+                # ExcelWriter needs us to write something to close properly
+                DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2")
+
+
+@pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")])
+def test_engine_kwargs_append_data_only(ext, data_only, expected):
+    # GH 43445
+    # tests whether the data_only engine_kwarg actually works well for
+    # openpyxl's load_workbook
+    with tm.ensure_clean(ext) as f:
+        DataFrame(["=1+1"]).to_excel(f)
+        with ExcelWriter(
+            f, engine="openpyxl", mode="a", engine_kwargs={"data_only": data_only}
+        ) as writer:
+            assert writer.sheets["Sheet1"]["B2"].value == expected
+            # ExcelWriter needs us to write something to close properly
+            DataFrame().to_excel(writer, sheet_name="Sheet2")
+
+        # ensure that data_only also works for reading
+        # and that formulas/values roundtrip
+        assert (
+            pd.read_excel(
+                f,
+                sheet_name="Sheet1",
+                engine="openpyxl",
+                engine_kwargs={"data_only": data_only},
+            ).iloc[0, 1]
+            == expected
+        )
+
+
+@pytest.mark.parametrize("kwarg_name", ["read_only", "data_only"])
+@pytest.mark.parametrize("kwarg_value", [True, False])
+def test_engine_kwargs_append_reader(datapath, ext, kwarg_name, kwarg_value):
+    # GH 55027
+    # test that `read_only` and `data_only` can be passed to
+    # `openpyxl.reader.excel.load_workbook` via `engine_kwargs`
+    filename = datapath("io", "data", "excel", "test1" + ext)
+    with contextlib.closing(
+        OpenpyxlReader(filename, engine_kwargs={kwarg_name: kwarg_value})
+    ) as reader:
+        assert getattr(reader.book, kwarg_name) == kwarg_value
+
+
+@pytest.mark.parametrize(
+    "mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])]
+)
+def test_write_append_mode(ext, mode, expected):
+    df = DataFrame([1], columns=["baz"])
+
+    with tm.ensure_clean(ext) as f:
+        wb = openpyxl.Workbook()
+        wb.worksheets[0].title = "foo"
+        wb.worksheets[0]["A1"].value = "foo"
+        wb.create_sheet("bar")
+        wb.worksheets[1]["A1"].value = "bar"
+        wb.save(f)
+
+        with ExcelWriter(f, engine="openpyxl", mode=mode) as writer:
+            df.to_excel(writer, sheet_name="baz", index=False)
+
+        with contextlib.closing(openpyxl.load_workbook(f)) as wb2:
+            result = [sheet.title for sheet in wb2.worksheets]
+            assert result == expected
+
+            for index, cell_value in enumerate(expected):
+                assert wb2.worksheets[index]["A1"].value == cell_value
+
+
+@pytest.mark.parametrize(
+    "if_sheet_exists,num_sheets,expected",
+    [
+        ("new", 2, ["apple", "banana"]),
+        ("replace", 1, ["pear"]),
+        ("overlay", 1, ["pear", "banana"]),
+    ],
+)
+def test_if_sheet_exists_append_modes(ext, if_sheet_exists, num_sheets, expected):
+    # GH 40230
+    df1 = DataFrame({"fruit": ["apple", "banana"]})
+    df2 = DataFrame({"fruit": ["pear"]})
+
+    with tm.ensure_clean(ext) as f:
+        df1.to_excel(f, engine="openpyxl", sheet_name="foo", index=False)
+        with ExcelWriter(
+            f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists
+        ) as writer:
+            df2.to_excel(writer, sheet_name="foo", index=False)
+
+        with contextlib.closing(openpyxl.load_workbook(f)) as wb:
+            assert len(wb.sheetnames) == num_sheets
+            assert wb.sheetnames[0] == "foo"
+            result = pd.read_excel(wb, "foo", engine="openpyxl")
+            assert list(result["fruit"]) == expected
+            if len(wb.sheetnames) == 2:
+                result = pd.read_excel(wb, wb.sheetnames[1], engine="openpyxl")
+                tm.assert_frame_equal(result, df2)
+
+
+@pytest.mark.parametrize(
+    "startrow, startcol, greeting, goodbye",
+    [
+        (0, 0, ["poop", "world"], ["goodbye", "people"]),
+        (0, 1, ["hello", "world"], ["poop", "people"]),
+        (1, 0, ["hello", "poop"], ["goodbye", "people"]),
+        (1, 1, ["hello", "world"], ["goodbye", "poop"]),
+    ],
+)
+def test_append_overlay_startrow_startcol(ext, startrow, startcol, greeting, goodbye):
+    df1 = DataFrame({"greeting": ["hello", "world"], "goodbye": ["goodbye", "people"]})
+    df2 = DataFrame(["poop"])
+
+    with tm.ensure_clean(ext) as f:
+        df1.to_excel(f, engine="openpyxl", sheet_name="poo", index=False)
+        with ExcelWriter(
+            f, engine="openpyxl", mode="a", if_sheet_exists="overlay"
+        ) as writer:
+            # use startrow+1 because we don't have a header
+            df2.to_excel(
+                writer,
+                index=False,
+                header=False,
+                startrow=startrow + 1,
+                startcol=startcol,
+                sheet_name="poo",
+            )
+
+        result = pd.read_excel(f, sheet_name="poo", engine="openpyxl")
+        expected = DataFrame({"greeting": greeting, "goodbye": goodbye})
+        tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "if_sheet_exists,msg",
+    [
+        (
+            "invalid",
+            "'invalid' is not valid for if_sheet_exists. Valid options "
+            "are 'error', 'new', 'replace' and 'overlay'.",
+        ),
+        (
+            "error",
+            "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.",
+        ),
+        (
+            None,
+            "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.",
+        ),
+    ],
+)
+def test_if_sheet_exists_raises(ext, if_sheet_exists, msg):
+    # GH 40230
+    df = DataFrame({"fruit": ["pear"]})
+    with tm.ensure_clean(ext) as f:
+        with pytest.raises(ValueError, match=re.escape(msg)):
+            df.to_excel(f, sheet_name="foo", engine="openpyxl")
+            with ExcelWriter(
+                f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists
+            ) as writer:
+                df.to_excel(writer, sheet_name="foo")
+
+
+def test_to_excel_with_openpyxl_engine(ext):
+    # GH 29854
+    with tm.ensure_clean(ext) as filename:
+        df1 = DataFrame({"A": np.linspace(1, 10, 10)})
+        df2 = DataFrame({"B": np.linspace(1, 20, 10)})
+        df = pd.concat([df1, df2], axis=1)
+        styled = df.style.map(
+            lambda val: f"color: {'red' if val < 0 else 'black'}"
+        ).highlight_max()
+
+        styled.to_excel(filename, engine="openpyxl")
+
+
+@pytest.mark.parametrize("read_only", [True, False])
+def test_read_workbook(datapath, ext, read_only):
+    # GH 39528
+    filename = datapath("io", "data", "excel", "test1" + ext)
+    with contextlib.closing(
+        openpyxl.load_workbook(filename, read_only=read_only)
+    ) as wb:
+        result = pd.read_excel(wb, engine="openpyxl")
+    expected = pd.read_excel(filename)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "header, expected_data",
+    [
+        (
+            0,
+            {
+                "Title": [np.nan, "A", 1, 2, 3],
+                "Unnamed: 1": [np.nan, "B", 4, 5, 6],
+                "Unnamed: 2": [np.nan, "C", 7, 8, 9],
+            },
+        ),
+        (2, {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}),
+    ],
+)
+@pytest.mark.parametrize(
+    "filename", ["dimension_missing", "dimension_small", "dimension_large"]
+)
+# When read_only is None, use read_excel instead of a workbook
+@pytest.mark.parametrize("read_only", [True, False, None])
+def test_read_with_bad_dimension(
+    datapath, ext, header, expected_data, filename, read_only
+):
+    # GH 38956, 39001 - no/incorrect dimension information
+    path = datapath("io", "data", "excel", f"{filename}{ext}")
+    if read_only is None:
+        result = pd.read_excel(path, header=header)
+    else:
+        with contextlib.closing(
+            openpyxl.load_workbook(path, read_only=read_only)
+        ) as wb:
+            result = pd.read_excel(wb, engine="openpyxl", header=header)
+    expected = DataFrame(expected_data)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_append_mode_file(ext):
+    # GH 39576
+    df = DataFrame()
+
+    with tm.ensure_clean(ext) as f:
+        df.to_excel(f, engine="openpyxl")
+
+        with ExcelWriter(
+            f, mode="a", engine="openpyxl", if_sheet_exists="new"
+        ) as writer:
+            df.to_excel(writer)
+
+        # make sure that zip files are not concatenated by making sure that
+        # "docProps/app.xml" only occurs twice in the file
+        data = Path(f).read_bytes()
+        first = data.find(b"docProps/app.xml")
+        second = data.find(b"docProps/app.xml", first + 1)
+        third = data.find(b"docProps/app.xml", second + 1)
+        assert second != -1 and third == -1
+
+
+# When read_only is None, use read_excel instead of a workbook
+@pytest.mark.parametrize("read_only", [True, False, None])
+def test_read_with_empty_trailing_rows(datapath, ext, read_only):
+    # GH 39181
+    path = datapath("io", "data", "excel", f"empty_trailing_rows{ext}")
+    if read_only is None:
+        result = pd.read_excel(path)
+    else:
+        with contextlib.closing(
+            openpyxl.load_workbook(path, read_only=read_only)
+        ) as wb:
+            result = pd.read_excel(wb, engine="openpyxl")
+    expected = DataFrame(
+        {
+            "Title": [np.nan, "A", 1, 2, 3],
+            "Unnamed: 1": [np.nan, "B", 4, 5, 6],
+            "Unnamed: 2": [np.nan, "C", 7, 8, 9],
+        }
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+# When read_only is None, use read_excel instead of a workbook
+@pytest.mark.parametrize("read_only", [True, False, None])
+def test_read_empty_with_blank_row(datapath, ext, read_only):
+    # GH 39547 - empty excel file with a row that has no data
+    path = datapath("io", "data", "excel", f"empty_with_blank_row{ext}")
+    if read_only is None:
+        result = pd.read_excel(path)
+    else:
+        with contextlib.closing(
+            openpyxl.load_workbook(path, read_only=read_only)
+        ) as wb:
+            result = pd.read_excel(wb, engine="openpyxl")
+    expected = DataFrame()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_book_and_sheets_consistent(ext):
+    # GH#45687 - Ensure sheets is updated if user modifies book
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="openpyxl") as writer:
+            assert writer.sheets == {}
+            sheet = writer.book.create_sheet("test_name", 0)
+            assert writer.sheets == {"test_name": sheet}
+
+
+def test_ints_spelled_with_decimals(datapath, ext):
+    # GH 46988 - openpyxl returns this sheet with floats
+    path = datapath("io", "data", "excel", f"ints_spelled_with_decimals{ext}")
+    result = pd.read_excel(path)
+    expected = DataFrame(range(2, 12), columns=[1])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_multiindex_header_no_index_names(datapath, ext):
+    # GH#47487
+    path = datapath("io", "data", "excel", f"multiindex_no_index_names{ext}")
+    result = pd.read_excel(path, index_col=[0, 1, 2], header=[0, 1, 2])
+    expected = DataFrame(
+        [[np.nan, "x", "x", "x"], ["x", np.nan, np.nan, np.nan]],
+        columns=pd.MultiIndex.from_tuples(
+            [("X", "Y", "A1"), ("X", "Y", "A2"), ("XX", "YY", "B1"), ("XX", "YY", "B2")]
+        ),
+        index=pd.MultiIndex.from_tuples([("A", "AA", "AAA"), ("A", "BB", "BBB")]),
+    )
+    tm.assert_frame_equal(result, expected)
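
A minimal sketch of the openpyxl append workflow covered by the tests above (file and sheet names illustrative):

import pandas as pd

df = pd.DataFrame({"fruit": ["pear"]})
df.to_excel("book.xlsx", engine="openpyxl", sheet_name="foo", index=False)

# mode="a" reopens an existing workbook; if_sheet_exists controls name
# collisions ("error", "new", "replace", or "overlay"), as asserted above.
with pd.ExcelWriter(
    "book.xlsx", engine="openpyxl", mode="a", if_sheet_exists="replace"
) as writer:
    df.to_excel(writer, sheet_name="foo", index=False)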
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py ADDED
@@ -0,0 +1,1751 @@
+from __future__ import annotations
+
+from datetime import (
+    datetime,
+    time,
+)
+from functools import partial
+from io import BytesIO
+import os
+from pathlib import Path
+import platform
+import re
+from urllib.error import URLError
+from zipfile import BadZipFile
+
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas.compat import is_platform_windows
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+    MultiIndex,
+    Series,
+    read_csv,
+)
+import pandas._testing as tm
+from pandas.core.arrays import (
+    ArrowStringArray,
+    StringArray,
+)
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"]
+engine_params = [
+    # Add any engines to test here
+    # When defusedxml is installed it triggers deprecation warnings for
+    # xlrd and openpyxl, so catch those here
+    pytest.param(
+        "xlrd",
+        marks=[
+            td.skip_if_no("xlrd"),
+        ],
+    ),
+    pytest.param(
+        "openpyxl",
+        marks=[
+            td.skip_if_no("openpyxl"),
+        ],
+    ),
+    pytest.param(
+        None,
+        marks=[
+            td.skip_if_no("xlrd"),
+        ],
+    ),
+    pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")),
+    pytest.param("odf", marks=td.skip_if_no("odf")),
+    pytest.param("calamine", marks=td.skip_if_no("python_calamine")),
+]
+
+
+def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
+    """
+    Filter out invalid (engine, ext) pairs instead of skipping, as that
+    produces 500+ pytest.skips.
+    """
+    engine = engine.values[0]
+    if engine == "openpyxl" and read_ext == ".xls":
+        return False
+    if engine == "odf" and read_ext != ".ods":
+        return False
+    if read_ext == ".ods" and engine not in {"odf", "calamine"}:
+        return False
+    if engine == "pyxlsb" and read_ext != ".xlsb":
+        return False
+    if read_ext == ".xlsb" and engine not in {"pyxlsb", "calamine"}:
+        return False
+    if engine == "xlrd" and read_ext != ".xls":
+        return False
+    return True
+
+
+def _transfer_marks(engine, read_ext):
+    """
+    engine gives us a pytest.param object with some marks, read_ext is just
+    a string. We need to generate a new pytest.param inheriting the marks.
+    """
+    values = engine.values + (read_ext,)
+    new_param = pytest.param(values, marks=engine.marks)
+    return new_param
+
+
+@pytest.fixture(
+    params=[
+        _transfer_marks(eng, ext)
+        for eng in engine_params
+        for ext in read_ext_params
+        if _is_valid_engine_ext_pair(eng, ext)
+    ],
+    ids=str,
+)
+def engine_and_read_ext(request):
+    """
+    Fixture for Excel reader engine and read_ext, only including valid pairs.
+    """
+    return request.param
+
+
+@pytest.fixture
+def engine(engine_and_read_ext):
+    engine, read_ext = engine_and_read_ext
+    return engine
+
+
+@pytest.fixture
+def read_ext(engine_and_read_ext):
+    engine, read_ext = engine_and_read_ext
+    return read_ext
+
+
+@pytest.fixture
+def df_ref(datapath):
+    """
+    Obtain the reference data from read_csv with the Python engine.
+    """
+    filepath = datapath("io", "data", "csv", "test1.csv")
+    df_ref = read_csv(filepath, index_col=0, parse_dates=True, engine="python")
+    return df_ref
+
+
+def get_exp_unit(read_ext: str, engine: str | None) -> str:
+    return "ns"
+
+
+def adjust_expected(expected: DataFrame, read_ext: str, engine: str) -> None:
+    expected.index.name = None
+    unit = get_exp_unit(read_ext, engine)
+    # error: "Index" has no attribute "as_unit"
+    expected.index = expected.index.as_unit(unit)  # type: ignore[attr-defined]
+
+
+def xfail_datetimes_with_pyxlsb(engine, request):
+    if engine == "pyxlsb":
+        request.applymarker(
+            pytest.mark.xfail(
+                reason="Sheets containing datetimes not supported by pyxlsb"
+            )
+        )
+
+
+class TestReaders:
+    @pytest.fixture(autouse=True)
+    def cd_and_set_engine(self, engine, datapath, monkeypatch):
+        """
+        Change directory and set engine for read_excel calls.
+        """
+        func = partial(pd.read_excel, engine=engine)
+        monkeypatch.chdir(datapath("io", "data", "excel"))
+        monkeypatch.setattr(pd, "read_excel", func)
+
+    def test_engine_used(self, read_ext, engine, monkeypatch):
+        # GH 38884
+        def parser(self, *args, **kwargs):
+            return self.engine
+
+        monkeypatch.setattr(pd.ExcelFile, "parse", parser)
+
+        expected_defaults = {
+            "xlsx": "openpyxl",
+            "xlsm": "openpyxl",
+            "xlsb": "pyxlsb",
+            "xls": "xlrd",
+            "ods": "odf",
+        }
+
+        with open("test1" + read_ext, "rb") as f:
+            result = pd.read_excel(f)
+
+        if engine is not None:
+            expected = engine
+        else:
+            expected = expected_defaults[read_ext[1:]]
+        assert result == expected
+
+    def test_engine_kwargs(self, read_ext, engine):
+        # GH#52214
+        expected_defaults = {
+            "xlsx": {"foo": "abcd"},
+            "xlsm": {"foo": 123},
+            "xlsb": {"foo": "True"},
+            "xls": {"foo": True},
+            "ods": {"foo": "abcd"},
+        }
+
+        if engine in {"xlrd", "pyxlsb"}:
+            msg = re.escape(r"open_workbook() got an unexpected keyword argument 'foo'")
+        elif engine == "odf":
+            msg = re.escape(r"load() got an unexpected keyword argument 'foo'")
+        else:
+            msg = re.escape(r"load_workbook() got an unexpected keyword argument 'foo'")
+
+        if engine is not None:
+            with pytest.raises(TypeError, match=msg):
+                pd.read_excel(
+                    "test1" + read_ext,
+                    sheet_name="Sheet1",
+                    index_col=0,
+                    engine_kwargs=expected_defaults[read_ext[1:]],
+                )
+
+    def test_usecols_int(self, read_ext):
+        # usecols as int
+        msg = "Passing an integer for `usecols`"
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel(
+                "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3
+            )
+
+        # usecols as int
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel(
+                "test1" + read_ext,
+                sheet_name="Sheet2",
+                skiprows=[1],
+                index_col=0,
+                usecols=3,
+            )
+
+    def test_usecols_list(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref[["B", "C"]]
+        adjust_expected(expected, read_ext, engine)
+
+        df1 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=[0, 2, 3]
+        )
+        df2 = pd.read_excel(
+            "test1" + read_ext,
+            sheet_name="Sheet2",
+            skiprows=[1],
+            index_col=0,
+            usecols=[0, 2, 3],
+        )
+
+        # TODO add index to xls file
+        tm.assert_frame_equal(df1, expected)
+        tm.assert_frame_equal(df2, expected)
+
+    def test_usecols_str(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref[["A", "B", "C"]]
+        adjust_expected(expected, read_ext, engine)
+
+        df2 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A:D"
+        )
+        df3 = pd.read_excel(
+            "test1" + read_ext,
+            sheet_name="Sheet2",
+            skiprows=[1],
+            index_col=0,
+            usecols="A:D",
+        )
+
+        # TODO add index to xls; read xls ignores the index name?
+        tm.assert_frame_equal(df2, expected)
+        tm.assert_frame_equal(df3, expected)
+
+        expected = df_ref[["B", "C"]]
+        adjust_expected(expected, read_ext, engine)
+
+        df2 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C,D"
+        )
+        df3 = pd.read_excel(
+            "test1" + read_ext,
+            sheet_name="Sheet2",
+            skiprows=[1],
+            index_col=0,
+            usecols="A,C,D",
+        )
+        # TODO add index to xls file
+        tm.assert_frame_equal(df2, expected)
+        tm.assert_frame_equal(df3, expected)
+
+        df2 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C:D"
+        )
+        df3 = pd.read_excel(
+            "test1" + read_ext,
+            sheet_name="Sheet2",
+            skiprows=[1],
+            index_col=0,
+            usecols="A,C:D",
+        )
+        tm.assert_frame_equal(df2, expected)
+        tm.assert_frame_equal(df3, expected)
+
+    @pytest.mark.parametrize(
+        "usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
+    )
+    def test_usecols_diff_positional_int_columns_order(
+        self, request, engine, read_ext, usecols, df_ref
+    ):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref[["A", "C"]]
+        adjust_expected(expected, read_ext, engine)
+
+        result = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=usecols
+        )
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
+    def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
+        expected = df_ref[["B", "D"]]
+        expected.index = range(len(expected))
+
+        result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols=usecols)
+        tm.assert_frame_equal(result, expected)
+
+    def test_read_excel_without_slicing(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref
+        adjust_expected(expected, read_ext, engine)
+
+        result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
+        tm.assert_frame_equal(result, expected)
+
+    def test_usecols_excel_range_str(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref[["C", "D"]]
+        adjust_expected(expected, read_ext, engine)
+
+        result = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,D:E"
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_usecols_excel_range_str_invalid(self, read_ext):
+        msg = "Invalid column name: E1"
+
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols="D:E1")
+
+    def test_index_col_label_error(self, read_ext):
+        msg = "list indices must be integers.*, not str"
+
+        with pytest.raises(TypeError, match=msg):
+            pd.read_excel(
+                "test1" + read_ext,
+                sheet_name="Sheet1",
+                index_col=["A"],
+                usecols=["A", "C"],
+            )
+
+    def test_index_col_str(self, read_ext):
+        # see gh-52716
+        result = pd.read_excel("test1" + read_ext, sheet_name="Sheet3", index_col="A")
+        expected = DataFrame(
+            columns=["B", "C", "D", "E", "F"], index=Index([], name="A")
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_index_col_empty(self, read_ext):
+        # see gh-9208
+        result = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet3", index_col=["A", "B", "C"]
+        )
+        expected = DataFrame(
+            columns=["D", "E", "F"],
+            index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
+        )
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("index_col", [None, 2])
+    def test_index_col_with_unnamed(self, read_ext, index_col):
+        # see gh-18792
+        result = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet4", index_col=index_col
+        )
+        expected = DataFrame(
+            [["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
+        )
+        if index_col:
+            expected = expected.set_index(expected.columns[index_col])
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_usecols_pass_non_existent_column(self, read_ext):
+        msg = (
+            "Usecols do not match columns, "
+            "columns expected but not found: "
+            r"\['E'\]"
+        )
+
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel("test1" + read_ext, usecols=["E"])
+
+    def test_usecols_wrong_type(self, read_ext):
+        msg = (
+            "'usecols' must either be list-like of "
+            "all strings, all unicode, all integers or a callable."
+        )
+
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel("test1" + read_ext, usecols=["E1", 0])
+
+    def test_excel_stop_iterator(self, read_ext):
+        parsed = pd.read_excel("test2" + read_ext, sheet_name="Sheet1")
+        expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
+        tm.assert_frame_equal(parsed, expected)
+
+    def test_excel_cell_error_na(self, request, engine, read_ext):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        # https://github.com/tafia/calamine/issues/355
+        if engine == "calamine" and read_ext == ".ods":
+            request.applymarker(
+                pytest.mark.xfail(reason="Calamine can't extract error from ods files")
+            )
+
+        parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1")
+        expected = DataFrame([[np.nan]], columns=["Test"])
+        tm.assert_frame_equal(parsed, expected)
+
+    def test_excel_table(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref
+        adjust_expected(expected, read_ext, engine)
+
+        df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
+        df2 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet2", skiprows=[1], index_col=0
+        )
+        # TODO add index to file
+        tm.assert_frame_equal(df1, expected)
+        tm.assert_frame_equal(df2, expected)
+
+        df3 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, skipfooter=1
+        )
+        tm.assert_frame_equal(df3, df1.iloc[:-1])
+
+    def test_reader_special_dtypes(self, request, engine, read_ext):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        unit = get_exp_unit(read_ext, engine)
+        expected = DataFrame.from_dict(
+            {
+                "IntCol": [1, 2, -3, 4, 0],
+                "FloatCol": [1.25, 2.25, 1.83, 1.92, 0.0000000005],
+                "BoolCol": [True, False, True, True, False],
+                "StrCol": [1, 2, 3, 4, 5],
+                "Str2Col": ["a", 3, "c", "d", "e"],
+                "DateCol": Index(
+                    [
+                        datetime(2013, 10, 30),
+                        datetime(2013, 10, 31),
+                        datetime(1905, 1, 1),
+                        datetime(2013, 12, 14),
+                        datetime(2015, 3, 14),
+                    ],
+                    dtype=f"M8[{unit}]",
+                ),
+            },
+        )
+        basename = "test_types"
+
+        # should read in correctly and infer types
+        actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1")
+        tm.assert_frame_equal(actual, expected)
+
+        # if not coercing number, then int comes in as float
+        float_expected = expected.copy()
+        float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
+        actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1")
+        tm.assert_frame_equal(actual, float_expected)
+
+        # check setting Index (assuming xls and xlsx are the same here)
+        for icol, name in enumerate(expected.columns):
+            actual = pd.read_excel(
+                basename + read_ext, sheet_name="Sheet1", index_col=icol
+            )
+            exp = expected.set_index(name)
+            tm.assert_frame_equal(actual, exp)
+
+        expected["StrCol"] = expected["StrCol"].apply(str)
+        actual = pd.read_excel(
+            basename + read_ext, sheet_name="Sheet1", converters={"StrCol": str}
+        )
+        tm.assert_frame_equal(actual, expected)
+
+    # GH8212 - support for converters and missing values
+    def test_reader_converters(self, read_ext):
+        basename = "test_converters"
+
+        expected = DataFrame.from_dict(
+            {
+                "IntCol": [1, 2, -3, -1000, 0],
+                "FloatCol": [12.5, np.nan, 18.3, 19.2, 0.000000005],
+                "BoolCol": ["Found", "Found", "Found", "Not found", "Found"],
+                "StrCol": ["1", np.nan, "3", "4", "5"],
+            }
+        )
+
+        converters = {
+            "IntCol": lambda x: int(x) if x != "" else -1000,
+            "FloatCol": lambda x: 10 * x if x else np.nan,
+            2: lambda x: "Found" if x != "" else "Not found",
+            3: lambda x: str(x) if x else "",
+        }
+
+        # should read in correctly and set types of single cells (not array
+        # dtypes)
+        actual = pd.read_excel(
+            basename + read_ext, sheet_name="Sheet1", converters=converters
+        )
+        tm.assert_frame_equal(actual, expected)
+
+    def test_reader_dtype(self, read_ext):
+        # GH 8212
+        basename = "testdtype"
+        actual = pd.read_excel(basename + read_ext)
+
+        expected = DataFrame(
+            {
+                "a": [1, 2, 3, 4],
+                "b": [2.5, 3.5, 4.5, 5.5],
+                "c": [1, 2, 3, 4],
+                "d": [1.0, 2.0, np.nan, 4.0],
+            }
+        )
+
+        tm.assert_frame_equal(actual, expected)
+
+        actual = pd.read_excel(
+            basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
+        )
+
+        expected["a"] = expected["a"].astype("float64")
+        expected["b"] = expected["b"].astype("float32")
+        expected["c"] = Series(["001", "002", "003", "004"], dtype=object)
+        tm.assert_frame_equal(actual, expected)
+
+        msg = "Unable to convert column d to type int64"
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel(basename + read_ext, dtype={"d": "int64"})
+
+    @pytest.mark.parametrize(
+        "dtype,expected",
+        [
+            (
+                None,
+                DataFrame(
+                    {
+                        "a": [1, 2, 3, 4],
+                        "b": [2.5, 3.5, 4.5, 5.5],
+                        "c": [1, 2, 3, 4],
+                        "d": [1.0, 2.0, np.nan, 4.0],
+                    }
+                ),
+            ),
+            (
+                {"a": "float64", "b": "float32", "c": str, "d": str},
+                DataFrame(
+                    {
+                        "a": Series([1, 2, 3, 4], dtype="float64"),
+                        "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
+                        "c": Series(["001", "002", "003", "004"], dtype=object),
+                        "d": Series(["1", "2", np.nan, "4"], dtype=object),
+                    }
+                ),
+            ),
+        ],
+    )
+    def test_reader_dtype_str(self, read_ext, dtype, expected):
+        # see gh-20377
+        basename = "testdtype"
+
+        actual = pd.read_excel(basename + read_ext, dtype=dtype)
+        tm.assert_frame_equal(actual, expected)
+
+    def test_dtype_backend(self, read_ext, dtype_backend, engine):
+        # GH#36712
+        if read_ext in (".xlsb", ".xls"):
+            pytest.skip(f"No engine for filetype: '{read_ext}'")
+
+        df = DataFrame(
+            {
+                "a": Series([1, 3], dtype="Int64"),
+                "b": Series([2.5, 4.5], dtype="Float64"),
+                "c": Series([True, False], dtype="boolean"),
+                "d": Series(["a", "b"], dtype="string"),
+                "e": Series([pd.NA, 6], dtype="Int64"),
+                "f": Series([pd.NA, 7.5], dtype="Float64"),
+                "g": Series([pd.NA, True], dtype="boolean"),
+                "h": Series([pd.NA, "a"], dtype="string"),
+                "i": Series([pd.Timestamp("2019-12-31")] * 2),
+                "j": Series([pd.NA, pd.NA], dtype="Int64"),
+            }
+        )
+        with tm.ensure_clean(read_ext) as file_path:
+            df.to_excel(file_path, sheet_name="test", index=False)
+            result = pd.read_excel(
+                file_path, sheet_name="test", dtype_backend=dtype_backend
+            )
+        if dtype_backend == "pyarrow":
+            import pyarrow as pa
+
+            from pandas.arrays import ArrowExtensionArray
+
+            expected = DataFrame(
+                {
+                    col: ArrowExtensionArray(pa.array(df[col], from_pandas=True))
+                    for col in df.columns
+                }
+            )
+            # pyarrow by default infers timestamp resolution as us, not ns
+            expected["i"] = ArrowExtensionArray(
+                expected["i"].array._pa_array.cast(pa.timestamp(unit="us"))
+            )
+            # pyarrow supports a null type, so don't have to default to Int64
+            expected["j"] = ArrowExtensionArray(pa.array([None, None]))
+        else:
+            expected = df
+            unit = get_exp_unit(read_ext, engine)
+            expected["i"] = expected["i"].astype(f"M8[{unit}]")
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_dtype_backend_and_dtype(self, read_ext):
+        # GH#36712
+        if read_ext in (".xlsb", ".xls"):
+            pytest.skip(f"No engine for filetype: '{read_ext}'")
+
+        df = DataFrame({"a": [np.nan, 1.0], "b": [2.5, np.nan]})
+        with tm.ensure_clean(read_ext) as file_path:
+            df.to_excel(file_path, sheet_name="test", index=False)
+            result = pd.read_excel(
+                file_path,
+                sheet_name="test",
+                dtype_backend="numpy_nullable",
+                dtype="float64",
+            )
+        tm.assert_frame_equal(result, df)
+
+    @pytest.mark.xfail(
+        using_pyarrow_string_dtype(), reason="infer_string takes precedence"
+    )
+    def test_dtype_backend_string(self, read_ext, string_storage):
+        # GH#36712
+        if read_ext in (".xlsb", ".xls"):
+            pytest.skip(f"No engine for filetype: '{read_ext}'")
+
+        pa = pytest.importorskip("pyarrow")
+
+        with pd.option_context("mode.string_storage", string_storage):
+            df = DataFrame(
+                {
+                    "a": np.array(["a", "b"], dtype=np.object_),
+                    "b": np.array(["x", pd.NA], dtype=np.object_),
+                }
+            )
+            with tm.ensure_clean(read_ext) as file_path:
+                df.to_excel(file_path, sheet_name="test", index=False)
+                result = pd.read_excel(
+                    file_path, sheet_name="test", dtype_backend="numpy_nullable"
+                )
+
+            if string_storage == "python":
+                expected = DataFrame(
+                    {
+                        "a": StringArray(np.array(["a", "b"], dtype=np.object_)),
+                        "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)),
+                    }
+                )
+            else:
+                expected = DataFrame(
+                    {
+                        "a": ArrowStringArray(pa.array(["a", "b"])),
+                        "b": ArrowStringArray(pa.array(["x", None])),
+                    }
+                )
+            tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("dtypes, exp_value", [({}, 1), ({"a.1": "int64"}, 1)])
+    def test_dtype_mangle_dup_cols(self, read_ext, dtypes, exp_value):
+        # GH#35211
+        basename = "df_mangle_dup_col_dtypes"
+        dtype_dict = {"a": object, **dtypes}
+        dtype_dict_copy = dtype_dict.copy()
+        # GH#42462
+        result = pd.read_excel(basename + read_ext, dtype=dtype_dict)
+        expected = DataFrame(
+            {
+                "a": Series([1], dtype=object),
+                "a.1": Series([exp_value], dtype=object if not dtypes else None),
+            }
+        )
+        assert dtype_dict == dtype_dict_copy, "dtype dict changed"
+        tm.assert_frame_equal(result, expected)
+
+    def test_reader_spaces(self, read_ext):
+        # see gh-32207
+        basename = "test_spaces"
+
+        actual = pd.read_excel(basename + read_ext)
+        expected = DataFrame(
+            {
+                "testcol": [
+                    "this is great",
+                    "4    spaces",
+                    "1 trailing ",
+                    " 1 leading",
+                    "2  spaces  multiple  times",
+                ]
+            }
+        )
+        tm.assert_frame_equal(actual, expected)
+
+    # gh-36122, gh-35802
+    @pytest.mark.parametrize(
+        "basename,expected",
+        [
+            ("gh-35802", DataFrame({"COLUMN": ["Test (1)"]})),
+            ("gh-36122", DataFrame(columns=["got 2nd sa"])),
+        ],
+    )
+    def test_read_excel_ods_nested_xml(self, engine, read_ext, basename, expected):
+        # see gh-35802
+        if engine != "odf":
+            pytest.skip(f"Skipped for engine: {engine}")
+
+        actual = pd.read_excel(basename + read_ext)
+        tm.assert_frame_equal(actual, expected)
+
+    def test_reading_all_sheets(self, read_ext):
+        # Test reading all sheet names by setting sheet_name to None,
+        # and ensure a dict is returned.
+        # See PR #9450
+        basename = "test_multisheet"
+        dfs = pd.read_excel(basename + read_ext, sheet_name=None)
+        # ensure this is not alphabetical to test order preservation
+        expected_keys = ["Charlie", "Alpha", "Beta"]
+        tm.assert_contains_all(expected_keys, dfs.keys())
+        # Issue 9930
+        # Ensure sheet order is preserved
+        assert expected_keys == list(dfs.keys())
+
+    def test_reading_multiple_specific_sheets(self, read_ext):
+        # Test reading specific sheet names by specifying a mixed list
+        # of integers and strings, and confirm that duplicated sheet
+        # references (positions/names) are removed properly.
+        # Ensure a dict is returned
+        # See PR #9450
+        basename = "test_multisheet"
+        # Explicitly request duplicates. Only the set should be returned.
+        expected_keys = [2, "Charlie", "Charlie"]
+        dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
+        expected_keys = list(set(expected_keys))
+        tm.assert_contains_all(expected_keys, dfs.keys())
+        assert len(expected_keys) == len(dfs.keys())
+
+    def test_reading_all_sheets_with_blank(self, read_ext):
+        # Test reading all sheet names by setting sheet_name to None,
+        # in the case where some sheets are blank.
+        # Issue #11711
+        basename = "blank_with_header"
+        dfs = pd.read_excel(basename + read_ext, sheet_name=None)
+        expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
+        tm.assert_contains_all(expected_keys, dfs.keys())
+
+    # GH6403
+    def test_read_excel_blank(self, read_ext):
+        actual = pd.read_excel("blank" + read_ext, sheet_name="Sheet1")
+        tm.assert_frame_equal(actual, DataFrame())
+
+    def test_read_excel_blank_with_header(self, read_ext):
+        expected = DataFrame(columns=["col_1", "col_2"])
+        actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1")
+        tm.assert_frame_equal(actual, expected)
+
+    def test_exception_message_includes_sheet_name(self, read_ext):
+        # GH 48706
+        with pytest.raises(ValueError, match=r" \(sheet: Sheet1\)$"):
+            pd.read_excel("blank_with_header" + read_ext, header=[1], sheet_name=None)
+        with pytest.raises(ZeroDivisionError, match=r" \(sheet: Sheet1\)$"):
+            pd.read_excel("test1" + read_ext, usecols=lambda x: 1 / 0, sheet_name=None)
+
+    @pytest.mark.filterwarnings("ignore:Cell A4 is marked:UserWarning:openpyxl")
+    def test_date_conversion_overflow(self, request, engine, read_ext):
+        # GH 10001: pandas.ExcelFile ignores parse_dates=False
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = DataFrame(
+            [
+                [pd.Timestamp("2016-03-12"), "Marc Johnson"],
+                [pd.Timestamp("2016-03-16"), "Jack Black"],
+                [1e20, "Timothy Brown"],
+            ],
+            columns=["DateColWithBigInt", "StringCol"],
+        )
+
+        if engine == "openpyxl":
+            request.applymarker(
+                pytest.mark.xfail(reason="Maybe not supported by openpyxl")
+            )
+
+        if engine is None and read_ext in (".xlsx", ".xlsm"):
+            # GH 35029
+            request.applymarker(
+                pytest.mark.xfail(reason="Defaults to openpyxl, maybe not supported")
+            )
+
+        result = pd.read_excel("testdateoverflow" + read_ext)
+        tm.assert_frame_equal(result, expected)
+
+    def test_sheet_name(self, request, read_ext, engine, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        filename = "test1"
+        sheet_name = "Sheet1"
+
+        expected = df_ref
+        adjust_expected(expected, read_ext, engine)
+
+        df1 = pd.read_excel(
+            filename + read_ext, sheet_name=sheet_name, index_col=0
+        )  # doc
+        df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name)
+
+        tm.assert_frame_equal(df1, expected)
+        tm.assert_frame_equal(df2, expected)
+
+    def test_excel_read_buffer(self, read_ext):
+        pth = "test1" + read_ext
+        expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0)
+        with open(pth, "rb") as f:
+            actual = pd.read_excel(f, sheet_name="Sheet1", index_col=0)
+            tm.assert_frame_equal(expected, actual)
+
+    def test_bad_engine_raises(self):
+        bad_engine = "foo"
+        with pytest.raises(ValueError, match="Unknown engine: foo"):
+            pd.read_excel("", engine=bad_engine)
+
+    @pytest.mark.parametrize(
+        "sheet_name",
+        [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]],
+    )
+    def test_bad_sheetname_raises(self, read_ext, sheet_name):
+        # GH 39250
+        msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found"
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel("blank" + read_ext, sheet_name=sheet_name)
+
+    def test_missing_file_raises(self, read_ext):
+        bad_file = f"foo{read_ext}"
+        # CI tests with other languages, translates to "No such file or directory"
+        match = "|".join(
876
+ [
877
+ "(No such file or directory",
878
+ "没有那个文件或目录",
879
+ "File o directory non esistente)",
880
+ ]
881
+ )
882
+ with pytest.raises(FileNotFoundError, match=match):
883
+ pd.read_excel(bad_file)
884
+
885
+ def test_corrupt_bytes_raises(self, engine):
886
+ bad_stream = b"foo"
887
+ if engine is None:
888
+ error = ValueError
889
+ msg = (
890
+ "Excel file format cannot be determined, you must "
891
+ "specify an engine manually."
892
+ )
893
+ elif engine == "xlrd":
894
+ from xlrd import XLRDError
895
+
896
+ error = XLRDError
897
+ msg = (
898
+ "Unsupported format, or corrupt file: Expected BOF "
899
+ "record; found b'foo'"
900
+ )
901
+ elif engine == "calamine":
902
+ from python_calamine import CalamineError
903
+
904
+ error = CalamineError
905
+ msg = "Cannot detect file format"
906
+ else:
907
+ error = BadZipFile
908
+ msg = "File is not a zip file"
909
+ with pytest.raises(error, match=msg):
910
+ pd.read_excel(BytesIO(bad_stream))
911
+
912
+ @pytest.mark.network
913
+ @pytest.mark.single_cpu
914
+ def test_read_from_http_url(self, httpserver, read_ext):
915
+ with open("test1" + read_ext, "rb") as f:
916
+ httpserver.serve_content(content=f.read())
917
+ url_table = pd.read_excel(httpserver.url)
918
+ local_table = pd.read_excel("test1" + read_ext)
919
+ tm.assert_frame_equal(url_table, local_table)
920
+
921
+ @td.skip_if_not_us_locale
922
+ @pytest.mark.single_cpu
923
+ def test_read_from_s3_url(self, read_ext, s3_public_bucket, s3so):
924
+ # Bucket created in tests/io/conftest.py
925
+ with open("test1" + read_ext, "rb") as f:
926
+ s3_public_bucket.put_object(Key="test1" + read_ext, Body=f)
927
+
928
+ url = f"s3://{s3_public_bucket.name}/test1" + read_ext
929
+
930
+ url_table = pd.read_excel(url, storage_options=s3so)
931
+ local_table = pd.read_excel("test1" + read_ext)
932
+ tm.assert_frame_equal(url_table, local_table)
933
+
934
+ @pytest.mark.single_cpu
935
+ def test_read_from_s3_object(self, read_ext, s3_public_bucket, s3so):
936
+ # GH 38788
937
+ # Bucket created in tests/io/conftest.py
938
+ with open("test1" + read_ext, "rb") as f:
939
+ s3_public_bucket.put_object(Key="test1" + read_ext, Body=f)
940
+
941
+ import s3fs
942
+
943
+ s3 = s3fs.S3FileSystem(**s3so)
944
+
945
+ with s3.open(f"s3://{s3_public_bucket.name}/test1" + read_ext) as f:
946
+ url_table = pd.read_excel(f)
947
+
948
+ local_table = pd.read_excel("test1" + read_ext)
949
+ tm.assert_frame_equal(url_table, local_table)
950
+
951
+ @pytest.mark.slow
952
+ def test_read_from_file_url(self, read_ext, datapath):
953
+ # FILE
954
+ localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext)
955
+ local_table = pd.read_excel(localtable)
956
+
957
+ try:
958
+ url_table = pd.read_excel("file://localhost/" + localtable)
959
+ except URLError:
960
+ # fails on some systems
961
+ platform_info = " ".join(platform.uname()).strip()
962
+ pytest.skip(f"failing on {platform_info}")
963
+
964
+ tm.assert_frame_equal(url_table, local_table)
965
+
966
+ def test_read_from_pathlib_path(self, read_ext):
967
+ # GH12655
968
+ str_path = "test1" + read_ext
969
+ expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0)
970
+
971
+ path_obj = Path("test1" + read_ext)
972
+ actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0)
973
+
974
+ tm.assert_frame_equal(expected, actual)
975
+
976
+ @td.skip_if_no("py.path")
977
+ def test_read_from_py_localpath(self, read_ext):
978
+ # GH12655
979
+ from py.path import local as LocalPath
980
+
981
+ str_path = os.path.join("test1" + read_ext)
982
+ expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0)
983
+
984
+ path_obj = LocalPath().join("test1" + read_ext)
985
+ actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0)
986
+
987
+ tm.assert_frame_equal(expected, actual)
988
+
989
+ def test_close_from_py_localpath(self, read_ext):
990
+ # GH31467
991
+ str_path = os.path.join("test1" + read_ext)
992
+ with open(str_path, "rb") as f:
993
+ x = pd.read_excel(f, sheet_name="Sheet1", index_col=0)
994
+ del x
995
+ # should not throw an exception because the passed file was closed
996
+ f.read()
997
+
998
+ def test_reader_seconds(self, request, engine, read_ext):
999
+ xfail_datetimes_with_pyxlsb(engine, request)
1000
+
1001
+ # GH 55045
1002
+ if engine == "calamine" and read_ext == ".ods":
1003
+ request.applymarker(
1004
+ pytest.mark.xfail(
1005
+ reason="ODS file contains bad datetime (seconds as text)"
1006
+ )
1007
+ )
1008
+
1009
+ # Test reading times with and without milliseconds. GH5945.
1010
+ expected = DataFrame.from_dict(
1011
+ {
1012
+ "Time": [
1013
+ time(1, 2, 3),
1014
+ time(2, 45, 56, 100000),
1015
+ time(4, 29, 49, 200000),
1016
+ time(6, 13, 42, 300000),
1017
+ time(7, 57, 35, 400000),
1018
+ time(9, 41, 28, 500000),
1019
+ time(11, 25, 21, 600000),
1020
+ time(13, 9, 14, 700000),
1021
+ time(14, 53, 7, 800000),
1022
+ time(16, 37, 0, 900000),
1023
+ time(18, 20, 54),
1024
+ ]
1025
+ }
1026
+ )
1027
+
1028
+ actual = pd.read_excel("times_1900" + read_ext, sheet_name="Sheet1")
1029
+ tm.assert_frame_equal(actual, expected)
1030
+
1031
+ actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1")
1032
+ tm.assert_frame_equal(actual, expected)
1033
+
1034
+ def test_read_excel_multiindex(self, request, engine, read_ext):
1035
+ # see gh-4679
1036
+ xfail_datetimes_with_pyxlsb(engine, request)
1037
+
1038
+ unit = get_exp_unit(read_ext, engine)
1039
+
1040
+ mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
1041
+ mi_file = "testmultiindex" + read_ext
1042
+
1043
+ # "mi_column" sheet
1044
+ expected = DataFrame(
1045
+ [
1046
+ [1, 2.5, pd.Timestamp("2015-01-01"), True],
1047
+ [2, 3.5, pd.Timestamp("2015-01-02"), False],
1048
+ [3, 4.5, pd.Timestamp("2015-01-03"), False],
1049
+ [4, 5.5, pd.Timestamp("2015-01-04"), True],
1050
+ ],
1051
+ columns=mi,
1052
+ )
1053
+ expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]")
1054
+
1055
+ actual = pd.read_excel(
1056
+ mi_file, sheet_name="mi_column", header=[0, 1], index_col=0
1057
+ )
1058
+ tm.assert_frame_equal(actual, expected)
1059
+
1060
+ # "mi_index" sheet
1061
+ expected.index = mi
1062
+ expected.columns = ["a", "b", "c", "d"]
1063
+
1064
+ actual = pd.read_excel(mi_file, sheet_name="mi_index", index_col=[0, 1])
1065
+ tm.assert_frame_equal(actual, expected)
1066
+
1067
+ # "both" sheet
1068
+ expected.columns = mi
1069
+
1070
+ actual = pd.read_excel(
1071
+ mi_file, sheet_name="both", index_col=[0, 1], header=[0, 1]
1072
+ )
1073
+ tm.assert_frame_equal(actual, expected)
1074
+
1075
+ # "mi_index_name" sheet
1076
+ expected.columns = ["a", "b", "c", "d"]
1077
+ expected.index = mi.set_names(["ilvl1", "ilvl2"])
1078
+
1079
+ actual = pd.read_excel(mi_file, sheet_name="mi_index_name", index_col=[0, 1])
1080
+ tm.assert_frame_equal(actual, expected)
1081
+
1082
+ # "mi_column_name" sheet
1083
+ expected.index = list(range(4))
1084
+ expected.columns = mi.set_names(["c1", "c2"])
1085
+ actual = pd.read_excel(
1086
+ mi_file, sheet_name="mi_column_name", header=[0, 1], index_col=0
1087
+ )
1088
+ tm.assert_frame_equal(actual, expected)
1089
+
1090
+ # see gh-11317
1091
+ # "name_with_int" sheet
1092
+ expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"])
1093
+
1094
+ actual = pd.read_excel(
1095
+ mi_file, sheet_name="name_with_int", index_col=0, header=[0, 1]
1096
+ )
1097
+ tm.assert_frame_equal(actual, expected)
1098
+
1099
+ # "both_name" sheet
1100
+ expected.columns = mi.set_names(["c1", "c2"])
1101
+ expected.index = mi.set_names(["ilvl1", "ilvl2"])
1102
+
1103
+ actual = pd.read_excel(
1104
+ mi_file, sheet_name="both_name", index_col=[0, 1], header=[0, 1]
1105
+ )
1106
+ tm.assert_frame_equal(actual, expected)
1107
+
1108
+ # "both_skiprows" sheet
1109
+ actual = pd.read_excel(
1110
+ mi_file,
1111
+ sheet_name="both_name_skiprows",
1112
+ index_col=[0, 1],
1113
+ header=[0, 1],
1114
+ skiprows=2,
1115
+ )
1116
+ tm.assert_frame_equal(actual, expected)
1117
+
1118
+ @pytest.mark.parametrize(
1119
+ "sheet_name,idx_lvl2",
1120
+ [
1121
+ ("both_name_blank_after_mi_name", [np.nan, "b", "a", "b"]),
1122
+ ("both_name_multiple_blanks", [np.nan] * 4),
1123
+ ],
1124
+ )
1125
+ def test_read_excel_multiindex_blank_after_name(
1126
+ self, request, engine, read_ext, sheet_name, idx_lvl2
1127
+ ):
1128
+ # GH34673
1129
+ xfail_datetimes_with_pyxlsb(engine, request)
1130
+
1131
+ mi_file = "testmultiindex" + read_ext
1132
+ mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]], names=["c1", "c2"])
1133
+
1134
+ unit = get_exp_unit(read_ext, engine)
1135
+
1136
+ expected = DataFrame(
1137
+ [
1138
+ [1, 2.5, pd.Timestamp("2015-01-01"), True],
1139
+ [2, 3.5, pd.Timestamp("2015-01-02"), False],
1140
+ [3, 4.5, pd.Timestamp("2015-01-03"), False],
1141
+ [4, 5.5, pd.Timestamp("2015-01-04"), True],
1142
+ ],
1143
+ columns=mi,
1144
+ index=MultiIndex.from_arrays(
1145
+ (["foo", "foo", "bar", "bar"], idx_lvl2),
1146
+ names=["ilvl1", "ilvl2"],
1147
+ ),
1148
+ )
1149
+ expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]")
1150
+ result = pd.read_excel(
1151
+ mi_file,
1152
+ sheet_name=sheet_name,
1153
+ index_col=[0, 1],
1154
+ header=[0, 1],
1155
+ )
1156
+ tm.assert_frame_equal(result, expected)
1157
+
1158
+ def test_read_excel_multiindex_header_only(self, read_ext):
1159
+ # see gh-11733.
1160
+ #
1161
+ # Don't try to parse a header name if there isn't one.
1162
+ mi_file = "testmultiindex" + read_ext
1163
+ result = pd.read_excel(mi_file, sheet_name="index_col_none", header=[0, 1])
1164
+
1165
+ exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
1166
+ expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
1167
+ tm.assert_frame_equal(result, expected)
1168
+
1169
+ def test_excel_old_index_format(self, read_ext):
1170
+ # see gh-4679
1171
+ filename = "test_index_name_pre17" + read_ext
1172
+
1173
+ # We detect headers to determine if index names exist, so
1174
+ # that "index" name in the "names" version of the data will
1175
+ # now be interpreted as rows that include null data.
1176
+ data = np.array(
1177
+ [
1178
+ [np.nan, np.nan, np.nan, np.nan, np.nan],
1179
+ ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
1180
+ ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
1181
+ ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
1182
+ ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
1183
+ ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
1184
+ ],
1185
+ dtype=object,
1186
+ )
1187
+ columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
1188
+ mi = MultiIndex(
1189
+ levels=[
1190
+ ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
1191
+ ["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
1192
+ ],
1193
+ codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
1194
+ names=[None, None],
1195
+ )
1196
+ si = Index(
1197
+ ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None
1198
+ )
1199
+
1200
+ expected = DataFrame(data, index=si, columns=columns)
1201
+
1202
+ actual = pd.read_excel(filename, sheet_name="single_names", index_col=0)
1203
+ tm.assert_frame_equal(actual, expected)
1204
+
1205
+ expected.index = mi
1206
+
1207
+ actual = pd.read_excel(filename, sheet_name="multi_names", index_col=[0, 1])
1208
+ tm.assert_frame_equal(actual, expected)
1209
+
1210
+ # The analogous versions of the "names" version data
1211
+ # where there are explicitly no names for the indices.
1212
+ data = np.array(
1213
+ [
1214
+ ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
1215
+ ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
1216
+ ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
1217
+ ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
1218
+ ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
1219
+ ]
1220
+ )
1221
+ columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
1222
+ mi = MultiIndex(
1223
+ levels=[
1224
+ ["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
1225
+ ["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
1226
+ ],
1227
+ codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
1228
+ names=[None, None],
1229
+ )
1230
+ si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None)
1231
+
1232
+ expected = DataFrame(data, index=si, columns=columns)
1233
+
1234
+ actual = pd.read_excel(filename, sheet_name="single_no_names", index_col=0)
1235
+ tm.assert_frame_equal(actual, expected)
1236
+
1237
+ expected.index = mi
1238
+
1239
+ actual = pd.read_excel(filename, sheet_name="multi_no_names", index_col=[0, 1])
1240
+ tm.assert_frame_equal(actual, expected)
1241
+
1242
+ def test_read_excel_bool_header_arg(self, read_ext):
1243
+ # GH 6114
1244
+ msg = "Passing a bool to header is invalid"
1245
+ for arg in [True, False]:
1246
+ with pytest.raises(TypeError, match=msg):
1247
+ pd.read_excel("test1" + read_ext, header=arg)
1248
+
1249
+ def test_read_excel_skiprows(self, request, engine, read_ext):
1250
+ # GH 4903
1251
+ xfail_datetimes_with_pyxlsb(engine, request)
1252
+
1253
+ unit = get_exp_unit(read_ext, engine)
1254
+
1255
+ actual = pd.read_excel(
1256
+ "testskiprows" + read_ext, sheet_name="skiprows_list", skiprows=[0, 2]
1257
+ )
1258
+ expected = DataFrame(
1259
+ [
1260
+ [1, 2.5, pd.Timestamp("2015-01-01"), True],
1261
+ [2, 3.5, pd.Timestamp("2015-01-02"), False],
1262
+ [3, 4.5, pd.Timestamp("2015-01-03"), False],
1263
+ [4, 5.5, pd.Timestamp("2015-01-04"), True],
1264
+ ],
1265
+ columns=["a", "b", "c", "d"],
1266
+ )
1267
+ expected["c"] = expected["c"].astype(f"M8[{unit}]")
1268
+ tm.assert_frame_equal(actual, expected)
1269
+
1270
+ actual = pd.read_excel(
1271
+ "testskiprows" + read_ext,
1272
+ sheet_name="skiprows_list",
1273
+ skiprows=np.array([0, 2]),
1274
+ )
1275
+ tm.assert_frame_equal(actual, expected)
1276
+
1277
+ # GH36435
1278
+ actual = pd.read_excel(
1279
+ "testskiprows" + read_ext,
1280
+ sheet_name="skiprows_list",
1281
+ skiprows=lambda x: x in [0, 2],
1282
+ )
1283
+ tm.assert_frame_equal(actual, expected)
1284
+
1285
+ actual = pd.read_excel(
1286
+ "testskiprows" + read_ext,
1287
+ sheet_name="skiprows_list",
1288
+ skiprows=3,
1289
+ names=["a", "b", "c", "d"],
1290
+ )
1291
+ expected = DataFrame(
1292
+ [
1293
+ # [1, 2.5, pd.Timestamp("2015-01-01"), True],
1294
+ [2, 3.5, pd.Timestamp("2015-01-02"), False],
1295
+ [3, 4.5, pd.Timestamp("2015-01-03"), False],
1296
+ [4, 5.5, pd.Timestamp("2015-01-04"), True],
1297
+ ],
1298
+ columns=["a", "b", "c", "d"],
1299
+ )
1300
+ expected["c"] = expected["c"].astype(f"M8[{unit}]")
1301
+ tm.assert_frame_equal(actual, expected)
1302
+
1303
+ def test_read_excel_skiprows_callable_not_in(self, request, engine, read_ext):
1304
+ # GH 4903
1305
+ xfail_datetimes_with_pyxlsb(engine, request)
1306
+ unit = get_exp_unit(read_ext, engine)
1307
+
1308
+ actual = pd.read_excel(
1309
+ "testskiprows" + read_ext,
1310
+ sheet_name="skiprows_list",
1311
+ skiprows=lambda x: x not in [1, 3, 5],
1312
+ )
1313
+ expected = DataFrame(
1314
+ [
1315
+ [1, 2.5, pd.Timestamp("2015-01-01"), True],
1316
+ # [2, 3.5, pd.Timestamp("2015-01-02"), False],
1317
+ [3, 4.5, pd.Timestamp("2015-01-03"), False],
1318
+ # [4, 5.5, pd.Timestamp("2015-01-04"), True],
1319
+ ],
1320
+ columns=["a", "b", "c", "d"],
1321
+ )
1322
+ expected["c"] = expected["c"].astype(f"M8[{unit}]")
1323
+ tm.assert_frame_equal(actual, expected)
1324
+
1325
+ def test_read_excel_nrows(self, read_ext):
1326
+ # GH 16645
1327
+ num_rows_to_pull = 5
1328
+ actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
1329
+ expected = pd.read_excel("test1" + read_ext)
1330
+ expected = expected[:num_rows_to_pull]
1331
+ tm.assert_frame_equal(actual, expected)
1332
+
1333
+ def test_read_excel_nrows_greater_than_nrows_in_file(self, read_ext):
1334
+ # GH 16645
1335
+ expected = pd.read_excel("test1" + read_ext)
1336
+ num_records_in_file = len(expected)
1337
+ num_rows_to_pull = num_records_in_file + 10
1338
+ actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
1339
+ tm.assert_frame_equal(actual, expected)
1340
+
1341
+ def test_read_excel_nrows_non_integer_parameter(self, read_ext):
1342
+ # GH 16645
1343
+ msg = "'nrows' must be an integer >=0"
1344
+ with pytest.raises(ValueError, match=msg):
1345
+ pd.read_excel("test1" + read_ext, nrows="5")
1346
+
1347
+ @pytest.mark.parametrize(
1348
+ "filename,sheet_name,header,index_col,skiprows",
1349
+ [
1350
+ ("testmultiindex", "mi_column", [0, 1], 0, None),
1351
+ ("testmultiindex", "mi_index", None, [0, 1], None),
1352
+ ("testmultiindex", "both", [0, 1], [0, 1], None),
1353
+ ("testmultiindex", "mi_column_name", [0, 1], 0, None),
1354
+ ("testskiprows", "skiprows_list", None, None, [0, 2]),
1355
+ ("testskiprows", "skiprows_list", None, None, lambda x: x in (0, 2)),
1356
+ ],
1357
+ )
1358
+ def test_read_excel_nrows_params(
1359
+ self, read_ext, filename, sheet_name, header, index_col, skiprows
1360
+ ):
1361
+ """
1362
+ For various parameters, we should get the same result whether we
1363
+ limit the rows during load (nrows=3) or after (df.iloc[:3]).
1364
+ """
1365
+ # GH 46894
1366
+ expected = pd.read_excel(
1367
+ filename + read_ext,
1368
+ sheet_name=sheet_name,
1369
+ header=header,
1370
+ index_col=index_col,
1371
+ skiprows=skiprows,
1372
+ ).iloc[:3]
1373
+ actual = pd.read_excel(
1374
+ filename + read_ext,
1375
+ sheet_name=sheet_name,
1376
+ header=header,
1377
+ index_col=index_col,
1378
+ skiprows=skiprows,
1379
+ nrows=3,
1380
+ )
1381
+ tm.assert_frame_equal(actual, expected)
1382
+
1383
+ def test_deprecated_kwargs(self, read_ext):
1384
+ with pytest.raises(TypeError, match="but 3 positional arguments"):
1385
+ pd.read_excel("test1" + read_ext, "Sheet1", 0)
1386
+
1387
+ def test_no_header_with_list_index_col(self, read_ext):
1388
+ # GH 31783
1389
+ file_name = "testmultiindex" + read_ext
1390
+ data = [("B", "B"), ("key", "val"), (3, 4), (3, 4)]
1391
+ idx = MultiIndex.from_tuples(
1392
+ [("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1)
1393
+ )
1394
+ expected = DataFrame(data, index=idx, columns=(2, 3))
1395
+ result = pd.read_excel(
1396
+ file_name, sheet_name="index_col_none", index_col=[0, 1], header=None
1397
+ )
1398
+ tm.assert_frame_equal(expected, result)
1399
+
1400
+ def test_one_col_noskip_blank_line(self, read_ext):
1401
+ # GH 39808
1402
+ file_name = "one_col_blank_line" + read_ext
1403
+ data = [0.5, np.nan, 1, 2]
1404
+ expected = DataFrame(data, columns=["numbers"])
1405
+ result = pd.read_excel(file_name)
1406
+ tm.assert_frame_equal(result, expected)
1407
+
1408
+ def test_multiheader_two_blank_lines(self, read_ext):
1409
+ # GH 40442
1410
+ file_name = "testmultiindex" + read_ext
1411
+ columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")])
1412
+ data = [[np.nan, np.nan], [np.nan, np.nan], [1, 3], [2, 4]]
1413
+ expected = DataFrame(data, columns=columns)
1414
+ result = pd.read_excel(
1415
+ file_name, sheet_name="mi_column_empty_rows", header=[0, 1]
1416
+ )
1417
+ tm.assert_frame_equal(result, expected)
1418
+
1419
+ def test_trailing_blanks(self, read_ext):
1420
+ """
1421
+ Sheets can contain blank cells with no data. Some of our readers
1422
+ were including those cells, creating many empty rows and columns
1423
+ """
1424
+ file_name = "trailing_blanks" + read_ext
1425
+ result = pd.read_excel(file_name)
1426
+ assert result.shape == (3, 3)
1427
+
1428
+ def test_ignore_chartsheets_by_str(self, request, engine, read_ext):
1429
+ # GH 41448
1430
+ if read_ext == ".ods":
1431
+ pytest.skip("chartsheets do not exist in the ODF format")
1432
+ if engine == "pyxlsb":
1433
+ request.applymarker(
1434
+ pytest.mark.xfail(
1435
+ reason="pyxlsb can't distinguish chartsheets from worksheets"
1436
+ )
1437
+ )
1438
+ with pytest.raises(ValueError, match="Worksheet named 'Chart1' not found"):
1439
+ pd.read_excel("chartsheet" + read_ext, sheet_name="Chart1")
1440
+
1441
+ def test_ignore_chartsheets_by_int(self, request, engine, read_ext):
1442
+ # GH 41448
1443
+ if read_ext == ".ods":
1444
+ pytest.skip("chartsheets do not exist in the ODF format")
1445
+ if engine == "pyxlsb":
1446
+ request.applymarker(
1447
+ pytest.mark.xfail(
1448
+ reason="pyxlsb can't distinguish chartsheets from worksheets"
1449
+ )
1450
+ )
1451
+ with pytest.raises(
1452
+ ValueError, match="Worksheet index 1 is invalid, 1 worksheets found"
1453
+ ):
1454
+ pd.read_excel("chartsheet" + read_ext, sheet_name=1)
1455
+
1456
+ def test_euro_decimal_format(self, read_ext):
1457
+ # copied from read_csv
1458
+ result = pd.read_excel("test_decimal" + read_ext, decimal=",", skiprows=1)
1459
+ expected = DataFrame(
1460
+ [
1461
+ [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
1462
+ [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
1463
+ [3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
1464
+ ],
1465
+ columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
1466
+ )
1467
+ tm.assert_frame_equal(result, expected)
1468
+
1469
+
1470
+ class TestExcelFileRead:
1471
+ def test_deprecate_bytes_input(self, engine, read_ext):
1472
+ # GH 53830
1473
+ msg = (
1474
+ "Passing bytes to 'read_excel' is deprecated and "
1475
+ "will be removed in a future version. To read from a "
1476
+ "byte string, wrap it in a `BytesIO` object."
1477
+ )
1478
+
1479
+ with tm.assert_produces_warning(
1480
+ FutureWarning, match=msg, raise_on_extra_warnings=False
1481
+ ):
1482
+ with open("test1" + read_ext, "rb") as f:
1483
+ pd.read_excel(f.read(), engine=engine)
1484
+
1485
+ @pytest.fixture(autouse=True)
1486
+ def cd_and_set_engine(self, engine, datapath, monkeypatch):
1487
+ """
1488
+ Change directory and set engine for ExcelFile objects.
1489
+ """
1490
+ func = partial(pd.ExcelFile, engine=engine)
1491
+ monkeypatch.chdir(datapath("io", "data", "excel"))
1492
+ monkeypatch.setattr(pd, "ExcelFile", func)
1493
+
1494
+ def test_engine_used(self, read_ext, engine):
1495
+ expected_defaults = {
1496
+ "xlsx": "openpyxl",
1497
+ "xlsm": "openpyxl",
1498
+ "xlsb": "pyxlsb",
1499
+ "xls": "xlrd",
1500
+ "ods": "odf",
1501
+ }
1502
+
1503
+ with pd.ExcelFile("test1" + read_ext) as excel:
1504
+ result = excel.engine
1505
+
1506
+ if engine is not None:
1507
+ expected = engine
1508
+ else:
1509
+ expected = expected_defaults[read_ext[1:]]
1510
+ assert result == expected
1511
+
1512
+ def test_excel_passes_na(self, read_ext):
1513
+ with pd.ExcelFile("test4" + read_ext) as excel:
1514
+ parsed = pd.read_excel(
1515
+ excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"]
1516
+ )
1517
+ expected = DataFrame(
1518
+ [["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"]
1519
+ )
1520
+ tm.assert_frame_equal(parsed, expected)
1521
+
1522
+ with pd.ExcelFile("test4" + read_ext) as excel:
1523
+ parsed = pd.read_excel(
1524
+ excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"]
1525
+ )
1526
+ expected = DataFrame(
1527
+ [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
1528
+ )
1529
+ tm.assert_frame_equal(parsed, expected)
1530
+
1531
+ # 13967
1532
+ with pd.ExcelFile("test5" + read_ext) as excel:
1533
+ parsed = pd.read_excel(
1534
+ excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"]
1535
+ )
1536
+ expected = DataFrame(
1537
+ [["1.#QNAN"], [1], ["nan"], [np.nan], ["rabbit"]], columns=["Test"]
1538
+ )
1539
+ tm.assert_frame_equal(parsed, expected)
1540
+
1541
+ with pd.ExcelFile("test5" + read_ext) as excel:
1542
+ parsed = pd.read_excel(
1543
+ excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"]
1544
+ )
1545
+ expected = DataFrame(
1546
+ [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
1547
+ )
1548
+ tm.assert_frame_equal(parsed, expected)
1549
+
1550
+ @pytest.mark.parametrize("na_filter", [None, True, False])
1551
+ def test_excel_passes_na_filter(self, read_ext, na_filter):
1552
+ # gh-25453
1553
+ kwargs = {}
1554
+
1555
+ if na_filter is not None:
1556
+ kwargs["na_filter"] = na_filter
1557
+
1558
+ with pd.ExcelFile("test5" + read_ext) as excel:
1559
+ parsed = pd.read_excel(
1560
+ excel,
1561
+ sheet_name="Sheet1",
1562
+ keep_default_na=True,
1563
+ na_values=["apple"],
1564
+ **kwargs,
1565
+ )
1566
+
1567
+ if na_filter is False:
1568
+ expected = [["1.#QNAN"], [1], ["nan"], ["apple"], ["rabbit"]]
1569
+ else:
1570
+ expected = [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]]
1571
+
1572
+ expected = DataFrame(expected, columns=["Test"])
1573
+ tm.assert_frame_equal(parsed, expected)
1574
+
1575
+ def test_excel_table_sheet_by_index(self, request, engine, read_ext, df_ref):
1576
+ xfail_datetimes_with_pyxlsb(engine, request)
1577
+
1578
+ expected = df_ref
1579
+ adjust_expected(expected, read_ext, engine)
1580
+
1581
+ with pd.ExcelFile("test1" + read_ext) as excel:
1582
+ df1 = pd.read_excel(excel, sheet_name=0, index_col=0)
1583
+ df2 = pd.read_excel(excel, sheet_name=1, skiprows=[1], index_col=0)
1584
+ tm.assert_frame_equal(df1, expected)
1585
+ tm.assert_frame_equal(df2, expected)
1586
+
1587
+ with pd.ExcelFile("test1" + read_ext) as excel:
1588
+ df1 = excel.parse(0, index_col=0)
1589
+ df2 = excel.parse(1, skiprows=[1], index_col=0)
1590
+ tm.assert_frame_equal(df1, expected)
1591
+ tm.assert_frame_equal(df2, expected)
1592
+
1593
+ with pd.ExcelFile("test1" + read_ext) as excel:
1594
+ df3 = pd.read_excel(excel, sheet_name=0, index_col=0, skipfooter=1)
1595
+ tm.assert_frame_equal(df3, df1.iloc[:-1])
1596
+
1597
+ with pd.ExcelFile("test1" + read_ext) as excel:
1598
+ df3 = excel.parse(0, index_col=0, skipfooter=1)
1599
+
1600
+ tm.assert_frame_equal(df3, df1.iloc[:-1])
1601
+
1602
+ def test_sheet_name(self, request, engine, read_ext, df_ref):
1603
+ xfail_datetimes_with_pyxlsb(engine, request)
1604
+
1605
+ expected = df_ref
1606
+ adjust_expected(expected, read_ext, engine)
1607
+
1608
+ filename = "test1"
1609
+ sheet_name = "Sheet1"
1610
+
1611
+ with pd.ExcelFile(filename + read_ext) as excel:
1612
+ df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc
1613
+
1614
+ with pd.ExcelFile(filename + read_ext) as excel:
1615
+ df2_parse = excel.parse(index_col=0, sheet_name=sheet_name)
1616
+
1617
+ tm.assert_frame_equal(df1_parse, expected)
1618
+ tm.assert_frame_equal(df2_parse, expected)
1619
+
1620
+ @pytest.mark.parametrize(
1621
+ "sheet_name",
1622
+ [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]],
1623
+ )
1624
+ def test_bad_sheetname_raises(self, read_ext, sheet_name):
1625
+ # GH 39250
1626
+ msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found"
1627
+ with pytest.raises(ValueError, match=msg):
1628
+ with pd.ExcelFile("blank" + read_ext) as excel:
1629
+ excel.parse(sheet_name=sheet_name)
1630
+
1631
+ def test_excel_read_buffer(self, engine, read_ext):
1632
+ pth = "test1" + read_ext
1633
+ expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0, engine=engine)
1634
+
1635
+ with open(pth, "rb") as f:
1636
+ with pd.ExcelFile(f) as xls:
1637
+ actual = pd.read_excel(xls, sheet_name="Sheet1", index_col=0)
1638
+
1639
+ tm.assert_frame_equal(expected, actual)
1640
+
1641
+ def test_reader_closes_file(self, engine, read_ext):
1642
+ with open("test1" + read_ext, "rb") as f:
1643
+ with pd.ExcelFile(f) as xlsx:
1644
+ # parses okay
1645
+ pd.read_excel(xlsx, sheet_name="Sheet1", index_col=0, engine=engine)
1646
+
1647
+ assert f.closed
1648
+
1649
+ def test_conflicting_excel_engines(self, read_ext):
1650
+ # GH 26566
1651
+ msg = "Engine should not be specified when passing an ExcelFile"
1652
+
1653
+ with pd.ExcelFile("test1" + read_ext) as xl:
1654
+ with pytest.raises(ValueError, match=msg):
1655
+ pd.read_excel(xl, engine="foo")
1656
+
1657
+ def test_excel_read_binary(self, engine, read_ext):
1658
+ # GH 15914
1659
+ expected = pd.read_excel("test1" + read_ext, engine=engine)
1660
+
1661
+ with open("test1" + read_ext, "rb") as f:
1662
+ data = f.read()
1663
+
1664
+ actual = pd.read_excel(BytesIO(data), engine=engine)
1665
+ tm.assert_frame_equal(expected, actual)
1666
+
1667
+ def test_excel_read_binary_via_read_excel(self, read_ext, engine):
1668
+ # GH 38424
1669
+ with open("test1" + read_ext, "rb") as f:
1670
+ result = pd.read_excel(f, engine=engine)
1671
+ expected = pd.read_excel("test1" + read_ext, engine=engine)
1672
+ tm.assert_frame_equal(result, expected)
1673
+
1674
+ def test_read_excel_header_index_out_of_range(self, engine):
1675
+ # GH#43143
1676
+ with open("df_header_oob.xlsx", "rb") as f:
1677
+ with pytest.raises(ValueError, match="exceeds maximum"):
1678
+ pd.read_excel(f, header=[0, 1])
1679
+
1680
+ @pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"])
1681
+ def test_header_with_index_col(self, filename):
1682
+ # GH 33476
1683
+ idx = Index(["Z"], name="I2")
1684
+ cols = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])
1685
+ expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64")
1686
+ result = pd.read_excel(
1687
+ filename, sheet_name="Sheet1", index_col=0, header=[0, 1]
1688
+ )
1689
+ tm.assert_frame_equal(expected, result)
1690
+
1691
+ def test_read_datetime_multiindex(self, request, engine, read_ext):
1692
+ # GH 34748
1693
+ xfail_datetimes_with_pyxlsb(engine, request)
1694
+
1695
+ f = "test_datetime_mi" + read_ext
1696
+ with pd.ExcelFile(f) as excel:
1697
+ actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine)
1698
+
1699
+ unit = get_exp_unit(read_ext, engine)
1700
+ dti = pd.DatetimeIndex(["2020-02-29", "2020-03-01"], dtype=f"M8[{unit}]")
1701
+ expected_column_index = MultiIndex.from_arrays(
1702
+ [dti[:1], dti[1:]],
1703
+ names=[
1704
+ dti[0].to_pydatetime(),
1705
+ dti[1].to_pydatetime(),
1706
+ ],
1707
+ )
1708
+ expected = DataFrame([], index=[], columns=expected_column_index)
1709
+
1710
+ tm.assert_frame_equal(expected, actual)
1711
+
1712
+ def test_engine_invalid_option(self, read_ext):
1713
+ # read_ext includes the '.' hence the weird formatting
1714
+ with pytest.raises(ValueError, match="Value must be one of *"):
1715
+ with pd.option_context(f"io.excel{read_ext}.reader", "abc"):
1716
+ pass
1717
+
1718
+ def test_ignore_chartsheets(self, request, engine, read_ext):
1719
+ # GH 41448
1720
+ if read_ext == ".ods":
1721
+ pytest.skip("chartsheets do not exist in the ODF format")
1722
+ if engine == "pyxlsb":
1723
+ request.applymarker(
1724
+ pytest.mark.xfail(
1725
+ reason="pyxlsb can't distinguish chartsheets from worksheets"
1726
+ )
1727
+ )
1728
+ with pd.ExcelFile("chartsheet" + read_ext) as excel:
1729
+ assert excel.sheet_names == ["Sheet1"]
1730
+
1731
+ def test_corrupt_files_closed(self, engine, read_ext):
1732
+ # GH41778
1733
+ errors = (BadZipFile,)
1734
+ if engine is None:
1735
+ pytest.skip(f"Invalid test for engine={engine}")
1736
+ elif engine == "xlrd":
1737
+ import xlrd
1738
+
1739
+ errors = (BadZipFile, xlrd.biffh.XLRDError)
1740
+ elif engine == "calamine":
1741
+ from python_calamine import CalamineError
1742
+
1743
+ errors = (CalamineError,)
1744
+
1745
+ with tm.ensure_clean(f"corrupt{read_ext}") as file:
1746
+ Path(file).write_text("corrupt", encoding="utf-8")
1747
+ with tm.assert_produces_warning(False):
1748
+ try:
1749
+ pd.ExcelFile(file, engine=engine)
1750
+ except errors:
1751
+ pass
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py ADDED
@@ -0,0 +1,1511 @@
+from datetime import (
+    date,
+    datetime,
+    timedelta,
+)
+from functools import partial
+from io import BytesIO
+import os
+import re
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_windows
+from pandas.compat._constants import PY310
+from pandas.compat._optional import import_optional_dependency
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+    MultiIndex,
+    date_range,
+    option_context,
+)
+import pandas._testing as tm
+
+from pandas.io.excel import (
+    ExcelFile,
+    ExcelWriter,
+    _OpenpyxlWriter,
+    _XlsxWriter,
+    register_writer,
+)
+from pandas.io.excel._util import _writers
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+def get_exp_unit(path: str) -> str:
+    return "ns"
+
+
+@pytest.fixture
+def frame(float_frame):
+    """
+    Returns the first ten items in fixture "float_frame".
+    """
+    return float_frame[:10]
+
+
+@pytest.fixture(params=[True, False])
+def merge_cells(request):
+    return request.param
+
+
+@pytest.fixture
+def path(ext):
+    """
+    Fixture to open file for use in each test case.
+    """
+    with tm.ensure_clean(ext) as file_path:
+        yield file_path
+
+
+@pytest.fixture
+def set_engine(engine, ext):
+    """
+    Fixture to set engine for use in each test case.
+
+    Rather than requiring `engine=...` to be provided explicitly as an
+    argument in each test, this fixture sets a global option to dictate
+    which engine should be used to write Excel files. After executing
+    the test it rolls back said change to the global option.
+    """
+    option_name = f"io.excel.{ext.strip('.')}.writer"
+    with option_context(option_name, engine):
+        yield
+
+
+@pytest.mark.parametrize(
+    "ext",
+    [
+        pytest.param(".xlsx", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]),
+        pytest.param(".xlsm", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]),
+        pytest.param(
+            ".xlsx", marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")]
+        ),
+        pytest.param(".ods", marks=td.skip_if_no("odf")),
+    ],
+)
+class TestRoundTrip:
+    @pytest.mark.parametrize(
+        "header,expected",
+        [(None, DataFrame([np.nan] * 4)), (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))],
+    )
+    def test_read_one_empty_col_no_header(self, ext, header, expected):
+        # xref gh-12292
+        filename = "no_header"
+        df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]])
+
+        with tm.ensure_clean(ext) as path:
+            df.to_excel(path, sheet_name=filename, index=False, header=False)
+            result = pd.read_excel(
+                path, sheet_name=filename, usecols=[0], header=header
+            )
+
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "header,expected",
+        [(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))],
+    )
+    def test_read_one_empty_col_with_header(self, ext, header, expected):
+        filename = "with_header"
+        df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]])
+
+        with tm.ensure_clean(ext) as path:
+            df.to_excel(path, sheet_name="with_header", index=False, header=True)
+            result = pd.read_excel(
+                path, sheet_name=filename, usecols=[0], header=header
+            )
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_set_column_names_in_parameter(self, ext):
+        # GH 12870 : pass down column names associated with
+        # keyword argument names
+        refdf = DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"])
+
+        with tm.ensure_clean(ext) as pth:
+            with ExcelWriter(pth) as writer:
+                refdf.to_excel(
+                    writer, sheet_name="Data_no_head", header=False, index=False
+                )
+                refdf.to_excel(writer, sheet_name="Data_with_head", index=False)
+
+            refdf.columns = ["A", "B"]
+
+            with ExcelFile(pth) as reader:
+                xlsdf_no_head = pd.read_excel(
+                    reader, sheet_name="Data_no_head", header=None, names=["A", "B"]
+                )
+                xlsdf_with_head = pd.read_excel(
+                    reader,
+                    sheet_name="Data_with_head",
+                    index_col=None,
+                    names=["A", "B"],
+                )
+
+            tm.assert_frame_equal(xlsdf_no_head, refdf)
+            tm.assert_frame_equal(xlsdf_with_head, refdf)
+
+    def test_creating_and_reading_multiple_sheets(self, ext):
+        # see gh-9450
+        #
+        # Test reading multiple sheets, from a runtime
+        # created Excel file with multiple sheets.
+        def tdf(col_sheet_name):
+            d, i = [11, 22, 33], [1, 2, 3]
+            return DataFrame(d, i, columns=[col_sheet_name])
+
+        sheets = ["AAA", "BBB", "CCC"]
+
+        dfs = [tdf(s) for s in sheets]
+        dfs = dict(zip(sheets, dfs))
+
+        with tm.ensure_clean(ext) as pth:
+            with ExcelWriter(pth) as ew:
+                for sheetname, df in dfs.items():
+                    df.to_excel(ew, sheet_name=sheetname)
+
+            dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0)
+
+            for s in sheets:
+                tm.assert_frame_equal(dfs[s], dfs_returned[s])
+
+    def test_read_excel_multiindex_empty_level(self, ext):
+        # see gh-12453
+        with tm.ensure_clean(ext) as path:
+            df = DataFrame(
+                {
+                    ("One", "x"): {0: 1},
+                    ("Two", "X"): {0: 3},
+                    ("Two", "Y"): {0: 7},
+                    ("Zero", ""): {0: 0},
+                }
+            )
+
+            expected = DataFrame(
+                {
+                    ("One", "x"): {0: 1},
+                    ("Two", "X"): {0: 3},
+                    ("Two", "Y"): {0: 7},
+                    ("Zero", "Unnamed: 4_level_1"): {0: 0},
+                }
+            )
+
+            df.to_excel(path)
+            actual = pd.read_excel(path, header=[0, 1], index_col=0)
+            tm.assert_frame_equal(actual, expected)
+
+            df = DataFrame(
+                {
+                    ("Beg", ""): {0: 0},
+                    ("Middle", "x"): {0: 1},
+                    ("Tail", "X"): {0: 3},
+                    ("Tail", "Y"): {0: 7},
+                }
+            )
+
+            expected = DataFrame(
+                {
+                    ("Beg", "Unnamed: 1_level_1"): {0: 0},
+                    ("Middle", "x"): {0: 1},
+                    ("Tail", "X"): {0: 3},
+                    ("Tail", "Y"): {0: 7},
+                }
+            )
+
+            df.to_excel(path)
+            actual = pd.read_excel(path, header=[0, 1], index_col=0)
+            tm.assert_frame_equal(actual, expected)
+
+    @pytest.mark.parametrize("c_idx_names", ["a", None])
+    @pytest.mark.parametrize("r_idx_names", ["b", None])
+    @pytest.mark.parametrize("c_idx_levels", [1, 3])
+    @pytest.mark.parametrize("r_idx_levels", [1, 3])
+    def test_excel_multindex_roundtrip(
+        self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels, request
+    ):
+        # see gh-4679
+        with tm.ensure_clean(ext) as pth:
+            # Empty name case current read in as
+            # unnamed levels, not Nones.
+            check_names = bool(r_idx_names) or r_idx_levels <= 1
+
+            if c_idx_levels == 1:
+                columns = Index(list("abcde"))
+            else:
+                columns = MultiIndex.from_arrays(
+                    [range(5) for _ in range(c_idx_levels)],
+                    names=[f"{c_idx_names}-{i}" for i in range(c_idx_levels)],
+                )
+            if r_idx_levels == 1:
+                index = Index(list("ghijk"))
+            else:
+                index = MultiIndex.from_arrays(
+                    [range(5) for _ in range(r_idx_levels)],
+                    names=[f"{r_idx_names}-{i}" for i in range(r_idx_levels)],
+                )
+            df = DataFrame(
+                1.1 * np.ones((5, 5)),
+                columns=columns,
+                index=index,
+            )
+            df.to_excel(pth)
+
+            act = pd.read_excel(
+                pth,
+                index_col=list(range(r_idx_levels)),
+                header=list(range(c_idx_levels)),
+            )
+            tm.assert_frame_equal(df, act, check_names=check_names)
+
+            df.iloc[0, :] = np.nan
+            df.to_excel(pth)
+
+            act = pd.read_excel(
+                pth,
+                index_col=list(range(r_idx_levels)),
+                header=list(range(c_idx_levels)),
+            )
+            tm.assert_frame_equal(df, act, check_names=check_names)
+
+            df.iloc[-1, :] = np.nan
+            df.to_excel(pth)
+            act = pd.read_excel(
+                pth,
+                index_col=list(range(r_idx_levels)),
+                header=list(range(c_idx_levels)),
+            )
+            tm.assert_frame_equal(df, act, check_names=check_names)
+
+    def test_read_excel_parse_dates(self, ext):
+        # see gh-11544, gh-12051
+        df = DataFrame(
+            {"col": [1, 2, 3], "date_strings": date_range("2012-01-01", periods=3)}
+        )
+        df2 = df.copy()
+        df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
+
+        with tm.ensure_clean(ext) as pth:
+            df2.to_excel(pth)
+
+            res = pd.read_excel(pth, index_col=0)
+            tm.assert_frame_equal(df2, res)
+
+            res = pd.read_excel(pth, parse_dates=["date_strings"], index_col=0)
+            tm.assert_frame_equal(df, res)
+
+            date_parser = lambda x: datetime.strptime(x, "%m/%d/%Y")
+            with tm.assert_produces_warning(
+                FutureWarning,
+                match="use 'date_format' instead",
+                raise_on_extra_warnings=False,
+            ):
+                res = pd.read_excel(
+                    pth,
+                    parse_dates=["date_strings"],
+                    date_parser=date_parser,
+                    index_col=0,
+                )
+            tm.assert_frame_equal(df, res)
+            res = pd.read_excel(
+                pth, parse_dates=["date_strings"], date_format="%m/%d/%Y", index_col=0
+            )
+            tm.assert_frame_equal(df, res)
+
+    def test_multiindex_interval_datetimes(self, ext):
+        # GH 30986
+        midx = MultiIndex.from_arrays(
+            [
+                range(4),
+                pd.interval_range(
+                    start=pd.Timestamp("2020-01-01"), periods=4, freq="6ME"
+                ),
+            ]
+        )
+        df = DataFrame(range(4), index=midx)
+        with tm.ensure_clean(ext) as pth:
+            df.to_excel(pth)
+            result = pd.read_excel(pth, index_col=[0, 1])
+            expected = DataFrame(
+                range(4),
+                MultiIndex.from_arrays(
+                    [
+                        range(4),
+                        [
+                            "(2020-01-31 00:00:00, 2020-07-31 00:00:00]",
+                            "(2020-07-31 00:00:00, 2021-01-31 00:00:00]",
+                            "(2021-01-31 00:00:00, 2021-07-31 00:00:00]",
+                            "(2021-07-31 00:00:00, 2022-01-31 00:00:00]",
+                        ],
+                    ]
+                ),
+            )
+            tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "engine,ext",
+    [
+        pytest.param(
+            "openpyxl",
+            ".xlsx",
+            marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")],
+        ),
+        pytest.param(
+            "openpyxl",
+            ".xlsm",
+            marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")],
+        ),
+        pytest.param(
+            "xlsxwriter",
+            ".xlsx",
+            marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")],
+        ),
+        pytest.param("odf", ".ods", marks=td.skip_if_no("odf")),
+    ],
+)
+@pytest.mark.usefixtures("set_engine")
+class TestExcelWriter:
+    def test_excel_sheet_size(self, path):
+        # GH 26080
+        breaking_row_count = 2**20 + 1
+        breaking_col_count = 2**14 + 1
+        # purposely using two arrays to prevent memory issues while testing
+        row_arr = np.zeros(shape=(breaking_row_count, 1))
+        col_arr = np.zeros(shape=(1, breaking_col_count))
+        row_df = DataFrame(row_arr)
+        col_df = DataFrame(col_arr)
+
+        msg = "sheet is too large"
+        with pytest.raises(ValueError, match=msg):
+            row_df.to_excel(path)
+
+        with pytest.raises(ValueError, match=msg):
+            col_df.to_excel(path)
+
+    def test_excel_sheet_by_name_raise(self, path):
+        gt = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
+        gt.to_excel(path)
+
+        with ExcelFile(path) as xl:
+            df = pd.read_excel(xl, sheet_name=0, index_col=0)
+
+        tm.assert_frame_equal(gt, df)
+
+        msg = "Worksheet named '0' not found"
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel(xl, "0")
+
+    def test_excel_writer_context_manager(self, frame, path):
+        with ExcelWriter(path) as writer:
+            frame.to_excel(writer, sheet_name="Data1")
+            frame2 = frame.copy()
+            frame2.columns = frame.columns[::-1]
+            frame2.to_excel(writer, sheet_name="Data2")
+
+        with ExcelFile(path) as reader:
+            found_df = pd.read_excel(reader, sheet_name="Data1", index_col=0)
+            found_df2 = pd.read_excel(reader, sheet_name="Data2", index_col=0)
+
+            tm.assert_frame_equal(found_df, frame)
+            tm.assert_frame_equal(found_df2, frame2)
+
+    def test_roundtrip(self, frame, path):
+        frame = frame.copy()
+        frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
+
+        frame.to_excel(path, sheet_name="test1")
+        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
+        frame.to_excel(path, sheet_name="test1", header=False)
+        frame.to_excel(path, sheet_name="test1", index=False)
+
+        # test roundtrip
+        frame.to_excel(path, sheet_name="test1")
+        recons = pd.read_excel(path, sheet_name="test1", index_col=0)
+        tm.assert_frame_equal(frame, recons)
+
+        frame.to_excel(path, sheet_name="test1", index=False)
+        recons = pd.read_excel(path, sheet_name="test1", index_col=None)
+        recons.index = frame.index
+        tm.assert_frame_equal(frame, recons)
+
+        frame.to_excel(path, sheet_name="test1", na_rep="NA")
+        recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["NA"])
+        tm.assert_frame_equal(frame, recons)
+
+        # GH 3611
+        frame.to_excel(path, sheet_name="test1", na_rep="88")
+        recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["88"])
+        tm.assert_frame_equal(frame, recons)
+
+        frame.to_excel(path, sheet_name="test1", na_rep="88")
+        recons = pd.read_excel(
+            path, sheet_name="test1", index_col=0, na_values=[88, 88.0]
+        )
+        tm.assert_frame_equal(frame, recons)
+
+        # GH 6573
+        frame.to_excel(path, sheet_name="Sheet1")
+        recons = pd.read_excel(path, index_col=0)
+        tm.assert_frame_equal(frame, recons)
+
+        frame.to_excel(path, sheet_name="0")
+        recons = pd.read_excel(path, index_col=0)
+        tm.assert_frame_equal(frame, recons)
+
+        # GH 8825 Pandas Series should provide to_excel method
+        s = frame["A"]
+        s.to_excel(path)
+        recons = pd.read_excel(path, index_col=0)
+        tm.assert_frame_equal(s.to_frame(), recons)
+
+    def test_mixed(self, frame, path):
+        mixed_frame = frame.copy()
+        mixed_frame["foo"] = "bar"
+
+        mixed_frame.to_excel(path, sheet_name="test1")
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
+        tm.assert_frame_equal(mixed_frame, recons)
+
+    def test_ts_frame(self, path):
+        unit = get_exp_unit(path)
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 4)),
+            columns=Index(list("ABCD")),
+            index=date_range("2000-01-01", periods=5, freq="B"),
+        )
+
+        # freq doesn't round-trip
+        index = pd.DatetimeIndex(np.asarray(df.index), freq=None)
+        df.index = index
+
+        expected = df[:]
+        expected.index = expected.index.as_unit(unit)
+
+        df.to_excel(path, sheet_name="test1")
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
+        tm.assert_frame_equal(expected, recons)
+
+    def test_basics_with_nan(self, frame, path):
+        frame = frame.copy()
+        frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
+        frame.to_excel(path, sheet_name="test1")
+        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
+        frame.to_excel(path, sheet_name="test1", header=False)
+        frame.to_excel(path, sheet_name="test1", index=False)
+
+    @pytest.mark.parametrize("np_type", [np.int8, np.int16, np.int32, np.int64])
+    def test_int_types(self, np_type, path):
+        # Test np.int values read come back as int
+        # (rather than float which is Excel's format).
+        df = DataFrame(
+            np.random.default_rng(2).integers(-10, 10, size=(10, 2)), dtype=np_type
+        )
+        df.to_excel(path, sheet_name="test1")
+
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
+
+        int_frame = df.astype(np.int64)
+        tm.assert_frame_equal(int_frame, recons)
+
+        recons2 = pd.read_excel(path, sheet_name="test1", index_col=0)
+        tm.assert_frame_equal(int_frame, recons2)
+
+    @pytest.mark.parametrize("np_type", [np.float16, np.float32, np.float64])
+    def test_float_types(self, np_type, path):
+        # Test np.float values read come back as float.
+        df = DataFrame(np.random.default_rng(2).random(10), dtype=np_type)
+        df.to_excel(path, sheet_name="test1")
+
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
+                np_type
+            )
+
+        tm.assert_frame_equal(df, recons)
+
+    def test_bool_types(self, path):
+        # Test np.bool_ values read come back as float.
+        df = DataFrame([1, 0, True, False], dtype=np.bool_)
+        df.to_excel(path, sheet_name="test1")
+
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
+                np.bool_
+            )
+
+        tm.assert_frame_equal(df, recons)
+
+    def test_inf_roundtrip(self, path):
+        df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
+        df.to_excel(path, sheet_name="test1")
+
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
+
+        tm.assert_frame_equal(df, recons)
+
+    def test_sheets(self, frame, path):
+        # freq doesn't round-trip
+        unit = get_exp_unit(path)
+        tsframe = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 4)),
+            columns=Index(list("ABCD")),
+            index=date_range("2000-01-01", periods=5, freq="B"),
+        )
+        index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
+        tsframe.index = index
+
+        expected = tsframe[:]
+        expected.index = expected.index.as_unit(unit)
+
+        frame = frame.copy()
+        frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
+
+        frame.to_excel(path, sheet_name="test1")
+        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
+        frame.to_excel(path, sheet_name="test1", header=False)
+        frame.to_excel(path, sheet_name="test1", index=False)
+
+        # Test writing to separate sheets
+        with ExcelWriter(path) as writer:
+            frame.to_excel(writer, sheet_name="test1")
+            tsframe.to_excel(writer, sheet_name="test2")
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
+            tm.assert_frame_equal(frame, recons)
+            recons = pd.read_excel(reader, sheet_name="test2", index_col=0)
+            tm.assert_frame_equal(expected, recons)
+            assert 2 == len(reader.sheet_names)
+            assert "test1" == reader.sheet_names[0]
+            assert "test2" == reader.sheet_names[1]
+
+    def test_colaliases(self, frame, path):
+        frame = frame.copy()
+        frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
+
+        frame.to_excel(path, sheet_name="test1")
+        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
+        frame.to_excel(path, sheet_name="test1", header=False)
+        frame.to_excel(path, sheet_name="test1", index=False)
+
+        # column aliases
+        col_aliases = Index(["AA", "X", "Y", "Z"])
+        frame.to_excel(path, sheet_name="test1", header=col_aliases)
+        with ExcelFile(path) as reader:
+            rs = pd.read_excel(reader, sheet_name="test1", index_col=0)
+        xp = frame.copy()
+        xp.columns = col_aliases
+        tm.assert_frame_equal(xp, rs)
+
+    def test_roundtrip_indexlabels(self, merge_cells, frame, path):
+        frame = frame.copy()
+        frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
+
+        frame.to_excel(path, sheet_name="test1")
+        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
+        frame.to_excel(path, sheet_name="test1", header=False)
+        frame.to_excel(path, sheet_name="test1", index=False)
+
+        # test index_label
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0
+        df.to_excel(
+            path, sheet_name="test1", index_label=["test"], merge_cells=merge_cells
+        )
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
+                np.int64
+            )
+        df.index.names = ["test"]
+        assert df.index.names == recons.index.names
+
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0
+        df.to_excel(
+            path,
+            sheet_name="test1",
+            index_label=["test", "dummy", "dummy2"],
+            merge_cells=merge_cells,
+        )
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
+                np.int64
+            )
+        df.index.names = ["test"]
+        assert df.index.names == recons.index.names
+
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0
+        df.to_excel(
+            path, sheet_name="test1", index_label="test", merge_cells=merge_cells
+        )
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
+                np.int64
+            )
+        df.index.names = ["test"]
+        tm.assert_frame_equal(df, recons.astype(bool))
+
+        frame.to_excel(
+            path,
+            sheet_name="test1",
+            columns=["A", "B", "C", "D"],
+            index=False,
+            merge_cells=merge_cells,
+        )
+        # take 'A' and 'B' as indexes (same row as cols 'C', 'D')
+        df = frame.copy()
+        df = df.set_index(["A", "B"])
+
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])
+        tm.assert_frame_equal(df, recons)
+
+    def test_excel_roundtrip_indexname(self, merge_cells, path):
+        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+        df.index.name = "foo"
+
+        df.to_excel(path, merge_cells=merge_cells)
+
+        with ExcelFile(path) as xf:
+            result = pd.read_excel(xf, sheet_name=xf.sheet_names[0], index_col=0)
+
+        tm.assert_frame_equal(result, df)
+        assert result.index.name == "foo"
+
+    def test_excel_roundtrip_datetime(self, merge_cells, path):
+        # datetime.date, not sure what to test here exactly
+        unit = get_exp_unit(path)
+
+        # freq does not round-trip
+        tsframe = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 4)),
691
+ columns=Index(list("ABCD")),
692
+ index=date_range("2000-01-01", periods=5, freq="B"),
693
+ )
694
+ index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
695
+ tsframe.index = index
696
+
697
+ tsf = tsframe.copy()
698
+
699
+ tsf.index = [x.date() for x in tsframe.index]
700
+ tsf.to_excel(path, sheet_name="test1", merge_cells=merge_cells)
701
+
702
+ with ExcelFile(path) as reader:
703
+ recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
704
+
705
+ expected = tsframe[:]
706
+ expected.index = expected.index.as_unit(unit)
707
+ tm.assert_frame_equal(expected, recons)
708
+
709
+ def test_excel_date_datetime_format(self, ext, path):
710
+ # see gh-4133
711
+ #
712
+ # Excel output format strings
713
+ unit = get_exp_unit(path)
714
+
715
+ df = DataFrame(
716
+ [
717
+ [date(2014, 1, 31), date(1999, 9, 24)],
718
+ [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
719
+ ],
720
+ index=["DATE", "DATETIME"],
721
+ columns=["X", "Y"],
722
+ )
723
+ df_expected = DataFrame(
724
+ [
725
+ [datetime(2014, 1, 31), datetime(1999, 9, 24)],
726
+ [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
727
+ ],
728
+ index=["DATE", "DATETIME"],
729
+ columns=["X", "Y"],
730
+ )
731
+ df_expected = df_expected.astype(f"M8[{unit}]")
732
+
733
+ with tm.ensure_clean(ext) as filename2:
734
+ with ExcelWriter(path) as writer1:
735
+ df.to_excel(writer1, sheet_name="test1")
736
+
737
+ with ExcelWriter(
738
+ filename2,
739
+ date_format="DD.MM.YYYY",
740
+ datetime_format="DD.MM.YYYY HH-MM-SS",
741
+ ) as writer2:
742
+ df.to_excel(writer2, sheet_name="test1")
743
+
744
+ with ExcelFile(path) as reader1:
745
+ rs1 = pd.read_excel(reader1, sheet_name="test1", index_col=0)
746
+
747
+ with ExcelFile(filename2) as reader2:
748
+ rs2 = pd.read_excel(reader2, sheet_name="test1", index_col=0)
749
+
750
+ tm.assert_frame_equal(rs1, rs2)
751
+
752
+ # Since the reader returns a datetime object for dates,
753
+ # we need to use df_expected to check the result.
754
+ tm.assert_frame_equal(rs2, df_expected)
755
+
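+    # Note: per the test above, date_format/datetime_format only change the
+    # number-format strings attached to the output cells; the stored values
+    # are unchanged, which is why rs1 and rs2 round-trip identically.
+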
+    def test_to_excel_interval_no_labels(self, path, using_infer_string):
+        # see gh-19242
+        #
+        # Test writing Interval without labels.
+        df = DataFrame(
+            np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64
+        )
+        expected = df.copy()
+
+        df["new"] = pd.cut(df[0], 10)
+        expected["new"] = pd.cut(expected[0], 10).astype(
+            str if not using_infer_string else "string[pyarrow_numpy]"
+        )
+
+        df.to_excel(path, sheet_name="test1")
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
+        tm.assert_frame_equal(expected, recons)
+
+    def test_to_excel_interval_labels(self, path):
+        # see gh-19242
+        #
+        # Test writing Interval with labels.
+        df = DataFrame(
+            np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64
+        )
+        expected = df.copy()
+        intervals = pd.cut(
+            df[0], 10, labels=["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"]
+        )
+        df["new"] = intervals
+        expected["new"] = pd.Series(list(intervals))
+
+        df.to_excel(path, sheet_name="test1")
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
+        tm.assert_frame_equal(expected, recons)
+
+    def test_to_excel_timedelta(self, path):
+        # see gh-19242, gh-9155
+        #
+        # Test writing timedelta to xls.
+        df = DataFrame(
+            np.random.default_rng(2).integers(-10, 10, size=(20, 1)),
+            columns=["A"],
+            dtype=np.int64,
+        )
+        expected = df.copy()
+
+        df["new"] = df["A"].apply(lambda x: timedelta(seconds=x))
+        expected["new"] = expected["A"].apply(
+            lambda x: timedelta(seconds=x).total_seconds() / 86400
+        )
+
+        df.to_excel(path, sheet_name="test1")
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
+        tm.assert_frame_equal(expected, recons)
+
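+    # Note: Excel has no native timedelta type, so timedeltas are written as
+    # floating-point days; the expected column above mirrors that with
+    # total_seconds() / 86400 (seconds per day).
+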
+    def test_to_excel_periodindex(self, path):
+        # xp has a PeriodIndex
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 4)),
+            columns=Index(list("ABCD")),
+            index=date_range("2000-01-01", periods=5, freq="B"),
+        )
+        xp = df.resample("ME").mean().to_period("M")
+
+        xp.to_excel(path, sheet_name="sht1")
+
+        with ExcelFile(path) as reader:
+            rs = pd.read_excel(reader, sheet_name="sht1", index_col=0)
+        tm.assert_frame_equal(xp, rs.to_period("M"))
+
+    def test_to_excel_multiindex(self, merge_cells, frame, path):
+        arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1)
+        new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
+        frame.index = new_index
+
+        frame.to_excel(path, sheet_name="test1", header=False)
+        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
+
+        # round trip
+        frame.to_excel(path, sheet_name="test1", merge_cells=merge_cells)
+        with ExcelFile(path) as reader:
+            df = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])
+        tm.assert_frame_equal(frame, df)
+
+    # GH13511
+    def test_to_excel_multiindex_nan_label(self, merge_cells, path):
+        df = DataFrame(
+            {
+                "A": [None, 2, 3],
+                "B": [10, 20, 30],
+                "C": np.random.default_rng(2).random(3),
+            }
+        )
+        df = df.set_index(["A", "B"])
+
+        df.to_excel(path, merge_cells=merge_cells)
+        df1 = pd.read_excel(path, index_col=[0, 1])
+        tm.assert_frame_equal(df, df1)
+
+    # Test for Issue 11328. If column indices are integers, make
+    # sure they are handled correctly for either setting of
+    # merge_cells
+    def test_to_excel_multiindex_cols(self, merge_cells, frame, path):
+        arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1)
+        new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
+        frame.index = new_index
+
+        new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)])
+        frame.columns = new_cols_index
+        header = [0, 1]
+        if not merge_cells:
+            header = 0
+
+        # round trip
+        frame.to_excel(path, sheet_name="test1", merge_cells=merge_cells)
+        with ExcelFile(path) as reader:
+            df = pd.read_excel(
+                reader, sheet_name="test1", header=header, index_col=[0, 1]
+            )
+        if not merge_cells:
+            fm = frame.columns._format_multi(sparsify=False, include_names=False)
+            frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
+        tm.assert_frame_equal(frame, df)
+
+    def test_to_excel_multiindex_dates(self, merge_cells, path):
+        # try multiindex with dates
+        unit = get_exp_unit(path)
+        tsframe = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 4)),
+            columns=Index(list("ABCD")),
+            index=date_range("2000-01-01", periods=5, freq="B"),
+        )
+        tsframe.index = MultiIndex.from_arrays(
+            [
+                tsframe.index.as_unit(unit),
+                np.arange(len(tsframe.index), dtype=np.int64),
+            ],
+            names=["time", "foo"],
+        )
+
+        tsframe.to_excel(path, sheet_name="test1", merge_cells=merge_cells)
+        with ExcelFile(path) as reader:
+            recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])
+
+        tm.assert_frame_equal(tsframe, recons)
+        assert recons.index.names == ("time", "foo")
+
+    def test_to_excel_multiindex_no_write_index(self, path):
+        # Test writing and re-reading a MI without the index. GH 5616.
+
+        # Initial non-MI frame.
+        frame1 = DataFrame({"a": [10, 20], "b": [30, 40], "c": [50, 60]})
+
+        # Add a MI.
+        frame2 = frame1.copy()
+        multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
+        frame2.index = multi_index
+
+        # Write out to Excel without the index.
+        frame2.to_excel(path, sheet_name="test1", index=False)
+
+        # Read it back in.
+        with ExcelFile(path) as reader:
+            frame3 = pd.read_excel(reader, sheet_name="test1")
+
+        # Test that it is the same as the initial frame.
+        tm.assert_frame_equal(frame1, frame3)
+
+    def test_to_excel_empty_multiindex(self, path):
+        # GH 19543.
+        expected = DataFrame([], columns=[0, 1, 2])
+
+        df = DataFrame([], index=MultiIndex.from_tuples([], names=[0, 1]), columns=[2])
+        df.to_excel(path, sheet_name="test1")
+
+        with ExcelFile(path) as reader:
+            result = pd.read_excel(reader, sheet_name="test1")
+        tm.assert_frame_equal(
+            result, expected, check_index_type=False, check_dtype=False
+        )
+
+    def test_to_excel_float_format(self, path):
+        df = DataFrame(
+            [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
+            index=["A", "B"],
+            columns=["X", "Y", "Z"],
+        )
+        df.to_excel(path, sheet_name="test1", float_format="%.2f")
+
+        with ExcelFile(path) as reader:
+            result = pd.read_excel(reader, sheet_name="test1", index_col=0)
+
+        expected = DataFrame(
+            [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
+            index=["A", "B"],
+            columns=["X", "Y", "Z"],
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_to_excel_output_encoding(self, ext):
+        # Avoid mixed inferred_type.
+        df = DataFrame(
+            [["\u0192", "\u0193", "\u0194"], ["\u0195", "\u0196", "\u0197"]],
+            index=["A\u0192", "B"],
+            columns=["X\u0193", "Y", "Z"],
+        )
+
+        with tm.ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
+            df.to_excel(filename, sheet_name="TestSheet")
+            result = pd.read_excel(filename, sheet_name="TestSheet", index_col=0)
+            tm.assert_frame_equal(result, df)
+
+    def test_to_excel_unicode_filename(self, ext):
+        with tm.ensure_clean("\u0192u." + ext) as filename:
+            try:
+                with open(filename, "wb"):
+                    pass
+            except UnicodeEncodeError:
+                pytest.skip("No unicode file names on this system")
+
+            df = DataFrame(
+                [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
+                index=["A", "B"],
+                columns=["X", "Y", "Z"],
+            )
+            df.to_excel(filename, sheet_name="test1", float_format="%.2f")
+
+            with ExcelFile(filename) as reader:
+                result = pd.read_excel(reader, sheet_name="test1", index_col=0)
+
+            expected = DataFrame(
+                [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
+                index=["A", "B"],
+                columns=["X", "Y", "Z"],
+            )
+            tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("use_headers", [True, False])
+    @pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
+    @pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
+    def test_excel_010_hemstring(
+        self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path
+    ):
+        def roundtrip(data, header=True, parser_hdr=0, index=True):
+            data.to_excel(path, header=header, merge_cells=merge_cells, index=index)
+
+            with ExcelFile(path) as xf:
+                return pd.read_excel(
+                    xf, sheet_name=xf.sheet_names[0], header=parser_hdr
+                )
+
+        # Basic test.
+        parser_header = 0 if use_headers else None
+        res = roundtrip(DataFrame([0]), use_headers, parser_header)
+
+        assert res.shape == (1, 2)
+        assert res.iloc[0, 0] is not np.nan
+
+        # More complex tests with multi-index.
+        nrows = 5
+        ncols = 3
+
+        # ensure limited functionality in 0.10
+        # override of gh-2370 until sorted out in 0.11
+
+        if c_idx_nlevels == 1:
+            columns = Index([f"a-{i}" for i in range(ncols)], dtype=object)
+        else:
+            columns = MultiIndex.from_arrays(
+                [range(ncols) for _ in range(c_idx_nlevels)],
+                names=[f"i-{i}" for i in range(c_idx_nlevels)],
+            )
+        if r_idx_nlevels == 1:
+            index = Index([f"b-{i}" for i in range(nrows)], dtype=object)
+        else:
+            index = MultiIndex.from_arrays(
+                [range(nrows) for _ in range(r_idx_nlevels)],
+                names=[f"j-{i}" for i in range(r_idx_nlevels)],
+            )
+
+        df = DataFrame(
+            np.ones((nrows, ncols)),
+            columns=columns,
+            index=index,
+        )
+
+        # This if will be removed once multi-column Excel writing
+        # is implemented. For now fixing gh-9794.
+        if c_idx_nlevels > 1:
+            msg = (
+                "Writing to Excel with MultiIndex columns and no index "
+                "\\('index'=False\\) is not yet implemented."
+            )
+            with pytest.raises(NotImplementedError, match=msg):
+                roundtrip(df, use_headers, index=False)
+        else:
+            res = roundtrip(df, use_headers)
+
+            if use_headers:
+                assert res.shape == (nrows, ncols + r_idx_nlevels)
+            else:
+                # First row taken as columns.
+                assert res.shape == (nrows - 1, ncols + r_idx_nlevels)
+
+            # No NaNs.
+            for r in range(len(res.index)):
+                for c in range(len(res.columns)):
+                    assert res.iloc[r, c] is not np.nan
+
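+    # Shape arithmetic above: the r_idx_nlevels index levels come back as
+    # leading columns, hence ncols + r_idx_nlevels; when the frame is written
+    # without a header row but read back with header=0, the first data row is
+    # promoted to column labels, costing one row.
+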
+    def test_duplicated_columns(self, path):
+        # see gh-5235
+        df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B"])
+        df.to_excel(path, sheet_name="test1")
+        expected = DataFrame(
+            [[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B.1"]
+        )
+
+        # By default, we mangle.
+        result = pd.read_excel(path, sheet_name="test1", index_col=0)
+        tm.assert_frame_equal(result, expected)
+
+        # see gh-11007, gh-10970
+        df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"])
+        df.to_excel(path, sheet_name="test1")
+
+        result = pd.read_excel(path, sheet_name="test1", index_col=0)
+        expected = DataFrame(
+            [[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A.1", "B.1"]
+        )
+        tm.assert_frame_equal(result, expected)
+
+        # see gh-10982
+        df.to_excel(path, sheet_name="test1", index=False, header=False)
+        result = pd.read_excel(path, sheet_name="test1", header=None)
+
+        expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
+        tm.assert_frame_equal(result, expected)
+
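+    # Note: on read, duplicate column labels are de-duplicated by appending
+    # ".1", ".2", ... (the same mangling read_csv applies), so duplicated
+    # columns cannot round-trip under their original names.
+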
+    def test_swapped_columns(self, path):
+        # Test for issue #5427.
+        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})
+        write_frame.to_excel(path, sheet_name="test1", columns=["B", "A"])
+
+        read_frame = pd.read_excel(path, sheet_name="test1", header=0)
+
+        tm.assert_series_equal(write_frame["A"], read_frame["A"])
+        tm.assert_series_equal(write_frame["B"], read_frame["B"])
+
+    def test_invalid_columns(self, path):
+        # see gh-10982
+        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})
+
+        with pytest.raises(KeyError, match="Not all names specified"):
+            write_frame.to_excel(path, sheet_name="test1", columns=["B", "C"])
+
+        with pytest.raises(
+            KeyError, match="'passes columns are not ALL present dataframe'"
+        ):
+            write_frame.to_excel(path, sheet_name="test1", columns=["C", "D"])
+
+    @pytest.mark.parametrize(
+        "to_excel_index,read_excel_index_col",
+        [
+            (True, 0),  # Include index in write to file
+            (False, None),  # Don't include index in write to file
+        ],
+    )
+    def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col):
+        # GH 31677
+        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]})
+        write_frame.to_excel(
+            path, sheet_name="col_subset_bug", columns=["A", "B"], index=to_excel_index
+        )
+
+        expected = write_frame[["A", "B"]]
+        read_frame = pd.read_excel(
+            path, sheet_name="col_subset_bug", index_col=read_excel_index_col
+        )
+
+        tm.assert_frame_equal(expected, read_frame)
+
+    def test_comment_arg(self, path):
+        # see gh-18735
+        #
+        # Test the comment argument functionality to pd.read_excel.
+
+        # Create file to read in.
+        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
+        df.to_excel(path, sheet_name="test_c")
+
+        # Read file without comment arg.
+        result1 = pd.read_excel(path, sheet_name="test_c", index_col=0)
+
+        result1.iloc[1, 0] = None
+        result1.iloc[1, 1] = None
+        result1.iloc[2, 1] = None
+
+        result2 = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0)
+        tm.assert_frame_equal(result1, result2)
+
+    def test_comment_default(self, path):
+        # Re issue #18735
+        # Test the comment argument default to pd.read_excel
+
+        # Create file to read in
+        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
+        df.to_excel(path, sheet_name="test_c")
+
+        # Read file with default and explicit comment=None
+        result1 = pd.read_excel(path, sheet_name="test_c")
+        result2 = pd.read_excel(path, sheet_name="test_c", comment=None)
+        tm.assert_frame_equal(result1, result2)
+
+    def test_comment_used(self, path):
+        # see gh-18735
+        #
+        # Test the comment argument is working as expected when used.
+
+        # Create file to read in.
+        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
+        df.to_excel(path, sheet_name="test_c")
+
+        # Test read_frame_comment against manually produced expected output.
+        expected = DataFrame({"A": ["one", None, "one"], "B": ["two", None, None]})
+        result = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0)
+        tm.assert_frame_equal(result, expected)
+
+    def test_comment_empty_line(self, path):
+        # Re issue #18735
+        # Test that pd.read_excel ignores commented lines at the end of file
+
+        df = DataFrame({"a": ["1", "#2"], "b": ["2", "3"]})
+        df.to_excel(path, index=False)
+
+        # Test that all-comment lines at EoF are ignored
+        expected = DataFrame({"a": [1], "b": [2]})
+        result = pd.read_excel(path, comment="#")
+        tm.assert_frame_equal(result, expected)
+
+    def test_datetimes(self, path):
+        # Test writing and reading datetimes. For issue #9139. (xref #9185)
+        unit = get_exp_unit(path)
+        datetimes = [
+            datetime(2013, 1, 13, 1, 2, 3),
+            datetime(2013, 1, 13, 2, 45, 56),
+            datetime(2013, 1, 13, 4, 29, 49),
+            datetime(2013, 1, 13, 6, 13, 42),
+            datetime(2013, 1, 13, 7, 57, 35),
+            datetime(2013, 1, 13, 9, 41, 28),
+            datetime(2013, 1, 13, 11, 25, 21),
+            datetime(2013, 1, 13, 13, 9, 14),
+            datetime(2013, 1, 13, 14, 53, 7),
+            datetime(2013, 1, 13, 16, 37, 0),
+            datetime(2013, 1, 13, 18, 20, 52),
+        ]
+
+        write_frame = DataFrame({"A": datetimes})
+        write_frame.to_excel(path, sheet_name="Sheet1")
+        read_frame = pd.read_excel(path, sheet_name="Sheet1", header=0)
+
+        expected = write_frame.astype(f"M8[{unit}]")
+        tm.assert_series_equal(expected["A"], read_frame["A"])
+
+    def test_bytes_io(self, engine):
+        # see gh-7074
+        with BytesIO() as bio:
+            df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
+
+            # Pass engine explicitly, as there is no file path to infer from.
+            with ExcelWriter(bio, engine=engine) as writer:
+                df.to_excel(writer)
+
+            bio.seek(0)
+            reread_df = pd.read_excel(bio, index_col=0)
+            tm.assert_frame_equal(df, reread_df)
+
+    def test_engine_kwargs(self, engine, path):
+        # GH#52368
+        df = DataFrame([{"A": 1, "B": 2}, {"A": 3, "B": 4}])
+
+        msgs = {
+            "odf": r"OpenDocumentSpreadsheet() got an unexpected keyword "
+            r"argument 'foo'",
+            "openpyxl": r"__init__() got an unexpected keyword argument 'foo'",
+            "xlsxwriter": r"__init__() got an unexpected keyword argument 'foo'",
+        }
+
+        if PY310:
+            msgs[
+                "openpyxl"
+            ] = "Workbook.__init__() got an unexpected keyword argument 'foo'"
+            msgs[
+                "xlsxwriter"
+            ] = "Workbook.__init__() got an unexpected keyword argument 'foo'"
+
+        # Handle change in error message for openpyxl (write and append mode)
+        if engine == "openpyxl" and not os.path.exists(path):
+            msgs[
+                "openpyxl"
+            ] = r"load_workbook() got an unexpected keyword argument 'foo'"
+
+        with pytest.raises(TypeError, match=re.escape(msgs[engine])):
+            df.to_excel(
+                path,
+                engine=engine,
+                engine_kwargs={"foo": "bar"},
+            )
+
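+    # Note: the expected TypeError text is keyed by engine and adjusted for
+    # Python >= 3.10, where messages for unexpected __init__ arguments include
+    # the class name (e.g. "Workbook.__init__() got an unexpected ...").
+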
+    def test_write_lists_dict(self, path):
+        # see gh-8188.
+        df = DataFrame(
+            {
+                "mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
+                "numeric": [1, 2, 3.0],
+                "str": ["apple", "banana", "cherry"],
+            }
+        )
+        df.to_excel(path, sheet_name="Sheet1")
+        read = pd.read_excel(path, sheet_name="Sheet1", header=0, index_col=0)
+
+        expected = df.copy()
+        expected.mixed = expected.mixed.apply(str)
+        expected.numeric = expected.numeric.astype("int64")
+
+        tm.assert_frame_equal(read, expected)
+
+    def test_render_as_column_name(self, path):
+        # see gh-34331
+        df = DataFrame({"render": [1, 2], "data": [3, 4]})
+        df.to_excel(path, sheet_name="Sheet1")
+        read = pd.read_excel(path, "Sheet1", index_col=0)
+        expected = df
+        tm.assert_frame_equal(read, expected)
+
+    def test_true_and_false_value_options(self, path):
+        # see gh-13347
+        df = DataFrame([["foo", "bar"]], columns=["col1", "col2"], dtype=object)
+        with option_context("future.no_silent_downcasting", True):
+            expected = df.replace({"foo": True, "bar": False}).astype("bool")
+
+        df.to_excel(path)
+        read_frame = pd.read_excel(
+            path, true_values=["foo"], false_values=["bar"], index_col=0
+        )
+        tm.assert_frame_equal(read_frame, expected)
+
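+    # Note: true_values/false_values map the listed cell strings to booleans
+    # while parsing, mirroring the read_csv options of the same name.
+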
+    def test_freeze_panes(self, path):
+        # see gh-15160
+        expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
+        expected.to_excel(path, sheet_name="Sheet1", freeze_panes=(1, 1))
+
+        result = pd.read_excel(path, index_col=0)
+        tm.assert_frame_equal(result, expected)
+
+    def test_path_path_lib(self, engine, ext):
+        df = DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=Index(list("ABCD")),
+            index=Index([f"i-{i}" for i in range(30)], dtype=object),
+        )
+        writer = partial(df.to_excel, engine=engine)
+
+        reader = partial(pd.read_excel, index_col=0)
+        result = tm.round_trip_pathlib(writer, reader, path=f"foo{ext}")
+        tm.assert_frame_equal(result, df)
+
+    def test_path_local_path(self, engine, ext):
+        df = DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=Index(list("ABCD")),
+            index=Index([f"i-{i}" for i in range(30)]),
+        )
+        writer = partial(df.to_excel, engine=engine)
+
+        reader = partial(pd.read_excel, index_col=0)
+        result = tm.round_trip_localpath(writer, reader, path=f"foo{ext}")
+        tm.assert_frame_equal(result, df)
+
+    def test_merged_cell_custom_objects(self, path):
+        # see GH-27006
+        mi = MultiIndex.from_tuples(
+            [
+                (pd.Period("2018"), pd.Period("2018Q1")),
+                (pd.Period("2018"), pd.Period("2018Q2")),
+            ]
+        )
+        expected = DataFrame(np.ones((2, 2), dtype="int64"), columns=mi)
+        expected.to_excel(path)
+        result = pd.read_excel(path, header=[0, 1], index_col=0)
+        # need to convert PeriodIndexes to standard Indexes for assert equal
+        expected.columns = expected.columns.set_levels(
+            [[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]],
+            level=[0, 1],
+        )
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("dtype", [None, object])
+    def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path):
+        # GH 27008, GH 7056
+        tz = tz_aware_fixture
+        data = pd.Timestamp("2019", tz=tz)
+        df = DataFrame([data], dtype=dtype)
+        with pytest.raises(ValueError, match="Excel does not support"):
+            df.to_excel(path)
+
+        data = data.to_pydatetime()
+        df = DataFrame([data], dtype=dtype)
+        with pytest.raises(ValueError, match="Excel does not support"):
+            df.to_excel(path)
+
+    def test_excel_duplicate_columns_with_names(self, path):
+        # GH#39695
+        df = DataFrame({"A": [0, 1], "B": [10, 11]})
+        df.to_excel(path, columns=["A", "B", "A"], index=False)
+
+        result = pd.read_excel(path)
+        expected = DataFrame([[0, 10, 0], [1, 11, 1]], columns=["A", "B", "A.1"])
+        tm.assert_frame_equal(result, expected)
+
+    def test_if_sheet_exists_raises(self, ext):
+        # GH 40230
+        msg = "if_sheet_exists is only valid in append mode (mode='a')"
+
+        with tm.ensure_clean(ext) as f:
+            with pytest.raises(ValueError, match=re.escape(msg)):
+                ExcelWriter(f, if_sheet_exists="replace")
+
+    def test_excel_writer_empty_frame(self, engine, ext):
+        # GH#45793
+        with tm.ensure_clean(ext) as path:
+            with ExcelWriter(path, engine=engine) as writer:
+                DataFrame().to_excel(writer)
+            result = pd.read_excel(path)
+            expected = DataFrame()
+            tm.assert_frame_equal(result, expected)
+
+    def test_to_excel_empty_frame(self, engine, ext):
+        # GH#45793
+        with tm.ensure_clean(ext) as path:
+            DataFrame().to_excel(path, engine=engine)
+            result = pd.read_excel(path)
+            expected = DataFrame()
+            tm.assert_frame_equal(result, expected)
+
+
+ class TestExcelWriterEngineTests:
+    @pytest.mark.parametrize(
+        "klass,ext",
+        [
+            pytest.param(_XlsxWriter, ".xlsx", marks=td.skip_if_no("xlsxwriter")),
+            pytest.param(_OpenpyxlWriter, ".xlsx", marks=td.skip_if_no("openpyxl")),
+        ],
+    )
+    def test_ExcelWriter_dispatch(self, klass, ext):
+        with tm.ensure_clean(ext) as path:
+            with ExcelWriter(path) as writer:
+                if ext == ".xlsx" and bool(
+                    import_optional_dependency("xlsxwriter", errors="ignore")
+                ):
+                    # xlsxwriter has preference over openpyxl if both installed
+                    assert isinstance(writer, _XlsxWriter)
+                else:
+                    assert isinstance(writer, klass)
+
+    def test_ExcelWriter_dispatch_raises(self):
+        with pytest.raises(ValueError, match="No engine"):
+            ExcelWriter("nothing")
+
+    def test_register_writer(self):
+        class DummyClass(ExcelWriter):
+            called_save = False
+            called_write_cells = False
+            called_sheets = False
+            _supported_extensions = ("xlsx", "xls")
+            _engine = "dummy"
+
+            def book(self):
+                pass
+
+            def _save(self):
+                type(self).called_save = True
+
+            def _write_cells(self, *args, **kwargs):
+                type(self).called_write_cells = True
+
+            @property
+            def sheets(self):
+                type(self).called_sheets = True
+
+            @classmethod
+            def assert_called_and_reset(cls):
+                assert cls.called_save
+                assert cls.called_write_cells
+                assert not cls.called_sheets
+                cls.called_save = False
+                cls.called_write_cells = False
+
+        register_writer(DummyClass)
+
+        with option_context("io.excel.xlsx.writer", "dummy"):
+            path = "something.xlsx"
+            with tm.ensure_clean(path) as filepath:
+                with ExcelWriter(filepath) as writer:
+                    assert isinstance(writer, DummyClass)
+                df = DataFrame(
+                    ["a"],
+                    columns=Index(["b"], name="foo"),
+                    index=Index(["c"], name="bar"),
+                )
+                df.to_excel(filepath)
+            DummyClass.assert_called_and_reset()
+
+        with tm.ensure_clean("something.xls") as filepath:
+            df.to_excel(filepath, engine="dummy")
+            DummyClass.assert_called_and_reset()
+
+
+ @td.skip_if_no("xlrd")
+ @td.skip_if_no("openpyxl")
+ class TestFSPath:
+    def test_excelfile_fspath(self):
+        with tm.ensure_clean("foo.xlsx") as path:
+            df = DataFrame({"A": [1, 2]})
+            df.to_excel(path)
+            with ExcelFile(path) as xl:
+                result = os.fspath(xl)
+            assert result == path
+
+    def test_excelwriter_fspath(self):
+        with tm.ensure_clean("foo.xlsx") as path:
+            with ExcelWriter(path) as writer:
+                assert os.fspath(writer) == str(path)
+
+    def test_to_excel_pos_args_deprecation(self):
+        # GH-54229
+        df = DataFrame({"a": [1, 2, 3]})
+        msg = (
+            r"Starting with pandas version 3.0 all arguments of to_excel except "
+            r"for the argument 'excel_writer' will be keyword-only."
+        )
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            buf = BytesIO()
+            writer = ExcelWriter(buf)
+            df.to_excel(writer, "Sheet_name_1")
+
+
+ @pytest.mark.parametrize("klass", _writers.values())
+ def test_subclass_attr(klass):
+    # testing that subclasses of ExcelWriter don't have public attributes (issue 49602)
+    attrs_base = {name for name in dir(ExcelWriter) if not name.startswith("_")}
+    attrs_klass = {name for name in dir(klass) if not name.startswith("_")}
+    assert not attrs_base.symmetric_difference(attrs_klass)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py ADDED
@@ -0,0 +1,76 @@
+ import io
+
+ import numpy as np
+ import pytest
+
+ from pandas.compat import is_platform_windows
+
+ import pandas as pd
+ import pandas._testing as tm
+
+ from pandas.io.excel import ExcelFile
+ from pandas.io.excel._base import inspect_excel_format
+
+ xlrd = pytest.importorskip("xlrd")
+
+ if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+ @pytest.fixture(params=[".xls"])
+ def read_ext_xlrd(request):
+    """
+    Valid extensions for reading Excel files with xlrd.
+
+    Similar to read_ext, but excludes .ods, .xlsb, and for xlrd>2 .xlsx, .xlsm
+    """
+    return request.param
+
+
+ def test_read_xlrd_book(read_ext_xlrd, datapath):
+    engine = "xlrd"
+    sheet_name = "Sheet1"
+    pth = datapath("io", "data", "excel", "test1.xls")
+    with xlrd.open_workbook(pth) as book:
+        with ExcelFile(book, engine=engine) as xl:
+            result = pd.read_excel(xl, sheet_name=sheet_name, index_col=0)
+
+        expected = pd.read_excel(
+            book, sheet_name=sheet_name, engine=engine, index_col=0
+        )
+    tm.assert_frame_equal(result, expected)
+
+
+ def test_read_xlsx_fails(datapath):
+    # GH 29375
+    from xlrd.biffh import XLRDError
+
+    path = datapath("io", "data", "excel", "test1.xlsx")
+    with pytest.raises(XLRDError, match="Excel xlsx file; not supported"):
+        pd.read_excel(path, engine="xlrd")
+
+
+ def test_nan_in_xls(datapath):
+    # GH 54564
+    path = datapath("io", "data", "excel", "test6.xls")
+
+    expected = pd.DataFrame({0: np.r_[0, 2].astype("int64"), 1: np.r_[1, np.nan]})
+
+    result = pd.read_excel(path, header=None)
+
+    tm.assert_frame_equal(result, expected)
+
+
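+ # The first three headers below appear to be BOF records of the legacy
+ # BIFF2, BIFF3 and BIFF4 streams (record types 0x0009, 0x0209 and 0x0409),
+ # while the last is the OLE2 compound-document signature used by modern
+ # .xls files; all of them should be classified as "xls".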
+ @pytest.mark.parametrize(
+    "file_header",
+    [
+        b"\x09\x00\x04\x00\x07\x00\x10\x00",
+        b"\x09\x02\x06\x00\x00\x00\x10\x00",
+        b"\x09\x04\x06\x00\x00\x00\x10\x00",
+        b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1",
+    ],
+ )
+ def test_read_old_xls_files(file_header):
+    # GH 41226
+    f = io.BytesIO(file_header)
+    assert inspect_excel_format(f) == "xls"
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py ADDED
@@ -0,0 +1,86 @@
+ import contextlib
+
+ import pytest
+
+ from pandas.compat import is_platform_windows
+
+ from pandas import DataFrame
+ import pandas._testing as tm
+
+ from pandas.io.excel import ExcelWriter
+
+ xlsxwriter = pytest.importorskip("xlsxwriter")
+
+ if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+ @pytest.fixture
+ def ext():
+    return ".xlsx"
+
+
+ def test_column_format(ext):
+    # Test that column formats are applied to cells. Test for issue #9167.
+    # Applicable to xlsxwriter only.
+    openpyxl = pytest.importorskip("openpyxl")
+
+    with tm.ensure_clean(ext) as path:
+        frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]})
+
+        with ExcelWriter(path) as writer:
+            frame.to_excel(writer)
+
+            # Add a number format to col B and ensure it is applied to cells.
+            num_format = "#,##0"
+            write_workbook = writer.book
+            write_worksheet = write_workbook.worksheets()[0]
+            col_format = write_workbook.add_format({"num_format": num_format})
+            write_worksheet.set_column("B:B", None, col_format)
+
+        with contextlib.closing(openpyxl.load_workbook(path)) as read_workbook:
+            try:
+                read_worksheet = read_workbook["Sheet1"]
+            except TypeError:
+                # compat
+                read_worksheet = read_workbook.get_sheet_by_name(name="Sheet1")
+
+        # Get the number format from the cell.
+        try:
+            cell = read_worksheet["B2"]
+        except TypeError:
+            # compat
+            cell = read_worksheet.cell("B2")
+
+        try:
+            read_num_format = cell.number_format
+        except AttributeError:
+            read_num_format = cell.style.number_format._format_code
+
+        assert read_num_format == num_format
+
+
+ def test_write_append_mode_raises(ext):
+    msg = "Append mode is not supported with xlsxwriter!"
+
+    with tm.ensure_clean(ext) as f:
+        with pytest.raises(ValueError, match=msg):
+            ExcelWriter(f, engine="xlsxwriter", mode="a")
+
+
+ @pytest.mark.parametrize("nan_inf_to_errors", [True, False])
+ def test_engine_kwargs(ext, nan_inf_to_errors):
+    # GH 42286
+    engine_kwargs = {"options": {"nan_inf_to_errors": nan_inf_to_errors}}
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="xlsxwriter", engine_kwargs=engine_kwargs) as writer:
+            assert writer.book.nan_inf_to_errors == nan_inf_to_errors
+
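+ # Note: nan_inf_to_errors is a plain xlsxwriter Workbook option (it lets
+ # NaN/Inf values be written as Excel error values); the assertion above only
+ # checks that engine_kwargs are forwarded verbatim to the Workbook.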
+
+ def test_book_and_sheets_consistent(ext):
+    # GH#45687 - Ensure sheets is updated if user modifies book
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="xlsxwriter") as writer:
+            assert writer.sheets == {}
+            sheet = writer.book.add_worksheet("test_name")
+            assert writer.sheets == {"test_name": sheet}
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc ADDED
Binary file (5.51 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc ADDED
Binary file (28.5 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (7.83 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_c_parser_only.cpython-310.pyc ADDED
Binary file (16.8 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_comment.cpython-310.pyc ADDED
Binary file (5.65 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_compression.cpython-310.pyc ADDED
Binary file (6.27 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_concatenate_chunks.cpython-310.pyc ADDED
Binary file (1.37 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_converters.cpython-310.pyc ADDED
Binary file (6.58 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc ADDED
Binary file (4.5 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_encoding.cpython-310.pyc ADDED
Binary file (9.22 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_header.cpython-310.pyc ADDED
Binary file (16.3 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc ADDED
Binary file (9.55 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_mangle_dupes.cpython-310.pyc ADDED
Binary file (4.58 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_multi_thread.cpython-310.pyc ADDED
Binary file (4.25 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_na_values.cpython-310.pyc ADDED
Binary file (16.2 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_parse_dates.cpython-310.pyc ADDED
Binary file (44.8 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_python_parser_only.cpython-310.pyc ADDED
Binary file (15.3 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_quoting.cpython-310.pyc ADDED
Binary file (4.88 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_read_fwf.cpython-310.pyc ADDED
Binary file (24.5 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc ADDED
Binary file (8.65 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc ADDED
Binary file (10.7 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_upcast.cpython-310.pyc ADDED
Binary file (2.84 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_chunksize.cpython-310.pyc ADDED
Binary file (9.68 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_data_list.cpython-310.pyc ADDED
Binary file (2.37 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_file_buffer_url.cpython-310.pyc ADDED
Binary file (12.2 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_float.cpython-310.pyc ADDED
Binary file (2.58 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_inf.cpython-310.pyc ADDED
Binary file (2.13 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_ints.cpython-310.pyc ADDED
Binary file (5.57 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_read_errors.cpython-310.pyc ADDED
Binary file (8.62 kB).

llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_chunksize.py ADDED
@@ -0,0 +1,378 @@
+ """
+ Tests that work on both the Python and C engines but do not have a
+ specific classification into the other test modules.
+ """
+ from io import StringIO
+
+ import numpy as np
+ import pytest
+
+ from pandas._libs import parsers as libparsers
+ from pandas.errors import DtypeWarning
+
+ from pandas import (
+    DataFrame,
+    concat,
+ )
+ import pandas._testing as tm
+
+ pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+
+ @pytest.mark.parametrize("index_col", [0, "index"])
+ def test_read_chunksize_with_index(all_parsers, index_col):
+    parser = all_parsers
+    data = """index,A,B,C,D
+ foo,2,3,4,5
+ bar,7,8,9,10
+ baz,12,13,14,15
+ qux,12,13,14,15
+ foo2,12,13,14,15
+ bar2,12,13,14,15
+ """
+
+    expected = DataFrame(
+        [
+            ["foo", 2, 3, 4, 5],
+            ["bar", 7, 8, 9, 10],
+            ["baz", 12, 13, 14, 15],
+            ["qux", 12, 13, 14, 15],
+            ["foo2", 12, 13, 14, 15],
+            ["bar2", 12, 13, 14, 15],
+        ],
+        columns=["index", "A", "B", "C", "D"],
+    )
+    expected = expected.set_index("index")
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
+                list(reader)
+        return
+
+    with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
+        chunks = list(reader)
+    tm.assert_frame_equal(chunks[0], expected[:2])
+    tm.assert_frame_equal(chunks[1], expected[2:4])
+    tm.assert_frame_equal(chunks[2], expected[4:])
+
+
+ @pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
+ def test_read_chunksize_bad(all_parsers, chunksize):
+    data = """index,A,B,C,D
+ foo,2,3,4,5
+ bar,7,8,9,10
+ baz,12,13,14,15
+ qux,12,13,14,15
+ foo2,12,13,14,15
+ bar2,12,13,14,15
+ """
+    parser = all_parsers
+    msg = r"'chunksize' must be an integer >=1"
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+
+    with pytest.raises(ValueError, match=msg):
+        with parser.read_csv(StringIO(data), chunksize=chunksize) as _:
+            pass
+
+
+ @pytest.mark.parametrize("chunksize", [2, 8])
+ def test_read_chunksize_and_nrows(all_parsers, chunksize):
+    # see gh-15755
+    data = """index,A,B,C,D
+ foo,2,3,4,5
+ bar,7,8,9,10
+ baz,12,13,14,15
+ qux,12,13,14,15
+ foo2,12,13,14,15
+ bar2,12,13,14,15
+ """
+    parser = all_parsers
+    kwargs = {"index_col": 0, "nrows": 5}
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+        return
+
+    expected = parser.read_csv(StringIO(data), **kwargs)
+    with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
+        tm.assert_frame_equal(concat(reader), expected)
+
+
+ def test_read_chunksize_and_nrows_changing_size(all_parsers):
+    data = """index,A,B,C,D
+ foo,2,3,4,5
+ bar,7,8,9,10
+ baz,12,13,14,15
+ qux,12,13,14,15
+ foo2,12,13,14,15
+ bar2,12,13,14,15
+ """
+    parser = all_parsers
+    kwargs = {"index_col": 0, "nrows": 5}
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+        return
+
+    expected = parser.read_csv(StringIO(data), **kwargs)
+    with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
+        tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
+        tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
+
+        with pytest.raises(StopIteration, match=""):
+            reader.get_chunk(size=3)
+
+
+ def test_get_chunk_passed_chunksize(all_parsers):
+    parser = all_parsers
+    data = """A,B,C
+ 1,2,3
+ 4,5,6
+ 7,8,9
+ 1,2,3"""
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(StringIO(data), chunksize=2) as reader:
+                reader.get_chunk()
+        return
+
+    with parser.read_csv(StringIO(data), chunksize=2) as reader:
+        result = reader.get_chunk()
+
+    expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
+    tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
+ def test_read_chunksize_compat(all_parsers, kwargs):
+    # see gh-12185
+    data = """index,A,B,C,D
+ foo,2,3,4,5
+ bar,7,8,9,10
+ baz,12,13,14,15
+ qux,12,13,14,15
+ foo2,12,13,14,15
+ bar2,12,13,14,15
+ """
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), **kwargs)
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
+                concat(reader)
+        return
+
+    with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
+        via_reader = concat(reader)
+    tm.assert_frame_equal(via_reader, result)
+
+
+ def test_read_chunksize_jagged_names(all_parsers):
+    # see gh-23509
+    parser = all_parsers
+    data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
+
+    expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(
+                StringIO(data), names=range(10), chunksize=4
+            ) as reader:
+                concat(reader)
+        return
+
+    with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:
+        result = concat(reader)
+    tm.assert_frame_equal(result, expected)
+
+
+ def test_chunk_begins_with_newline_whitespace(all_parsers):
+    # see gh-10022
+    parser = all_parsers
+    data = "\n hello\nworld\n"
+
+    result = parser.read_csv(StringIO(data), header=None)
+    expected = DataFrame([" hello", "world"])
+    tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.slow
+ def test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch):
+    # mainly an issue with the C parser
+    heuristic = 2**3
+    parser = all_parsers
+    integers = [str(i) for i in range(heuristic - 1)]
+    data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
+
+    # Coercions should work without warnings.
+    with monkeypatch.context() as m:
+        m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
+        result = parser.read_csv(StringIO(data))
+
+    assert type(result.a[0]) is np.float64
+    assert result.a.dtype == float
+
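+ # Note: DEFAULT_BUFFER_HEURISTIC is used to size the C parser's internal
+ # chunks; shrinking it above forces even this tiny input to span several
+ # chunks, so the int -> float coercion happens across chunk boundaries.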
+
+ def test_warn_if_chunks_have_mismatched_type(all_parsers):
+    warning_type = None
+    parser = all_parsers
+    size = 10000
+
+    # see gh-3866: if chunks are different types and can't
+    # be coerced using numerical types, then issue warning.
+    if parser.engine == "c" and parser.low_memory:
+        warning_type = DtypeWarning
+        # Use larger size to hit warning path
+        size = 499999
+
+    integers = [str(i) for i in range(size)]
+    data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
+
+    buf = StringIO(data)
+
+    if parser.engine == "pyarrow":
+        df = parser.read_csv(
+            buf,
+        )
+    else:
+        df = parser.read_csv_check_warnings(
+            warning_type,
+            r"Columns \(0\) have mixed types. "
+            "Specify dtype option on import or set low_memory=False.",
+            buf,
+        )
+
+    assert df.a.dtype == object
+
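+ # Note: with the C engine and low_memory=True, each internal chunk is
+ # type-inferred separately, so a column mixing integers and strings can come
+ # back chunk-inconsistent; pandas then warns and falls back to object dtype.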
+
+ @pytest.mark.parametrize("iterator", [True, False])
+ def test_empty_with_nrows_chunksize(all_parsers, iterator):
+    # see gh-9535
+    parser = all_parsers
+    expected = DataFrame(columns=["foo", "bar"])
+
+    nrows = 10
+    data = StringIO("foo,bar\n")
+
+    if parser.engine == "pyarrow":
+        msg = (
+            "The '(nrows|chunksize)' option is not supported with the 'pyarrow' engine"
+        )
+        with pytest.raises(ValueError, match=msg):
+            if iterator:
+                with parser.read_csv(data, chunksize=nrows) as reader:
+                    next(iter(reader))
+            else:
+                parser.read_csv(data, nrows=nrows)
+        return
+
+    if iterator:
+        with parser.read_csv(data, chunksize=nrows) as reader:
+            result = next(iter(reader))
+    else:
+        result = parser.read_csv(data, nrows=nrows)
+
+    tm.assert_frame_equal(result, expected)
+
+
+ def test_read_csv_memory_growth_chunksize(all_parsers):
+    # see gh-24805
+    #
+    # Let's just make sure that we don't crash
+    # as we iteratively process all chunks.
+    parser = all_parsers
+
+    with tm.ensure_clean() as path:
+        with open(path, "w", encoding="utf-8") as f:
+            for i in range(1000):
+                f.write(str(i) + "\n")
+
+        if parser.engine == "pyarrow":
+            msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+            with pytest.raises(ValueError, match=msg):
+                with parser.read_csv(path, chunksize=20) as result:
+                    for _ in result:
+                        pass
+            return
+
+        with parser.read_csv(path, chunksize=20) as result:
+            for _ in result:
+                pass
+
+
+ def test_chunksize_with_usecols_second_block_shorter(all_parsers):
+    # GH#21211
+    parser = all_parsers
+    data = """1,2,3,4
+ 5,6,7,8
+ 9,10,11
+ """
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data),
+                names=["a", "b"],
+                chunksize=2,
+                usecols=[0, 1],
+                header=None,
+            )
+        return
+
+    result_chunks = parser.read_csv(
+        StringIO(data),
+        names=["a", "b"],
+        chunksize=2,
+        usecols=[0, 1],
+        header=None,
+    )
+
+    expected_frames = [
+        DataFrame({"a": [1, 5], "b": [2, 6]}),
+        DataFrame({"a": [9], "b": [10]}, index=[2]),
+    ]
+
+    for i, result in enumerate(result_chunks):
+        tm.assert_frame_equal(result, expected_frames[i])
+
+
+ def test_chunksize_second_block_shorter(all_parsers):
+    # GH#21211
+    parser = all_parsers
+    data = """a,b,c,d
+ 1,2,3,4
+ 5,6,7,8
+ 9,10,11
+ """
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), chunksize=2)
+        return
+
+    result_chunks = parser.read_csv(StringIO(data), chunksize=2)
+
+    expected_frames = [
+        DataFrame({"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}),
+        DataFrame({"a": [9], "b": [10], "c": [11], "d": [np.nan]}, index=[2]),
+    ]
+
+    for i, result in enumerate(result_chunks):
+        tm.assert_frame_equal(result, expected_frames[i])
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_common_basic.py ADDED
@@ -0,0 +1,979 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from datetime import datetime
+from inspect import signature
+from io import StringIO
+import os
+from pathlib import Path
+import sys
+
+import numpy as np
+import pytest
+
+from pandas.errors import (
+    EmptyDataError,
+    ParserError,
+    ParserWarning,
+)
+
+from pandas import (
+    DataFrame,
+    Index,
+    Timestamp,
+    compat,
+)
+import pandas._testing as tm
+
+from pandas.io.parsers import TextFileReader
+from pandas.io.parsers.c_parser_wrapper import CParserWrapper
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+def test_override_set_noconvert_columns():
+    # see gh-17351
+    #
+    # Usecols needs to be sorted in _set_noconvert_columns based
+    # on the test_usecols_with_parse_dates test from test_usecols.py
+    class MyTextFileReader(TextFileReader):
+        def __init__(self) -> None:
+            self._currow = 0
+            self.squeeze = False
+
+    class MyCParserWrapper(CParserWrapper):
+        def _set_noconvert_columns(self):
+            if self.usecols_dtype == "integer":
+                # self.usecols is a set, which is documented as unordered
+                # but in practice, a CPython set of integers is sorted.
+                # In other implementations this assumption does not hold.
+                # The following code simulates a different order, which
+                # before GH 17351 would cause the wrong columns to be
+                # converted via the parse_dates parameter
+                self.usecols = list(self.usecols)
+                self.usecols.reverse()
+            return CParserWrapper._set_noconvert_columns(self)
+
+    data = """a,b,c,d,e
+0,1,2014-01-01,09:00,4
+0,1,2014-01-02,10:00,4"""
+
+    parse_dates = [[1, 2]]
+    cols = {
+        "a": [0, 0],
+        "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
+    }
+    expected = DataFrame(cols, columns=["c_d", "a"])
+
+    parser = MyTextFileReader()
+    parser.options = {
+        "usecols": [0, 2, 3],
+        "parse_dates": parse_dates,
+        "delimiter": ",",
+    }
+    parser.engine = "c"
+    parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
+
+    result = parser.read()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_local(all_parsers, csv1):
+    prefix = "file:///" if compat.is_platform_windows() else "file://"
+    parser = all_parsers
+
+    fname = prefix + str(os.path.abspath(csv1))
+    result = parser.read_csv(fname, index_col=0, parse_dates=True)
+    # TODO: make unit check more specific
+    if parser.engine == "pyarrow":
+        result.index = result.index.as_unit("ns")
+    expected = DataFrame(
+        [
+            [0.980269, 3.685731, -0.364216805298, -1.159738],
+            [1.047916, -0.041232, -0.16181208307, 0.212549],
+            [0.498581, 0.731168, -0.537677223318, 1.346270],
+            [1.120202, 1.567621, 0.00364077397681, 0.675253],
+            [-0.487094, 0.571455, -1.6116394093, 0.103469],
+            [0.836649, 0.246462, 0.588542635376, 1.062782],
+            [-0.157161, 1.340307, 1.1957779562, -1.097007],
+        ],
+        columns=["A", "B", "C", "D"],
+        index=Index(
+            [
+                datetime(2000, 1, 3),
+                datetime(2000, 1, 4),
+                datetime(2000, 1, 5),
+                datetime(2000, 1, 6),
+                datetime(2000, 1, 7),
+                datetime(2000, 1, 10),
+                datetime(2000, 1, 11),
+            ],
+            name="index",
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_1000_sep(all_parsers):
+    parser = all_parsers
+    data = """A|B|C
+1|2,334|5
+10|13|10.
+"""
+    expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
+
+    if parser.engine == "pyarrow":
+        msg = "The 'thousands' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), sep="|", thousands=",")
+        return
+
+    result = parser.read_csv(StringIO(data), sep="|", thousands=",")
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_unnamed_columns(all_parsers):
+    data = """A,B,C,,
+1,2,3,4,5
+6,7,8,9,10
+11,12,13,14,15
+"""
+    parser = all_parsers
+    expected = DataFrame(
+        [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
+        dtype=np.int64,
+        columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
+    )
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_csv_mixed_type(all_parsers):
+    data = """A,B,C
+a,1,2
+b,3,4
+c,4,5
+"""
+    parser = all_parsers
+    expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_low_memory_no_rows_with_index(all_parsers):
+    # see gh-21141
+    parser = all_parsers
+
+    if not parser.low_memory:
+        pytest.skip("This is a low-memory specific test")
+
+    data = """A,B,C
+1,1,1,2
+2,2,3,4
+3,3,4,5
+"""
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
+        return
+
+    result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
+    expected = DataFrame(columns=["A", "B", "C"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_dataframe(all_parsers, csv1):
+    parser = all_parsers
+    result = parser.read_csv(csv1, index_col=0, parse_dates=True)
+    # TODO: make unit check more specific
+    if parser.engine == "pyarrow":
+        result.index = result.index.as_unit("ns")
+    expected = DataFrame(
+        [
+            [0.980269, 3.685731, -0.364216805298, -1.159738],
+            [1.047916, -0.041232, -0.16181208307, 0.212549],
+            [0.498581, 0.731168, -0.537677223318, 1.346270],
+            [1.120202, 1.567621, 0.00364077397681, 0.675253],
+            [-0.487094, 0.571455, -1.6116394093, 0.103469],
+            [0.836649, 0.246462, 0.588542635376, 1.062782],
+            [-0.157161, 1.340307, 1.1957779562, -1.097007],
+        ],
+        columns=["A", "B", "C", "D"],
+        index=Index(
+            [
+                datetime(2000, 1, 3),
+                datetime(2000, 1, 4),
+                datetime(2000, 1, 5),
+                datetime(2000, 1, 6),
+                datetime(2000, 1, 7),
+                datetime(2000, 1, 10),
+                datetime(2000, 1, 11),
+            ],
+            name="index",
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", [3, 3.0])
+def test_read_nrows(all_parsers, nrows):
+    # see gh-10476
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+    expected = DataFrame(
+        [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
+        columns=["index", "A", "B", "C", "D"],
+    )
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), nrows=nrows)
+        return
+
+    result = parser.read_csv(StringIO(data), nrows=nrows)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
+def test_read_nrows_bad(all_parsers, nrows):
+    data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+    msg = r"'nrows' must be an integer >=0"
+    parser = all_parsers
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(StringIO(data), nrows=nrows)
+
+
+def test_nrows_skipfooter_errors(all_parsers):
+    msg = "'skipfooter' not supported with 'nrows'"
+    data = "a\n1\n2\n3\n4\n5\n6"
+    parser = all_parsers
+
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
+
+
+@skip_pyarrow
+def test_missing_trailing_delimiters(all_parsers):
+    parser = all_parsers
+    data = """A,B,C,D
+1,2,3,4
+1,3,3,
+1,4,5"""
+
+    result = parser.read_csv(StringIO(data))
+    expected = DataFrame(
+        [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
+        columns=["A", "B", "C", "D"],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_skip_initial_space(all_parsers):
+    data = (
+        '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
+        "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
+        "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
+        "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
+        "0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
+        "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
+    )
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data),
+                names=list(range(33)),
+                header=None,
+                na_values=["-9999.0"],
+                skipinitialspace=True,
+            )
+        return
+
+    result = parser.read_csv(
+        StringIO(data),
+        names=list(range(33)),
+        header=None,
+        na_values=["-9999.0"],
+        skipinitialspace=True,
+    )
+    expected = DataFrame(
+        [
+            [
+                "09-Apr-2012",
+                "01:10:18.300",
+                2456026.548822908,
+                12849,
+                1.00361,
+                1.12551,
+                330.65659,
+                355626618.16711,
+                73.48821,
+                314.11625,
+                1917.09447,
+                179.71425,
+                80.0,
+                240.0,
+                -350,
+                70.06056,
+                344.9837,
+                1,
+                1,
+                -0.689265,
+                -0.692787,
+                0.212036,
+                14.7674,
+                41.605,
+                np.nan,
+                np.nan,
+                np.nan,
+                np.nan,
+                np.nan,
+                np.nan,
+                0,
+                12,
+                128,
+            ]
+        ]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_trailing_delimiters(all_parsers):
+    # see gh-2442
+    data = """A,B,C
+1,2,3,
+4,5,6,
+7,8,9,"""
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), index_col=False)
+
+    expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_escapechar(all_parsers):
+    # https://stackoverflow.com/questions/13824840/feature-request-for-
+    # pandas-read-csv
+    data = '''SEARCH_TERM,ACTUAL_URL
+"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
+"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
+"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
+
+    parser = all_parsers
+    result = parser.read_csv(
+        StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
+    )
+
+    assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
+
+    tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
+
+
+def test_ignore_leading_whitespace(all_parsers):
+    # see gh-3374, gh-6607
+    parser = all_parsers
+    data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
+
+    if parser.engine == "pyarrow":
+        msg = "the 'pyarrow' engine does not support regex separators"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), sep=r"\s+")
+        return
+    result = parser.read_csv(StringIO(data), sep=r"\s+")
+
+    expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
+def test_uneven_lines_with_usecols(all_parsers, usecols):
+    # see gh-12203
+    parser = all_parsers
+    data = r"""a,b,c
+0,1,2
+3,4,5,6,7
+8,9,10"""
+
+    if usecols is None:
+        # Make sure that an error is still raised
+        # when the "usecols" parameter is not provided.
+        msg = r"Expected \d+ fields in line \d+, saw \d+"
+        with pytest.raises(ParserError, match=msg):
+            parser.read_csv(StringIO(data))
+    else:
+        expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
+
+        result = parser.read_csv(StringIO(data), usecols=usecols)
+        tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+@pytest.mark.parametrize(
+    "data,kwargs,expected",
+    [
+        # First, check to see that the response of parser when faced with no
+        # provided columns raises the correct error, with or without usecols.
+        ("", {}, None),
+        ("", {"usecols": ["X"]}, None),
+        (
+            ",,",
+            {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
+            DataFrame(columns=["X"], index=[0], dtype=np.float64),
+        ),
+        (
+            "",
+            {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
+            DataFrame(columns=["X"]),
+        ),
+    ],
+)
+def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
+    # see gh-12493
+    parser = all_parsers
+
+    if expected is None:
+        msg = "No columns to parse from file"
+        with pytest.raises(EmptyDataError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+    else:
+        result = parser.read_csv(StringIO(data), **kwargs)
+        tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "kwargs,expected",
+    [
+        # gh-8661, gh-8679: this should ignore six lines, including
+        # lines with trailing whitespace and blank lines.
+        (
+            {
+                "header": None,
+                "delim_whitespace": True,
+                "skiprows": [0, 1, 2, 3, 5, 6],
+                "skip_blank_lines": True,
+            },
+            DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
+        ),
+        # gh-8983: test skipping set of rows after a row with trailing spaces.
+        (
+            {
+                "delim_whitespace": True,
+                "skiprows": [1, 2, 3, 5, 6],
+                "skip_blank_lines": True,
+            },
+            DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
+        ),
+    ],
+)
+def test_trailing_spaces(all_parsers, kwargs, expected):
+    data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"  # noqa: E501
+    parser = all_parsers
+
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+
+    if parser.engine == "pyarrow":
+        msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(
+                FutureWarning, match=depr_msg, check_stacklevel=False
+            ):
+                parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
+        return
+
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_raise_on_sep_with_delim_whitespace(all_parsers):
+    # see gh-6607
+    data = "a b c\n1 2 3"
+    parser = all_parsers
+
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+    with pytest.raises(ValueError, match="you can only specify one"):
+        with tm.assert_produces_warning(
+            FutureWarning, match=depr_msg, check_stacklevel=False
+        ):
+            parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
+
+
+def test_read_filepath_or_buffer(all_parsers):
+    # see gh-43366
+    parser = all_parsers
+
+    with pytest.raises(TypeError, match="Expected file path name or file-like"):
+        parser.read_csv(filepath_or_buffer=b"input")
+
+
+@pytest.mark.parametrize("delim_whitespace", [True, False])
+def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
+    # see gh-9710
+    parser = all_parsers
+    data = """\
+MyColumn
+a
+b
+a
+b\n"""
+
+    expected = DataFrame({"MyColumn": list("abab")})
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+
+    if parser.engine == "pyarrow":
+        msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(
+                FutureWarning, match=depr_msg, check_stacklevel=False
+            ):
+                parser.read_csv(
+                    StringIO(data),
+                    skipinitialspace=True,
+                    delim_whitespace=delim_whitespace,
+                )
+        return
+
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        result = parser.read_csv(
+            StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
+        )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "sep,skip_blank_lines,exp_data",
+    [
+        (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
+        (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
+        (
+            ",",
+            False,
+            [
+                [1.0, 2.0, 4.0],
+                [np.nan, np.nan, np.nan],
+                [np.nan, np.nan, np.nan],
+                [5.0, np.nan, 10.0],
+                [np.nan, np.nan, np.nan],
+                [-70.0, 0.4, 1.0],
+            ],
+        ),
+    ],
+)
+def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data, request):
+    parser = all_parsers
+    data = """\
+A,B,C
+1,2.,4.
+
+
+5.,NaN,10.0
+
+-70,.4,1
+"""
+
+    if sep == r"\s+":
+        data = data.replace(",", " ")
+
+    if parser.engine == "pyarrow":
+        msg = "the 'pyarrow' engine does not support regex separators"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines
+            )
+        return
+
+    result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
+    expected = DataFrame(exp_data, columns=["A", "B", "C"])
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_whitespace_lines(all_parsers):
+    parser = all_parsers
+    data = """
+
+\t \t\t
+\t
+A,B,C
+\t 1,2.,4.
+5.,NaN,10.0
+"""
+    expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "data,expected",
+    [
+        (
+            """ A B C D
+a 1 2 3 4
+b 1 2 3 4
+c 1 2 3 4
+""",
+            DataFrame(
+                [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
+                columns=["A", "B", "C", "D"],
+                index=["a", "b", "c"],
+            ),
+        ),
+        (
+            " a b c\n1 2 3 \n4 5 6\n 7 8 9",
+            DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
+        ),
+    ],
+)
+def test_whitespace_regex_separator(all_parsers, data, expected):
+    # see gh-6607
+    parser = all_parsers
+    if parser.engine == "pyarrow":
+        msg = "the 'pyarrow' engine does not support regex separators"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), sep=r"\s+")
+        return
+
+    result = parser.read_csv(StringIO(data), sep=r"\s+")
+    tm.assert_frame_equal(result, expected)
+
+
+def test_sub_character(all_parsers, csv_dir_path):
+    # see gh-16893
+    filename = os.path.join(csv_dir_path, "sub_char.csv")
+    expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
+
+    parser = all_parsers
+    result = parser.read_csv(filename)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
+def test_filename_with_special_chars(all_parsers, filename):
+    # see gh-15086.
+    parser = all_parsers
+    df = DataFrame({"a": [1, 2, 3]})
+
+    with tm.ensure_clean(filename) as path:
+        df.to_csv(path, index=False)
+
+        result = parser.read_csv(path)
+        tm.assert_frame_equal(result, df)
+
+
+def test_read_table_same_signature_as_read_csv(all_parsers):
+    # GH-34976
+    parser = all_parsers
+
+    table_sign = signature(parser.read_table)
+    csv_sign = signature(parser.read_csv)
+
+    assert table_sign.parameters.keys() == csv_sign.parameters.keys()
+    assert table_sign.return_annotation == csv_sign.return_annotation
+
+    for key, csv_param in csv_sign.parameters.items():
+        table_param = table_sign.parameters[key]
+        if key == "sep":
+            assert csv_param.default == ","
+            assert table_param.default == "\t"
+            assert table_param.annotation == csv_param.annotation
+            assert table_param.kind == csv_param.kind
+            continue
+
+        assert table_param == csv_param
+
+
+def test_read_table_equivalency_to_read_csv(all_parsers):
+    # see gh-21948
+    # As of 0.25.0, read_table is undeprecated
+    parser = all_parsers
+    data = "a\tb\n1\t2\n3\t4"
+    expected = parser.read_csv(StringIO(data), sep="\t")
+    result = parser.read_table(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("read_func", ["read_csv", "read_table"])
+def test_read_csv_and_table_sys_setprofile(all_parsers, read_func):
+    # GH#41069
+    parser = all_parsers
+    data = "a b\n0 1"
+
+    sys.setprofile(lambda *a, **k: None)
+    result = getattr(parser, read_func)(StringIO(data))
+    sys.setprofile(None)
+
+    expected = DataFrame({"a b": ["0 1"]})
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_first_row_bom(all_parsers):
+    # see gh-26545
+    parser = all_parsers
+    data = '''\ufeff"Head1"\t"Head2"\t"Head3"'''
+
+    result = parser.read_csv(StringIO(data), delimiter="\t")
+    expected = DataFrame(columns=["Head1", "Head2", "Head3"])
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_first_row_bom_unquoted(all_parsers):
+    # see gh-36343
+    parser = all_parsers
+    data = """\ufeffHead1\tHead2\tHead3"""
+
+    result = parser.read_csv(StringIO(data), delimiter="\t")
+    expected = DataFrame(columns=["Head1", "Head2", "Head3"])
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", range(1, 6))
+def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
+    # GH 28071
+    ref = DataFrame(
+        [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
+        columns=list("ab"),
+    )
+    csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False
+            )
+        return
+
+    df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
+    tm.assert_frame_equal(df, ref[:nrows])
+
+
+@skip_pyarrow
+def test_no_header_two_extra_columns(all_parsers):
+    # GH 26218
+    column_names = ["one", "two", "three"]
+    ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
+    stream = StringIO("foo,bar,baz,bam,blah")
+    parser = all_parsers
+    df = parser.read_csv_check_warnings(
+        ParserWarning,
+        "Length of header or names does not match length of data. "
+        "This leads to a loss of data with index_col=False.",
+        stream,
+        header=None,
+        names=column_names,
+        index_col=False,
+    )
+    tm.assert_frame_equal(df, ref)
+
+
+def test_read_csv_names_not_accepting_sets(all_parsers):
+    # GH 34946
+    data = """\
+1,2,3
+4,5,6\n"""
+    parser = all_parsers
+    with pytest.raises(ValueError, match="Names should be an ordered collection."):
+        parser.read_csv(StringIO(data), names=set("QAZ"))
+
+
+def test_read_table_delim_whitespace_default_sep(all_parsers):
+    # GH: 35958
+    f = StringIO("a b c\n1 -2 -3\n4 5 6")
+    parser = all_parsers
+
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_table is deprecated"
+
+    if parser.engine == "pyarrow":
+        msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(
+                FutureWarning, match=depr_msg, check_stacklevel=False
+            ):
+                parser.read_table(f, delim_whitespace=True)
+        return
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        result = parser.read_table(f, delim_whitespace=True)
+    expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("delimiter", [",", "\t"])
+def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
+    # GH: 35958
+    f = StringIO("a b c\n1 -2 -3\n4 5 6")
+    parser = all_parsers
+    msg = (
+        "Specified a delimiter with both sep and "
+        "delim_whitespace=True; you can only specify one."
+    )
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(f, delim_whitespace=True, sep=delimiter)
+
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
+
+
+def test_read_csv_delimiter_and_sep_no_default(all_parsers):
+    # GH#39823
+    f = StringIO("a,b\n1,2")
+    parser = all_parsers
+    msg = "Specified a sep and a delimiter; you can only specify one."
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(f, sep=" ", delimiter=".")
+
+
+@pytest.mark.parametrize("kwargs", [{"delimiter": "\n"}, {"sep": "\n"}])
+def test_read_csv_line_break_as_separator(kwargs, all_parsers):
+    # GH#43528
+    parser = all_parsers
+    data = """a,b,c
+1,2,3
+"""
+    msg = (
+        r"Specified \\n as separator or delimiter. This forces the python engine "
+        r"which does not accept a line terminator. Hence it is not allowed to use "
+        r"the line terminator as separator."
+    )
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(StringIO(data), **kwargs)
+
+
+@pytest.mark.parametrize("delimiter", [",", "\t"])
+def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
+    # GH: 35958
+    f = StringIO("a b c\n1 -2 -3\n4 5 6")
+    parser = all_parsers
+    msg = (
+        "Specified a delimiter with both sep and "
+        "delim_whitespace=True; you can only specify one."
+    )
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_table is deprecated"
+    with tm.assert_produces_warning(
+        FutureWarning, match=depr_msg, check_stacklevel=False
+    ):
+        with pytest.raises(ValueError, match=msg):
+            parser.read_table(f, delim_whitespace=True, sep=delimiter)
+
+        with pytest.raises(ValueError, match=msg):
+            parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
+
+
+@skip_pyarrow
+def test_dict_keys_as_names(all_parsers):
+    # GH: 36928
+    data = "1,2"
+
+    keys = {"a": int, "b": int}.keys()
+    parser = all_parsers
+
+    result = parser.read_csv(StringIO(data), names=keys)
+    expected = DataFrame({"a": [1], "b": [2]})
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xed in position 0
+def test_encoding_surrogatepass(all_parsers):
+    # GH39017
+    parser = all_parsers
+    content = b"\xed\xbd\xbf"
+    decoded = content.decode("utf-8", errors="surrogatepass")
+    expected = DataFrame({decoded: [decoded]}, index=[decoded * 2])
+    expected.index.name = decoded * 2
+
+    with tm.ensure_clean() as path:
+        Path(path).write_bytes(
+            content * 2 + b"," + content + b"\n" + content * 2 + b"," + content
+        )
+        df = parser.read_csv(path, encoding_errors="surrogatepass", index_col=0)
+        tm.assert_frame_equal(df, expected)
+        with pytest.raises(UnicodeDecodeError, match="'utf-8' codec can't decode byte"):
+            parser.read_csv(path)
+
+
+def test_malformed_second_line(all_parsers):
+    # see GH14782
+    parser = all_parsers
+    data = "\na\nb\n"
+    result = parser.read_csv(StringIO(data), skip_blank_lines=False, header=1)
+    expected = DataFrame({"a": ["b"]})
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+def test_short_single_line(all_parsers):
+    # GH 47566
+    parser = all_parsers
+    columns = ["a", "b", "c"]
+    data = "1,2"
+    result = parser.read_csv(StringIO(data), header=None, names=columns)
+    expected = DataFrame({"a": [1], "b": [2], "c": [np.nan]})
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # ValueError: Length mismatch: Expected axis has 2 elements
+def test_short_multi_line(all_parsers):
+    # GH 47566
+    parser = all_parsers
+    columns = ["a", "b", "c"]
+    data = "1,2\n1,2"
+    result = parser.read_csv(StringIO(data), header=None, names=columns)
+    expected = DataFrame({"a": [1, 1], "b": [2, 2], "c": [np.nan, np.nan]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_seek(all_parsers):
+    # GH48646
+    parser = all_parsers
+    prefix = "### DATA\n"
+    content = "nkey,value\ntables,rectangular\n"
+    with tm.ensure_clean() as path:
+        Path(path).write_text(prefix + content, encoding="utf-8")
+        with open(path, encoding="utf-8") as file:
+            file.readline()
+            actual = parser.read_csv(file)
+        expected = parser.read_csv(StringIO(content))
+    tm.assert_frame_equal(actual, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_decimal.py ADDED
@@ -0,0 +1,72 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from io import StringIO
+
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+
+@pytest.mark.parametrize(
+    "data,thousands,decimal",
+    [
+        (
+            """A|B|C
+1|2,334.01|5
+10|13|10.
+""",
+            ",",
+            ".",
+        ),
+        (
+            """A|B|C
+1|2.334,01|5
+10|13|10,
+""",
+            ".",
+            ",",
+        ),
+    ],
+)
+def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
+    parser = all_parsers
+    expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
+
+    if parser.engine == "pyarrow":
+        msg = "The 'thousands' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), sep="|", thousands=thousands, decimal=decimal
+            )
+        return
+
+    result = parser.read_csv(
+        StringIO(data), sep="|", thousands=thousands, decimal=decimal
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_euro_decimal_format(all_parsers):
+    parser = all_parsers
+    data = """Id;Number1;Number2;Text1;Text2;Number3
+1;1521,1541;187101,9543;ABC;poi;4,738797819
+2;121,12;14897,76;DEF;uyt;0,377320872
+3;878,158;108013,434;GHI;rez;2,735694704"""
+
+    result = parser.read_csv(StringIO(data), sep=";", decimal=",")
+    expected = DataFrame(
+        [
+            [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
+            [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
+            [3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
+        ],
+        columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
+    )
+    tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_file_buffer_url.py ADDED
@@ -0,0 +1,478 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from io import (
+    BytesIO,
+    StringIO,
+)
+import os
+import platform
+from urllib.error import URLError
+import uuid
+
+import numpy as np
+import pytest
+
+from pandas.errors import (
+    EmptyDataError,
+    ParserError,
+)
+import pandas.util._test_decorators as td
+
+from pandas import (
+    DataFrame,
+    Index,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+@pytest.mark.network
+@pytest.mark.single_cpu
+def test_url(all_parsers, csv_dir_path, httpserver):
+    parser = all_parsers
+    kwargs = {"sep": "\t"}
+
+    local_path = os.path.join(csv_dir_path, "salaries.csv")
+    with open(local_path, encoding="utf-8") as f:
+        httpserver.serve_content(content=f.read())
+
+    url_result = parser.read_csv(httpserver.url, **kwargs)
+
+    local_result = parser.read_csv(local_path, **kwargs)
+    tm.assert_frame_equal(url_result, local_result)
+
+
+@pytest.mark.slow
+def test_local_file(all_parsers, csv_dir_path):
+    parser = all_parsers
+    kwargs = {"sep": "\t"}
+
+    local_path = os.path.join(csv_dir_path, "salaries.csv")
+    local_result = parser.read_csv(local_path, **kwargs)
+    url = "file://localhost/" + local_path
+
+    try:
+        url_result = parser.read_csv(url, **kwargs)
+        tm.assert_frame_equal(url_result, local_result)
+    except URLError:
+        # Fails on some systems.
+        pytest.skip("Failing on: " + " ".join(platform.uname()))
+
+
+@xfail_pyarrow  # AssertionError: DataFrame.index are different
+def test_path_path_lib(all_parsers):
+    parser = all_parsers
+    df = DataFrame(
+        1.1 * np.arange(120).reshape((30, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=Index([f"i-{i}" for i in range(30)], dtype=object),
+    )
+    result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
+    tm.assert_frame_equal(df, result)
+
+
+@xfail_pyarrow  # AssertionError: DataFrame.index are different
+def test_path_local_path(all_parsers):
+    parser = all_parsers
+    df = DataFrame(
+        1.1 * np.arange(120).reshape((30, 4)),
+        columns=Index(list("ABCD"), dtype=object),
+        index=Index([f"i-{i}" for i in range(30)], dtype=object),
+    )
+    result = tm.round_trip_localpath(
+        df.to_csv, lambda p: parser.read_csv(p, index_col=0)
+    )
+    tm.assert_frame_equal(df, result)
+
+
+def test_nonexistent_path(all_parsers):
+    # gh-2428: pls no segfault
+    # gh-14086: raise more helpful FileNotFoundError
+    # GH#29233 "File foo" instead of "File b'foo'"
+    parser = all_parsers
+    path = f"{uuid.uuid4()}.csv"
+
+    msg = r"\[Errno 2\]"
+    with pytest.raises(FileNotFoundError, match=msg) as e:
+        parser.read_csv(path)
+    assert path == e.value.filename
+
+
+@td.skip_if_windows  # os.chmod does not work in windows
+def test_no_permission(all_parsers):
+    # GH 23784
+    parser = all_parsers
+
+    msg = r"\[Errno 13\]"
+    with tm.ensure_clean() as path:
+        os.chmod(path, 0)  # make file unreadable
+
+        # verify that this process cannot open the file (not running as sudo)
+        try:
+            with open(path, encoding="utf-8"):
+                pass
+            pytest.skip("Running as sudo.")
+        except PermissionError:
+            pass
+
+        with pytest.raises(PermissionError, match=msg) as e:
+            parser.read_csv(path)
+        assert path == e.value.filename
+
+
+@pytest.mark.parametrize(
+    "data,kwargs,expected,msg",
+    [
+        # gh-10728: WHITESPACE_LINE
+        (
+            "a,b,c\n4,5,6\n ",
+            {},
+            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+            None,
+        ),
+        # gh-10548: EAT_LINE_COMMENT
+        (
+            "a,b,c\n4,5,6\n#comment",
+            {"comment": "#"},
+            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+            None,
+        ),
+        # EAT_CRNL_NOP
+        (
+            "a,b,c\n4,5,6\n\r",
+            {},
+            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+            None,
+        ),
+        # EAT_COMMENT
+        (
+            "a,b,c\n4,5,6#comment",
+            {"comment": "#"},
+            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+            None,
+        ),
+        # SKIP_LINE
+        (
+            "a,b,c\n4,5,6\nskipme",
+            {"skiprows": [2]},
+            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+            None,
+        ),
+        # EAT_LINE_COMMENT
+        (
+            "a,b,c\n4,5,6\n#comment",
+            {"comment": "#", "skip_blank_lines": False},
+            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
+            None,
+        ),
+        # IN_FIELD
+        (
+            "a,b,c\n4,5,6\n ",
+            {"skip_blank_lines": False},
+            DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
+            None,
+        ),
+        # EAT_CRNL
+        (
+            "a,b,c\n4,5,6\n\r",
+            {"skip_blank_lines": False},
+            DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
+            None,
+        ),
+        # ESCAPED_CHAR
+        (
+            "a,b,c\n4,5,6\n\\",
+            {"escapechar": "\\"},
+            None,
+            "(EOF following escape character)|(unexpected end of data)",
+        ),
+        # ESCAPE_IN_QUOTED_FIELD
+        (
+            'a,b,c\n4,5,6\n"\\',
+            {"escapechar": "\\"},
+            None,
+            "(EOF inside string starting at row 2)|(unexpected end of data)",
+        ),
+        # IN_QUOTED_FIELD
+        (
+            'a,b,c\n4,5,6\n"',
+            {"escapechar": "\\"},
+            None,
+            "(EOF inside string starting at row 2)|(unexpected end of data)",
+        ),
+    ],
+    ids=[
+        "whitespace-line",
+        "eat-line-comment",
+        "eat-crnl-nop",
+        "eat-comment",
+        "skip-line",
+        "eat-line-comment",
+        "in-field",
+        "eat-crnl",
+        "escaped-char",
+        "escape-in-quoted-field",
+        "in-quoted-field",
+    ],
+)
+def test_eof_states(all_parsers, data, kwargs, expected, msg, request):
+    # see gh-10728, gh-10548
+    parser = all_parsers
+
+    if parser.engine == "pyarrow" and "comment" in kwargs:
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+        return
+
+    if parser.engine == "pyarrow" and "\r" not in data:
+        # pandas.errors.ParserError: CSV parse error: Expected 3 columns, got 1:
+        # ValueError: skiprows argument must be an integer when using engine='pyarrow'
+        # AssertionError: Regex pattern did not match.
+        pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+
+    if expected is None:
+        with pytest.raises(ParserError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+    else:
+        result = parser.read_csv(StringIO(data), **kwargs)
+        tm.assert_frame_equal(result, expected)
+
+
+def test_temporary_file(all_parsers):
+    # see gh-13398
+    parser = all_parsers
+    data = "0 0"
+
+    with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
+        new_file.write(data)
+        new_file.flush()
+        new_file.seek(0)
+
+        if parser.engine == "pyarrow":
+            msg = "the 'pyarrow' engine does not support regex separators"
+            with pytest.raises(ValueError, match=msg):
+                parser.read_csv(new_file, sep=r"\s+", header=None)
+            return
+
+        result = parser.read_csv(new_file, sep=r"\s+", header=None)
+
+        expected = DataFrame([[0, 0]])
+        tm.assert_frame_equal(result, expected)
+
+
+def test_internal_eof_byte(all_parsers):
+    # see gh-5500
+    parser = all_parsers
+    data = "a,b\n1\x1a,2"
+
+    expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_internal_eof_byte_to_file(all_parsers):
+    # see gh-16559
+    parser = all_parsers
+    data = b'c1,c2\r\n"test \x1a test", test\r\n'
+    expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
+    path = f"__{uuid.uuid4()}__.csv"
+
+    with tm.ensure_clean(path) as path:
+        with open(path, "wb") as f:
+            f.write(data)
+
+        result = parser.read_csv(path)
+        tm.assert_frame_equal(result, expected)
+
+
+def test_file_handle_string_io(all_parsers):
+    # gh-14418
+    #
+    # Don't close user provided file handles.
+    parser = all_parsers
+    data = "a,b\n1,2"
+
+    fh = StringIO(data)
+    parser.read_csv(fh)
+    assert not fh.closed
+
+
+def test_file_handles_with_open(all_parsers, csv1):
+    # gh-14418
+    #
+    # Don't close user provided file handles.
+    parser = all_parsers
+
+    for mode in ["r", "rb"]:
+        with open(csv1, mode, encoding="utf-8" if mode == "r" else None) as f:
+            parser.read_csv(f)
+            assert not f.closed
+
+
+def test_invalid_file_buffer_class(all_parsers):
+    # see gh-15337
+    class InvalidBuffer:
+        pass
+
+    parser = all_parsers
+    msg = "Invalid file path or buffer object type"
+
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(InvalidBuffer())
+
+
+def test_invalid_file_buffer_mock(all_parsers):
+    # see gh-15337
+    parser = all_parsers
+    msg = "Invalid file path or buffer object type"
+
+    class Foo:
+        pass
+
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(Foo())
+
+
+def test_valid_file_buffer_seems_invalid(all_parsers):
+    # gh-16135: we want to ensure that "tell" and "seek"
+    # aren't actually being used when we call `read_csv`
+    #
+    # Thus, while the object may look "invalid" (these
+    # methods are attributes of the `StringIO` class),
+    # it is still a valid file-object for our purposes.
+    class NoSeekTellBuffer(StringIO):
+        def tell(self):
+            raise AttributeError("No tell method")
+
+        def seek(self, pos, whence=0):
+            raise AttributeError("No seek method")
+
+    data = "a\n1"
+    parser = all_parsers
+    expected = DataFrame({"a": [1]})
+
+    result = parser.read_csv(NoSeekTellBuffer(data))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("io_class", [StringIO, BytesIO])
+@pytest.mark.parametrize("encoding", [None, "utf-8"])
+def test_read_csv_file_handle(all_parsers, io_class, encoding):
+    """
+    Test whether read_csv does not close user-provided file handles.
+
+    GH 36980
+    """
+    parser = all_parsers
+    expected = DataFrame({"a": [1], "b": [2]})
+
+    content = "a,b\n1,2"
+    handle = io_class(content.encode("utf-8") if io_class == BytesIO else content)
+
+    tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected)
+    assert not handle.closed
+
+
+def test_memory_map_compression(all_parsers, compression):
+    """
+    Support memory map for compressed files.
+
+    GH 37621
+    """
+    parser = all_parsers
+    expected = DataFrame({"a": [1], "b": [2]})
+
+    with tm.ensure_clean() as path:
+        expected.to_csv(path, index=False, compression=compression)
+
+        if parser.engine == "pyarrow":
+            msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+            with pytest.raises(ValueError, match=msg):
+                parser.read_csv(path, memory_map=True, compression=compression)
+            return
+
+        result = parser.read_csv(path, memory_map=True, compression=compression)
+
+    tm.assert_frame_equal(
+        result,
+        expected,
+    )
+
+
+def test_context_manager(all_parsers, datapath):
+    # make sure that opened files are closed
+    parser = all_parsers
+
+    path = datapath("io", "data", "csv", "iris.csv")
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(path, chunksize=1)
+        return
+
+    reader = parser.read_csv(path, chunksize=1)
+    assert not reader.handles.handle.closed
+    try:
+        with reader:
+            next(reader)
+            assert False
+    except AssertionError:
+        assert reader.handles.handle.closed
+
+
+def test_context_manageri_user_provided(all_parsers, datapath):
+    # make sure that user-provided handles are not closed
+    parser = all_parsers
+
+    with open(datapath("io", "data", "csv", "iris.csv"), encoding="utf-8") as path:
+        if parser.engine == "pyarrow":
+            msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+            with pytest.raises(ValueError, match=msg):
+                parser.read_csv(path, chunksize=1)
+            return
+
+        reader = parser.read_csv(path, chunksize=1)
+        assert not reader.handles.handle.closed
+        try:
+            with reader:
+                next(reader)
+                assert False
+        except AssertionError:
+            assert not reader.handles.handle.closed
+
+
+@skip_pyarrow  # ParserError: Empty CSV file
+def test_file_descriptor_leak(all_parsers, using_copy_on_write):
+    # GH 31488
+    parser = all_parsers
+    with tm.ensure_clean() as path:
+        with pytest.raises(EmptyDataError, match="No columns to parse from file"):
+            parser.read_csv(path)
+
+
+def test_memory_map(all_parsers, csv_dir_path):
+    mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
+    parser = all_parsers
+
+    expected = DataFrame(
+        {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
+    )
+
+    if parser.engine == "pyarrow":
+        msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(mmap_file, memory_map=True)
+        return
+
+    result = parser.read_csv(mmap_file, memory_map=True)
+    tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_index.py ADDED
@@ -0,0 +1,302 @@
1
+ """
2
+ Tests that work on both the Python and C engines but do not have a
3
+ specific classification into the other test modules.
4
+ """
5
+ from datetime import datetime
6
+ from io import StringIO
7
+ import os
8
+
9
+ import pytest
10
+
11
+ from pandas import (
12
+ DataFrame,
13
+ Index,
14
+ MultiIndex,
15
+ )
16
+ import pandas._testing as tm
17
+
18
+ pytestmark = pytest.mark.filterwarnings(
19
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
20
+ )
21
+
22
+ xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
23
+ skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
24
+
25
+
26
+ @pytest.mark.parametrize(
27
+ "data,kwargs,expected",
28
+ [
29
+ (
30
+ """foo,2,3,4,5
31
+ bar,7,8,9,10
32
+ baz,12,13,14,15
33
+ qux,12,13,14,15
34
+ foo2,12,13,14,15
35
+ bar2,12,13,14,15
36
+ """,
37
+ {"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
38
+ DataFrame(
39
+ [
40
+ [2, 3, 4, 5],
41
+ [7, 8, 9, 10],
42
+ [12, 13, 14, 15],
43
+ [12, 13, 14, 15],
44
+ [12, 13, 14, 15],
45
+ [12, 13, 14, 15],
46
+ ],
47
+ index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
48
+ columns=["A", "B", "C", "D"],
49
+ ),
50
+ ),
51
+ (
52
+ """foo,one,2,3,4,5
53
+ foo,two,7,8,9,10
54
+ foo,three,12,13,14,15
55
+ bar,one,12,13,14,15
56
+ bar,two,12,13,14,15
57
+ """,
58
+ {"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
59
+ DataFrame(
60
+ [
61
+ [2, 3, 4, 5],
62
+ [7, 8, 9, 10],
63
+ [12, 13, 14, 15],
64
+ [12, 13, 14, 15],
65
+ [12, 13, 14, 15],
66
+ ],
67
+ index=MultiIndex.from_tuples(
68
+ [
69
+ ("foo", "one"),
70
+ ("foo", "two"),
71
+ ("foo", "three"),
72
+ ("bar", "one"),
73
+ ("bar", "two"),
74
+ ],
75
+ names=["index1", "index2"],
76
+ ),
77
+ columns=["A", "B", "C", "D"],
78
+ ),
79
+ ),
80
+ ],
81
+ )
82
+ def test_pass_names_with_index(all_parsers, data, kwargs, expected):
83
+ parser = all_parsers
84
+ result = parser.read_csv(StringIO(data), **kwargs)
85
+ tm.assert_frame_equal(result, expected)
86
+
87
+
88
+ @pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
89
+ def test_multi_index_no_level_names(all_parsers, index_col):
90
+ data = """index1,index2,A,B,C,D
91
+ foo,one,2,3,4,5
92
+ foo,two,7,8,9,10
93
+ foo,three,12,13,14,15
94
+ bar,one,12,13,14,15
95
+ bar,two,12,13,14,15
96
+ """
97
+ headless_data = "\n".join(data.split("\n")[1:])
98
+
99
+ names = ["A", "B", "C", "D"]
100
+ parser = all_parsers
101
+
102
+ result = parser.read_csv(
103
+ StringIO(headless_data), index_col=index_col, header=None, names=names
104
+ )
105
+ expected = parser.read_csv(StringIO(data), index_col=index_col)
106
+
107
+ # No index names in headless data.
108
+ expected.index.names = [None] * 2
109
+ tm.assert_frame_equal(result, expected)
110
+
111
+
112
+ @skip_pyarrow
113
+ def test_multi_index_no_level_names_implicit(all_parsers):
114
+ parser = all_parsers
115
+ data = """A,B,C,D
116
+ foo,one,2,3,4,5
117
+ foo,two,7,8,9,10
118
+ foo,three,12,13,14,15
119
+ bar,one,12,13,14,15
120
+ bar,two,12,13,14,15
121
+ """
122
+
123
+ result = parser.read_csv(StringIO(data))
124
+ expected = DataFrame(
125
+ [
126
+ [2, 3, 4, 5],
127
+ [7, 8, 9, 10],
128
+ [12, 13, 14, 15],
129
+ [12, 13, 14, 15],
130
+ [12, 13, 14, 15],
131
+ ],
132
+ columns=["A", "B", "C", "D"],
133
+ index=MultiIndex.from_tuples(
134
+ [
135
+ ("foo", "one"),
136
+ ("foo", "two"),
137
+ ("foo", "three"),
138
+ ("bar", "one"),
139
+ ("bar", "two"),
140
+ ]
141
+ ),
142
+ )
143
+ tm.assert_frame_equal(result, expected)
144
+
145
+
146
+ @xfail_pyarrow # TypeError: an integer is required
147
+ @pytest.mark.parametrize(
148
+ "data,expected,header",
149
+ [
150
+ ("a,b", DataFrame(columns=["a", "b"]), [0]),
151
+ (
152
+ "a,b\nc,d",
153
+ DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
154
+ [0, 1],
155
+ ),
156
+ ],
157
+ )
158
+ @pytest.mark.parametrize("round_trip", [True, False])
159
+ def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
160
+ # see gh-14545
161
+ parser = all_parsers
162
+ data = expected.to_csv(index=False) if round_trip else data
163
+
164
+ result = parser.read_csv(StringIO(data), header=header)
165
+ tm.assert_frame_equal(result, expected)
166
+
167
+
168
+ @xfail_pyarrow # AssertionError: DataFrame.columns are different
169
+ def test_no_unnamed_index(all_parsers):
170
+ parser = all_parsers
171
+ data = """ id c0 c1 c2
172
+ 0 1 0 a b
173
+ 1 2 0 c d
174
+ 2 2 2 e f
175
+ """
176
+ result = parser.read_csv(StringIO(data), sep=" ")
177
+ expected = DataFrame(
178
+ [[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
179
+ columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
180
+ )
181
+ tm.assert_frame_equal(result, expected)
182
+
183
+
184
+ def test_read_duplicate_index_explicit(all_parsers):
185
+ data = """index,A,B,C,D
186
+ foo,2,3,4,5
187
+ bar,7,8,9,10
188
+ baz,12,13,14,15
189
+ qux,12,13,14,15
190
+ foo,12,13,14,15
191
+ bar,12,13,14,15
192
+ """
193
+ parser = all_parsers
194
+ result = parser.read_csv(StringIO(data), index_col=0)
195
+
196
+ expected = DataFrame(
197
+ [
198
+ [2, 3, 4, 5],
199
+ [7, 8, 9, 10],
200
+ [12, 13, 14, 15],
201
+ [12, 13, 14, 15],
202
+ [12, 13, 14, 15],
203
+ [12, 13, 14, 15],
204
+ ],
205
+ columns=["A", "B", "C", "D"],
206
+ index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
207
+ )
208
+ tm.assert_frame_equal(result, expected)
209
+
210
+
211
+ @skip_pyarrow
212
+ def test_read_duplicate_index_implicit(all_parsers):
213
+ data = """A,B,C,D
214
+ foo,2,3,4,5
215
+ bar,7,8,9,10
216
+ baz,12,13,14,15
217
+ qux,12,13,14,15
218
+ foo,12,13,14,15
219
+ bar,12,13,14,15
220
+ """
221
+ parser = all_parsers
222
+ result = parser.read_csv(StringIO(data))
223
+
224
+ expected = DataFrame(
225
+ [
226
+ [2, 3, 4, 5],
227
+ [7, 8, 9, 10],
228
+ [12, 13, 14, 15],
229
+ [12, 13, 14, 15],
230
+ [12, 13, 14, 15],
231
+ [12, 13, 14, 15],
232
+ ],
233
+ columns=["A", "B", "C", "D"],
234
+ index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
235
+ )
236
+ tm.assert_frame_equal(result, expected)
237
+
238
+
239
+ @skip_pyarrow
+ def test_read_csv_no_index_name(all_parsers, csv_dir_path):
+     parser = all_parsers
+     csv2 = os.path.join(csv_dir_path, "test2.csv")
+     result = parser.read_csv(csv2, index_col=0, parse_dates=True)
+ 
+     expected = DataFrame(
+         [
+             [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
+             [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
+             [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
+             [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
+             [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
+         ],
+         columns=["A", "B", "C", "D", "E"],
+         index=Index(
+             [
+                 datetime(2000, 1, 3),
+                 datetime(2000, 1, 4),
+                 datetime(2000, 1, 5),
+                 datetime(2000, 1, 6),
+                 datetime(2000, 1, 7),
+             ]
+         ),
+     )
+     tm.assert_frame_equal(result, expected)
+ 
+ 
+ @skip_pyarrow
+ def test_empty_with_index(all_parsers):
+     # see gh-10184
+     data = "x,y"
+     parser = all_parsers
+     result = parser.read_csv(StringIO(data), index_col=0)
+ 
+     expected = DataFrame(columns=["y"], index=Index([], name="x"))
+     tm.assert_frame_equal(result, expected)
+ 
+ 
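+ # A header-only CSV still yields a valid (empty) frame, and the order given
+ # in index_col controls the order of the resulting MultiIndex levels.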
278
+ # CSV parse error: Empty CSV file or block: cannot infer number of columns
+ @skip_pyarrow
+ def test_empty_with_multi_index(all_parsers):
+     # see gh-10467
+     data = "x,y,z"
+     parser = all_parsers
+     result = parser.read_csv(StringIO(data), index_col=["x", "y"])
+ 
+     expected = DataFrame(
+         columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
+     )
+     tm.assert_frame_equal(result, expected)
+ 
+ 
+ # CSV parse error: Empty CSV file or block: cannot infer number of columns
+ @skip_pyarrow
+ def test_empty_with_reversed_multi_index(all_parsers):
+     data = "x,y,z"
+     parser = all_parsers
+     result = parser.read_csv(StringIO(data), index_col=[1, 0])
+ 
+     expected = DataFrame(
+         columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
+     )
+     tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_inf.py ADDED
@@ -0,0 +1,78 @@
+ """
+ Tests that work on both the Python and C engines but do not have a
+ specific classification into the other test modules.
+ """
+ from io import StringIO
+ 
+ import numpy as np
+ import pytest
+ 
+ from pandas import (
+     DataFrame,
+     option_context,
+ )
+ import pandas._testing as tm
+ 
+ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+ 
+ xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+ 
+ 
+ @xfail_pyarrow  # AssertionError: DataFrame.index are different
+ @pytest.mark.parametrize("na_filter", [True, False])
+ def test_inf_parsing(all_parsers, na_filter):
+     parser = all_parsers
+     data = """\
+ ,A
+ a,inf
+ b,-inf
+ c,+Inf
+ d,-Inf
+ e,INF
+ f,-INF
+ g,+INf
+ h,-INf
+ i,inF
+ j,-inF"""
+     expected = DataFrame(
+         {"A": [float("inf"), float("-inf")] * 5},
+         index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
+     )
+     result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
+     tm.assert_frame_equal(result, expected)
+ 
+ 
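+ # "inf" is matched case-insensitively with an optional sign, so rows a-j
+ # above alternate between +inf and -inf regardless of capitalization.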
47
+ @xfail_pyarrow  # AssertionError: DataFrame.index are different
+ @pytest.mark.parametrize("na_filter", [True, False])
+ def test_infinity_parsing(all_parsers, na_filter):
+     parser = all_parsers
+     data = """\
+ ,A
+ a,Infinity
+ b,-Infinity
+ c,+Infinity
+ """
+     expected = DataFrame(
+         {"A": [float("infinity"), float("-infinity"), float("+infinity")]},
+         index=["a", "b", "c"],
+     )
+     result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
+     tm.assert_frame_equal(result, expected)
+ 
+ 
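+ # The use_inf_as_na option is deprecated in recent pandas; setting it warns
+ # with a FutureWarning, and the pyarrow engine may additionally surface a
+ # DeprecationWarning, hence the tuple of expected warnings below.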
65
+ def test_read_csv_with_use_inf_as_na(all_parsers):
+     # https://github.com/pandas-dev/pandas/issues/35493
+     parser = all_parsers
+     data = "1.0\nNaN\n3.0"
+     msg = "use_inf_as_na option is deprecated"
+     warn = FutureWarning
+     if parser.engine == "pyarrow":
+         warn = (FutureWarning, DeprecationWarning)
+ 
+     with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
+         with option_context("use_inf_as_na", True):
+             result = parser.read_csv(StringIO(data), header=None)
+     expected = DataFrame([1.0, np.nan, 3.0])
+     tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_iterator.py ADDED
@@ -0,0 +1,134 @@
+ """
+ Tests that work on both the Python and C engines but do not have a
+ specific classification into the other test modules.
+ """
+ from io import StringIO
+ 
+ import pytest
+ 
+ from pandas import (
+     DataFrame,
+     concat,
+ )
+ import pandas._testing as tm
+ 
+ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+ 
+ 
+ def test_iterator(all_parsers):
+     # see gh-6607
+     data = """index,A,B,C,D
+ foo,2,3,4,5
+ bar,7,8,9,10
+ baz,12,13,14,15
+ qux,12,13,14,15
+ foo2,12,13,14,15
+ bar2,12,13,14,15
+ """
+     parser = all_parsers
+     kwargs = {"index_col": 0}
+ 
+     expected = parser.read_csv(StringIO(data), **kwargs)
+ 
+     if parser.engine == "pyarrow":
+         msg = "The 'iterator' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), iterator=True, **kwargs)
+         return
+ 
+     with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
+         first_chunk = reader.read(3)
+         tm.assert_frame_equal(first_chunk, expected[:3])
+ 
+         last_chunk = reader.read(5)
+         tm.assert_frame_equal(last_chunk, expected[3:])
+ 
+ 
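+ # With iterator=True, read_csv returns a TextFileReader; reader.read(n) pulls
+ # the next n rows, and the with-block closes the underlying handle. Iterating
+ # the reader without a chunksize yields the remaining data as a single frame,
+ # which is why the next test checks result[0] against the full expected frame.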
49
+ def test_iterator2(all_parsers):
+     parser = all_parsers
+     data = """A,B,C
+ foo,1,2,3
+ bar,4,5,6
+ baz,7,8,9
+ """
+ 
+     if parser.engine == "pyarrow":
+         msg = "The 'iterator' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), iterator=True)
+         return
+ 
+     with parser.read_csv(StringIO(data), iterator=True) as reader:
+         result = list(reader)
+ 
+     expected = DataFrame(
+         [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+         index=["foo", "bar", "baz"],
+         columns=["A", "B", "C"],
+     )
+     tm.assert_frame_equal(result[0], expected)
+ 
+ 
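+ # With chunksize=n, iterating the reader yields one frame per n rows; concat
+ # reassembles the chunks into the original result.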
74
+ def test_iterator_stop_on_chunksize(all_parsers):
+     # gh-3967: stopping iteration when chunksize is specified
+     parser = all_parsers
+     data = """A,B,C
+ foo,1,2,3
+ bar,4,5,6
+ baz,7,8,9
+ """
+     if parser.engine == "pyarrow":
+         msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), chunksize=1)
+         return
+ 
+     with parser.read_csv(StringIO(data), chunksize=1) as reader:
+         result = list(reader)
+ 
+     assert len(result) == 3
+     expected = DataFrame(
+         [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+         index=["foo", "bar", "baz"],
+         columns=["A", "B", "C"],
+     )
+     tm.assert_frame_equal(concat(result), expected)
+ 
+ 
+ @pytest.mark.parametrize(
+     "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
+ )
+ def test_iterator_skipfooter_errors(all_parsers, kwargs):
+     msg = "'skipfooter' not supported for iteration"
+     parser = all_parsers
+     data = "a\n1\n2"
+ 
+     if parser.engine == "pyarrow":
+         msg = (
+             "The '(chunksize|iterator)' option is not supported with the "
+             "'pyarrow' engine"
+         )
+ 
+     with pytest.raises(ValueError, match=msg):
+         with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:
+             pass
+ 
+ 
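+ # read_csv accepts an already-open file handle and reads from its current
+ # position, so the lines consumed by the loop below are not re-read.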
119
+ def test_iteration_open_handle(all_parsers):
+     parser = all_parsers
+     kwargs = {"header": None}
+ 
+     with tm.ensure_clean() as path:
+         with open(path, "w", encoding="utf-8") as f:
+             f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
+ 
+         with open(path, encoding="utf-8") as f:
+             for line in f:
+                 if "CCC" in line:
+                     break
+ 
+             result = parser.read_csv(f, **kwargs)
+             expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})
+             tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_read_errors.py ADDED
@@ -0,0 +1,320 @@
+ """
+ Tests that work on the Python, C and PyArrow engines but do not have a
+ specific classification into the other test modules.
+ """
+ import codecs
+ import csv
+ from io import StringIO
+ import os
+ from pathlib import Path
+ 
+ import numpy as np
+ import pytest
+ 
+ from pandas.compat import PY311
+ from pandas.errors import (
+     EmptyDataError,
+     ParserError,
+     ParserWarning,
+ )
+ 
+ from pandas import DataFrame
+ import pandas._testing as tm
+ 
+ xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+ skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+ 
+ 
+ def test_empty_decimal_marker(all_parsers):
+     data = """A|B|C
+ 1|2,334|5
+ 10|13|10.
+ """
+     # Parsers support only length-1 decimals
+     msg = "Only length-1 decimal markers supported"
+     parser = all_parsers
+ 
+     if parser.engine == "pyarrow":
+         msg = (
+             "only single character unicode strings can be "
+             "converted to Py_UCS4, got length 0"
+         )
+ 
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv(StringIO(data), decimal="")
+ 
+ 
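+ # The StreamRecoder below re-decodes the raw bytes as UTF-8 while reading;
+ # the file's SHIFT_JIS bytes are not valid UTF-8, so the read itself raises
+ # UnicodeDecodeError, which both engines should propagate rather than swallow.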
47
+ def test_bad_stream_exception(all_parsers, csv_dir_path):
+     # see gh-13652
+     #
+     # This test validates that both the Python and C engines will raise
+     # UnicodeDecodeError, instead of the C engine raising a ParserError
+     # and swallowing the exception that caused the read to fail.
+     path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
+     codec = codecs.lookup("utf-8")
+     utf8 = codecs.lookup("utf-8")
+     parser = all_parsers
+     msg = "'utf-8' codec can't decode byte"
+ 
+     # Stream must be binary UTF8.
+     with open(path, "rb") as handle, codecs.StreamRecoder(
+         handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
+     ) as stream:
+         with pytest.raises(UnicodeDecodeError, match=msg):
+             parser.read_csv(stream)
+ 
+ 
67
+ def test_malformed(all_parsers):
+     # see gh-6607
+     parser = all_parsers
+     data = """ignore
+ A,B,C
+ 1,2,3 # comment
+ 1,2,3,4,5
+ 2,3,4
+ """
+     msg = "Expected 3 fields in line 4, saw 5"
+     err = ParserError
+     if parser.engine == "pyarrow":
+         msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         err = ValueError
+     with pytest.raises(err, match=msg):
+         parser.read_csv(StringIO(data), header=1, comment="#")
+ 
+ 
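+ # When reading in chunks, a malformed row only raises once the chunk that
+ # contains it is actually read, not when the reader is constructed.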
85
+ @pytest.mark.parametrize("nrows", [5, 3, None])
+ def test_malformed_chunks(all_parsers, nrows):
+     data = """ignore
+ A,B,C
+ skip
+ 1,2,3
+ 3,5,10 # comment
+ 1,2,3,4,5
+ 2,3,4
+ """
+     parser = all_parsers
+ 
+     if parser.engine == "pyarrow":
+         msg = "The 'iterator' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(
+                 StringIO(data),
+                 header=1,
+                 comment="#",
+                 iterator=True,
+                 chunksize=1,
+                 skiprows=[2],
+             )
+         return
+ 
+     msg = "Expected 3 fields in line 6, saw 5"
+     with parser.read_csv(
+         StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
+     ) as reader:
+         with pytest.raises(ParserError, match=msg):
+             reader.read(nrows)
+ 
+ 
118
+ @xfail_pyarrow  # does not raise
+ def test_catch_too_many_names(all_parsers):
+     # see gh-5156
+     data = """\
+ 1,2,3
+ 4,,6
+ 7,8,9
+ 10,11,12\n"""
+     parser = all_parsers
+     msg = (
+         "Too many columns specified: expected 4 and found 3"
+         if parser.engine == "c"
+         else "Number of passed names did not match "
+         "number of header fields in the file"
+     )
+ 
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
+ 
+ 
+ @skip_pyarrow  # CSV parse error: Empty CSV file or block
+ @pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
+ def test_raise_on_no_columns(all_parsers, nrows):
+     parser = all_parsers
+     data = "\n" * nrows
+ 
+     msg = "No columns to parse from file"
+     with pytest.raises(EmptyDataError, match=msg):
+         parser.read_csv(StringIO(data))
+ 
+ 
+ def test_unexpected_keyword_parameter_exception(all_parsers):
+     # GH-34976
+     parser = all_parsers
+ 
+     msg = "{}\\(\\) got an unexpected keyword argument 'foo'"
+     with pytest.raises(TypeError, match=msg.format("read_csv")):
+         parser.read_csv("foo.csv", foo=1)
+     with pytest.raises(TypeError, match=msg.format("read_table")):
+         parser.read_table("foo.tsv", foo=1)
+ 
+ 
+ def test_suppress_error_output(all_parsers):
+     # see gh-15925
+     parser = all_parsers
+     data = "a\n1\n1,2,3\n4\n5,6,7"
+     expected = DataFrame({"a": [1, 4]})
+ 
+     result = parser.read_csv(StringIO(data), on_bad_lines="skip")
+     tm.assert_frame_equal(result, expected)
+ 
+ 
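+ # on_bad_lines accepts "skip" (drop bad rows silently), "warn" (drop them and
+ # emit a ParserWarning), or "error" (raise ParserError); the next two tests
+ # cover the latter two modes.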
170
+ def test_error_bad_lines(all_parsers):
+     # see gh-15925
+     parser = all_parsers
+     data = "a\n1\n1,2,3\n4\n5,6,7"
+ 
+     msg = "Expected 1 fields in line 3, saw 3"
+ 
+     if parser.engine == "pyarrow":
+         # "CSV parse error: Expected 1 columns, got 3: 1,2,3"
+         pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+ 
+     with pytest.raises(ParserError, match=msg):
+         parser.read_csv(StringIO(data), on_bad_lines="error")
+ 
+ 
+ def test_warn_bad_lines(all_parsers):
+     # see gh-15925
+     parser = all_parsers
+     data = "a\n1\n1,2,3\n4\n5,6,7"
+     expected = DataFrame({"a": [1, 4]})
+     match_msg = "Skipping line"
+ 
+     expected_warning = ParserWarning
+     if parser.engine == "pyarrow":
+         match_msg = "Expected 1 columns, but found 3: 1,2,3"
+         expected_warning = (ParserWarning, DeprecationWarning)
+ 
+     with tm.assert_produces_warning(
+         expected_warning, match=match_msg, check_stacklevel=False
+     ):
+         result = parser.read_csv(StringIO(data), on_bad_lines="warn")
+     tm.assert_frame_equal(result, expected)
+ 
+ 
+ def test_read_csv_wrong_num_columns(all_parsers):
+     # The header names fewer columns than some data rows contain.
+     data = """A,B,C,D,E,F
+ 1,2,3,4,5,6
+ 6,7,8,9,10,11,12
+ 11,12,13,14,15,16
+ """
+     parser = all_parsers
+     msg = "Expected 6 fields in line 3, saw 7"
+ 
+     if parser.engine == "pyarrow":
+         # Expected 6 columns, got 7: 6,7,8,9,10,11,12
+         pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+ 
+     with pytest.raises(ParserError, match=msg):
+         parser.read_csv(StringIO(data))
+ 
+ 
+ def test_null_byte_char(request, all_parsers):
+     # see gh-2741
+     data = "\x00,foo"
+     names = ["a", "b"]
+     parser = all_parsers
+ 
+     if parser.engine == "c" or (parser.engine == "python" and PY311):
+         if parser.engine == "python" and PY311:
+             request.applymarker(
+                 pytest.mark.xfail(
+                     reason="In Python 3.11, this is read as an empty character not null"
+                 )
+             )
+         expected = DataFrame([[np.nan, "foo"]], columns=names)
+         out = parser.read_csv(StringIO(data), names=names)
+         tm.assert_frame_equal(out, expected)
+     else:
+         if parser.engine == "pyarrow":
+             # CSV parse error: Empty CSV file or block: "
+             # cannot infer number of columns"
+             pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+         else:
+             msg = "NULL byte detected"
+             with pytest.raises(ParserError, match=msg):
+                 parser.read_csv(StringIO(data), names=names)
+ 
+ 
+ @pytest.mark.filterwarnings("always::ResourceWarning")
+ def test_open_file(request, all_parsers):
+     # GH 39024
+     parser = all_parsers
+ 
+     msg = "Could not determine delimiter"
+     err = csv.Error
+     if parser.engine == "c":
+         msg = "the 'c' engine does not support sep=None with delim_whitespace=False"
+         err = ValueError
+     elif parser.engine == "pyarrow":
+         msg = (
+             "the 'pyarrow' engine does not support sep=None with delim_whitespace=False"
+         )
+         err = ValueError
+ 
+     with tm.ensure_clean() as path:
+         file = Path(path)
+         file.write_bytes(b"\xe4\na\n1")
+ 
+         with tm.assert_produces_warning(None):
+             # should not trigger a ResourceWarning
+             with pytest.raises(err, match=msg):
+                 parser.read_csv(file, sep=None, encoding_errors="replace")
+ 
+ 
+ def test_invalid_on_bad_line(all_parsers):
+     parser = all_parsers
+     data = "a\n1\n1,2,3\n4\n5,6,7"
+     with pytest.raises(ValueError, match="Argument abc is invalid for on_bad_lines"):
+         parser.read_csv(StringIO(data), on_bad_lines="abc")
+ 
+ 
+ def test_bad_header_uniform_error(all_parsers):
+     parser = all_parsers
+     data = "+++123456789...\ncol1,col2,col3,col4\n1,2,3,4\n"
+     msg = "Expected 2 fields in line 2, saw 4"
+     if parser.engine == "c":
+         msg = (
+             "Could not construct index. Requested to use 1 "
+             "number of columns, but 3 left to parse."
+         )
+     elif parser.engine == "pyarrow":
+         # "CSV parse error: Expected 1 columns, got 4: col1,col2,col3,col4"
+         pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+ 
+     with pytest.raises(ParserError, match=msg):
+         parser.read_csv(StringIO(data), index_col=0, on_bad_lines="error")
+ 
+ 
+ def test_on_bad_lines_warn_correct_formatting(all_parsers):
+     # see gh-15925
+     parser = all_parsers
+     data = """1,2
+ a,b
+ a,b,c
+ a,b,d
+ a,b
+ """
+     expected = DataFrame({"1": "a", "2": ["b"] * 2})
+     match_msg = "Skipping line"
+ 
+     expected_warning = ParserWarning
+     if parser.engine == "pyarrow":
+         match_msg = "Expected 2 columns, but found 3: a,b,c"
+         expected_warning = (ParserWarning, DeprecationWarning)
+ 
+     with tm.assert_produces_warning(
+         expected_warning, match=match_msg, check_stacklevel=False
+     ):
+         result = parser.read_csv(StringIO(data), on_bad_lines="warn")
+     tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_verbose.py ADDED
@@ -0,0 +1,81 @@
+ """
+ Tests that work on both the Python and C engines but do not have a
+ specific classification into the other test modules.
+ """
+ from io import StringIO
+ 
+ import pytest
+ 
+ import pandas._testing as tm
+ 
+ depr_msg = "The 'verbose' keyword in pd.read_csv is deprecated"
+ 
+ 
+ def test_verbose_read(all_parsers, capsys):
+     parser = all_parsers
+     data = """a,b,c,d
+ one,1,2,3
+ one,1,2,3
+ ,1,2,3
+ one,1,2,3
+ ,1,2,3
+ ,1,2,3
+ one,1,2,3
+ two,1,2,3"""
+ 
+     if parser.engine == "pyarrow":
+         msg = "The 'verbose' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             with tm.assert_produces_warning(
+                 FutureWarning, match=depr_msg, check_stacklevel=False
+             ):
+                 parser.read_csv(StringIO(data), verbose=True)
+         return
+ 
+     # Engines are verbose in different ways.
+     with tm.assert_produces_warning(
+         FutureWarning, match=depr_msg, check_stacklevel=False
+     ):
+         parser.read_csv(StringIO(data), verbose=True)
+     captured = capsys.readouterr()
+ 
+     if parser.engine == "c":
+         assert "Tokenization took:" in captured.out
+         assert "Parser memory cleanup took:" in captured.out
+     else:  # Python engine
+         assert captured.out == "Filled 3 NA values in column a\n"
+ 
+ 
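+ # The second test mirrors the first but with only one empty field in column
+ # a, so the Python engine reports a single filled NA value.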
49
+ def test_verbose_read2(all_parsers, capsys):
+     parser = all_parsers
+     data = """a,b,c,d
+ one,1,2,3
+ two,1,2,3
+ three,1,2,3
+ four,1,2,3
+ five,1,2,3
+ ,1,2,3
+ seven,1,2,3
+ eight,1,2,3"""
+ 
+     if parser.engine == "pyarrow":
+         msg = "The 'verbose' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             with tm.assert_produces_warning(
+                 FutureWarning, match=depr_msg, check_stacklevel=False
+             ):
+                 parser.read_csv(StringIO(data), verbose=True, index_col=0)
+         return
+ 
+     with tm.assert_produces_warning(
+         FutureWarning, match=depr_msg, check_stacklevel=False
+     ):
+         parser.read_csv(StringIO(data), verbose=True, index_col=0)
+     captured = capsys.readouterr()
+ 
+     # Engines are verbose in different ways.
+     if parser.engine == "c":
+         assert "Tokenization took:" in captured.out
+         assert "Parser memory cleanup took:" in captured.out
+     else:  # Python engine
+         assert captured.out == "Filled 1 NA values in column a\n"
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (202 Bytes).
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc ADDED
Binary file (8.28 kB).