Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_79_mp_rank_02_optim_states.pt +3 -0
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_91_mp_rank_03_optim_states.pt +3 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__init__.py +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py +77 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py +106 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py +432 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py +1751 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py +298 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py +1511 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py +76 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py +86 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py +9 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py +317 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py +907 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py +2202 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/__init__.py +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py +319 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__init__.py +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_categorical.py +334 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.py +643 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_empty.py +181 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py +643 -0
- venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_comment.py +227 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_79_mp_rank_02_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c621cdfe648640f959f7941fbd6731fa14500a48341eb294777ec47cb77b297
+size 41830394
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_91_mp_rank_03_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:992a571497b11e66e56a3bc164e5cae459140c44d281ec6fa6bec406d41e186a
+size 41830330
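The two checkpoint entries above are Git LFS pointer files rather than the tensors themselves: each pointer records the LFS spec version, a sha256 object id, and the byte size of the real blob. A minimal sketch of reading those three fields from a pointer's text (parse_lfs_pointer is an illustrative helper, not part of this repository):

# Minimal sketch: split a Git LFS pointer into its key/value fields.
# parse_lfs_pointer is an illustrative helper, not part of this repository.
def parse_lfs_pointer(text: str) -> dict[str, str]:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# For the first pointer above: fields["size"] == "41830394",
# and fields["oid"].startswith("sha256:") is True.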
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (189 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc
ADDED
Binary file (2.34 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc
ADDED
Binary file (3.33 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc
ADDED
Binary file (12.4 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc
ADDED
Binary file (48.8 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc
ADDED
Binary file (8.72 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc
ADDED
Binary file (46.1 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc
ADDED
Binary file (2.35 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc
ADDED
Binary file (2.75 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py
ADDED
@@ -0,0 +1,77 @@
+import functools
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+import pandas._testing as tm
+
+pytest.importorskip("odf")
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+@pytest.fixture(autouse=True)
+def cd_and_set_engine(monkeypatch, datapath):
+    func = functools.partial(pd.read_excel, engine="odf")
+    monkeypatch.setattr(pd, "read_excel", func)
+    monkeypatch.chdir(datapath("io", "data", "excel"))
+
+
+def test_read_invalid_types_raises():
+    # the invalid_value_type.ods required manually editing
+    # of the included content.xml file
+    with pytest.raises(ValueError, match="Unrecognized type awesome_new_type"):
+        pd.read_excel("invalid_value_type.ods")
+
+
+def test_read_writer_table():
+    # Also test reading tables from an text OpenDocument file
+    # (.odt)
+    index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header")
+    expected = pd.DataFrame(
+        [[1, np.nan, 7], [2, np.nan, 8], [3, np.nan, 9]],
+        index=index,
+        columns=["Column 1", "Unnamed: 2", "Column 3"],
+    )
+
+    result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0)
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_newlines_between_xml_elements_table():
+    # GH#45598
+    expected = pd.DataFrame(
+        [[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]],
+        columns=["Column 1", "Column 2", "Column 3"],
+    )
+
+    result = pd.read_excel("test_newlines.ods")
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_unempty_cells():
+    expected = pd.DataFrame(
+        [1, np.nan, 3, np.nan, 5],
+        columns=["Column 1"],
+    )
+
+    result = pd.read_excel("test_unempty_cells.ods")
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_cell_annotation():
+    expected = pd.DataFrame(
+        ["test", np.nan, "test 3"],
+        columns=["Column 1"],
+    )
+
+    result = pd.read_excel("test_cell_annotation.ods")
+
+    tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py
ADDED
@@ -0,0 +1,106 @@
+from datetime import (
+    date,
+    datetime,
+)
+import re
+
+import pytest
+
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+import pandas._testing as tm
+
+from pandas.io.excel import ExcelWriter
+
+odf = pytest.importorskip("odf")
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+@pytest.fixture
+def ext():
+    return ".ods"
+
+
+def test_write_append_mode_raises(ext):
+    msg = "Append mode is not supported with odf!"
+
+    with tm.ensure_clean(ext) as f:
+        with pytest.raises(ValueError, match=msg):
+            ExcelWriter(f, engine="odf", mode="a")
+
+
+@pytest.mark.parametrize("engine_kwargs", [None, {"kwarg": 1}])
+def test_engine_kwargs(ext, engine_kwargs):
+    # GH 42286
+    # GH 43445
+    # test for error: OpenDocumentSpreadsheet does not accept any arguments
+    with tm.ensure_clean(ext) as f:
+        if engine_kwargs is not None:
+            error = re.escape(
+                "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'"
+            )
+            with pytest.raises(
+                TypeError,
+                match=error,
+            ):
+                ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs)
+        else:
+            with ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) as _:
+                pass
+
+
+def test_book_and_sheets_consistent(ext):
+    # GH#45687 - Ensure sheets is updated if user modifies book
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f) as writer:
+            assert writer.sheets == {}
+            table = odf.table.Table(name="test_name")
+            writer.book.spreadsheet.addElement(table)
+            assert writer.sheets == {"test_name": table}
+
+
+@pytest.mark.parametrize(
+    ["value", "cell_value_type", "cell_value_attribute", "cell_value"],
+    argvalues=[
+        (True, "boolean", "boolean-value", "true"),
+        ("test string", "string", "string-value", "test string"),
+        (1, "float", "value", "1"),
+        (1.5, "float", "value", "1.5"),
+        (
+            datetime(2010, 10, 10, 10, 10, 10),
+            "date",
+            "date-value",
+            "2010-10-10T10:10:10",
+        ),
+        (date(2010, 10, 10), "date", "date-value", "2010-10-10"),
+    ],
+)
+def test_cell_value_type(ext, value, cell_value_type, cell_value_attribute, cell_value):
+    # GH#54994 ODS: cell attributes should follow specification
+    # http://docs.oasis-open.org/office/v1.2/os/OpenDocument-v1.2-os-part1.html#refTable13
+    from odf.namespaces import OFFICENS
+    from odf.table import (
+        TableCell,
+        TableRow,
+    )
+
+    table_cell_name = TableCell().qname
+
+    with tm.ensure_clean(ext) as f:
+        pd.DataFrame([[value]]).to_excel(f, header=False, index=False)
+
+        with pd.ExcelFile(f) as wb:
+            sheet = wb._reader.get_sheet_by_index(0)
+            sheet_rows = sheet.getElementsByType(TableRow)
+            sheet_cells = [
+                x
+                for x in sheet_rows[0].childNodes
+                if hasattr(x, "qname") and x.qname == table_cell_name
+            ]
+
+            cell = sheet_cells[0]
+            assert cell.attributes.get((OFFICENS, "value-type")) == cell_value_type
+            assert cell.attributes.get((OFFICENS, cell_value_attribute)) == cell_value
venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py
ADDED
@@ -0,0 +1,432 @@
+import contextlib
+from pathlib import Path
+import re
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.excel import (
+    ExcelWriter,
+    _OpenpyxlWriter,
+)
+from pandas.io.excel._openpyxl import OpenpyxlReader
+
+openpyxl = pytest.importorskip("openpyxl")
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+@pytest.fixture
+def ext():
+    return ".xlsx"
+
+
+def test_to_excel_styleconverter():
+    from openpyxl import styles
+
+    hstyle = {
+        "font": {"color": "00FF0000", "bold": True},
+        "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"},
+        "alignment": {"horizontal": "center", "vertical": "top"},
+        "fill": {"patternType": "solid", "fgColor": {"rgb": "006666FF", "tint": 0.3}},
+        "number_format": {"format_code": "0.00"},
+        "protection": {"locked": True, "hidden": False},
+    }
+
+    font_color = styles.Color("00FF0000")
+    font = styles.Font(bold=True, color=font_color)
+    side = styles.Side(style=styles.borders.BORDER_THIN)
+    border = styles.Border(top=side, right=side, bottom=side, left=side)
+    alignment = styles.Alignment(horizontal="center", vertical="top")
+    fill_color = styles.Color(rgb="006666FF", tint=0.3)
+    fill = styles.PatternFill(patternType="solid", fgColor=fill_color)
+
+    number_format = "0.00"
+
+    protection = styles.Protection(locked=True, hidden=False)
+
+    kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
+    assert kw["font"] == font
+    assert kw["border"] == border
+    assert kw["alignment"] == alignment
+    assert kw["fill"] == fill
+    assert kw["number_format"] == number_format
+    assert kw["protection"] == protection
+
+
+def test_write_cells_merge_styled(ext):
+    from pandas.io.formats.excel import ExcelCell
+
+    sheet_name = "merge_styled"
+
+    sty_b1 = {"font": {"color": "00FF0000"}}
+    sty_a2 = {"font": {"color": "0000FF00"}}
+
+    initial_cells = [
+        ExcelCell(col=1, row=0, val=42, style=sty_b1),
+        ExcelCell(col=0, row=1, val=99, style=sty_a2),
+    ]
+
+    sty_merged = {"font": {"color": "000000FF", "bold": True}}
+    sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
+    openpyxl_sty_merged = sty_kwargs["font"]
+    merge_cells = [
+        ExcelCell(
+            col=0, row=0, val="pandas", mergestart=1, mergeend=1, style=sty_merged
+        )
+    ]
+
+    with tm.ensure_clean(ext) as path:
+        with _OpenpyxlWriter(path) as writer:
+            writer._write_cells(initial_cells, sheet_name=sheet_name)
+            writer._write_cells(merge_cells, sheet_name=sheet_name)
+
+            wks = writer.sheets[sheet_name]
+        xcell_b1 = wks["B1"]
+        xcell_a2 = wks["A2"]
+        assert xcell_b1.font == openpyxl_sty_merged
+        assert xcell_a2.font == openpyxl_sty_merged
+
+
+@pytest.mark.parametrize("iso_dates", [True, False])
+def test_engine_kwargs_write(ext, iso_dates):
+    # GH 42286 GH 43445
+    engine_kwargs = {"iso_dates": iso_dates}
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer:
+            assert writer.book.iso_dates == iso_dates
+            # ExcelWriter won't allow us to close without writing something
+            DataFrame().to_excel(writer)
+
+
+def test_engine_kwargs_append_invalid(ext):
+    # GH 43445
+    # test whether an invalid engine kwargs actually raises
+    with tm.ensure_clean(ext) as f:
+        DataFrame(["hello", "world"]).to_excel(f)
+        with pytest.raises(
+            TypeError,
+            match=re.escape(
+                "load_workbook() got an unexpected keyword argument 'apple_banana'"
+            ),
+        ):
+            with ExcelWriter(
+                f, engine="openpyxl", mode="a", engine_kwargs={"apple_banana": "fruit"}
+            ) as writer:
+                # ExcelWriter needs us to write something to close properly
+                DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2")
+
+
+@pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")])
+def test_engine_kwargs_append_data_only(ext, data_only, expected):
+    # GH 43445
+    # tests whether the data_only engine_kwarg actually works well for
+    # openpyxl's load_workbook
+    with tm.ensure_clean(ext) as f:
+        DataFrame(["=1+1"]).to_excel(f)
+        with ExcelWriter(
+            f, engine="openpyxl", mode="a", engine_kwargs={"data_only": data_only}
+        ) as writer:
+            assert writer.sheets["Sheet1"]["B2"].value == expected
+            # ExcelWriter needs us to writer something to close properly?
+            DataFrame().to_excel(writer, sheet_name="Sheet2")
+
+        # ensure that data_only also works for reading
+        # and that formulas/values roundtrip
+        assert (
+            pd.read_excel(
+                f,
+                sheet_name="Sheet1",
+                engine="openpyxl",
+                engine_kwargs={"data_only": data_only},
+            ).iloc[0, 1]
+            == expected
+        )
+
+
+@pytest.mark.parametrize("kwarg_name", ["read_only", "data_only"])
+@pytest.mark.parametrize("kwarg_value", [True, False])
+def test_engine_kwargs_append_reader(datapath, ext, kwarg_name, kwarg_value):
+    # GH 55027
+    # test that `read_only` and `data_only` can be passed to
+    # `openpyxl.reader.excel.load_workbook` via `engine_kwargs`
+    filename = datapath("io", "data", "excel", "test1" + ext)
+    with contextlib.closing(
+        OpenpyxlReader(filename, engine_kwargs={kwarg_name: kwarg_value})
+    ) as reader:
+        assert getattr(reader.book, kwarg_name) == kwarg_value
+
+
+@pytest.mark.parametrize(
+    "mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])]
+)
+def test_write_append_mode(ext, mode, expected):
+    df = DataFrame([1], columns=["baz"])
+
+    with tm.ensure_clean(ext) as f:
+        wb = openpyxl.Workbook()
+        wb.worksheets[0].title = "foo"
+        wb.worksheets[0]["A1"].value = "foo"
+        wb.create_sheet("bar")
+        wb.worksheets[1]["A1"].value = "bar"
+        wb.save(f)
+
+        with ExcelWriter(f, engine="openpyxl", mode=mode) as writer:
+            df.to_excel(writer, sheet_name="baz", index=False)
+
+        with contextlib.closing(openpyxl.load_workbook(f)) as wb2:
+            result = [sheet.title for sheet in wb2.worksheets]
+            assert result == expected
+
+            for index, cell_value in enumerate(expected):
+                assert wb2.worksheets[index]["A1"].value == cell_value
+
+
+@pytest.mark.parametrize(
+    "if_sheet_exists,num_sheets,expected",
+    [
+        ("new", 2, ["apple", "banana"]),
+        ("replace", 1, ["pear"]),
+        ("overlay", 1, ["pear", "banana"]),
+    ],
+)
+def test_if_sheet_exists_append_modes(ext, if_sheet_exists, num_sheets, expected):
+    # GH 40230
+    df1 = DataFrame({"fruit": ["apple", "banana"]})
+    df2 = DataFrame({"fruit": ["pear"]})
+
+    with tm.ensure_clean(ext) as f:
+        df1.to_excel(f, engine="openpyxl", sheet_name="foo", index=False)
+        with ExcelWriter(
+            f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists
+        ) as writer:
+            df2.to_excel(writer, sheet_name="foo", index=False)
+
+        with contextlib.closing(openpyxl.load_workbook(f)) as wb:
+            assert len(wb.sheetnames) == num_sheets
+            assert wb.sheetnames[0] == "foo"
+            result = pd.read_excel(wb, "foo", engine="openpyxl")
+            assert list(result["fruit"]) == expected
+            if len(wb.sheetnames) == 2:
+                result = pd.read_excel(wb, wb.sheetnames[1], engine="openpyxl")
+                tm.assert_frame_equal(result, df2)
+
+
+@pytest.mark.parametrize(
+    "startrow, startcol, greeting, goodbye",
+    [
+        (0, 0, ["poop", "world"], ["goodbye", "people"]),
+        (0, 1, ["hello", "world"], ["poop", "people"]),
+        (1, 0, ["hello", "poop"], ["goodbye", "people"]),
+        (1, 1, ["hello", "world"], ["goodbye", "poop"]),
+    ],
+)
+def test_append_overlay_startrow_startcol(ext, startrow, startcol, greeting, goodbye):
+    df1 = DataFrame({"greeting": ["hello", "world"], "goodbye": ["goodbye", "people"]})
+    df2 = DataFrame(["poop"])
+
+    with tm.ensure_clean(ext) as f:
+        df1.to_excel(f, engine="openpyxl", sheet_name="poo", index=False)
+        with ExcelWriter(
+            f, engine="openpyxl", mode="a", if_sheet_exists="overlay"
+        ) as writer:
+            # use startrow+1 because we don't have a header
+            df2.to_excel(
+                writer,
+                index=False,
+                header=False,
+                startrow=startrow + 1,
+                startcol=startcol,
+                sheet_name="poo",
+            )
+
+        result = pd.read_excel(f, sheet_name="poo", engine="openpyxl")
+        expected = DataFrame({"greeting": greeting, "goodbye": goodbye})
+        tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "if_sheet_exists,msg",
+    [
+        (
+            "invalid",
+            "'invalid' is not valid for if_sheet_exists. Valid options "
+            "are 'error', 'new', 'replace' and 'overlay'.",
+        ),
+        (
+            "error",
+            "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.",
+        ),
+        (
+            None,
+            "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.",
+        ),
+    ],
+)
+def test_if_sheet_exists_raises(ext, if_sheet_exists, msg):
+    # GH 40230
+    df = DataFrame({"fruit": ["pear"]})
+    with tm.ensure_clean(ext) as f:
+        with pytest.raises(ValueError, match=re.escape(msg)):
+            df.to_excel(f, sheet_name="foo", engine="openpyxl")
+            with ExcelWriter(
+                f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists
+            ) as writer:
+                df.to_excel(writer, sheet_name="foo")
+
+
+def test_to_excel_with_openpyxl_engine(ext):
+    # GH 29854
+    with tm.ensure_clean(ext) as filename:
+        df1 = DataFrame({"A": np.linspace(1, 10, 10)})
+        df2 = DataFrame({"B": np.linspace(1, 20, 10)})
+        df = pd.concat([df1, df2], axis=1)
+        styled = df.style.map(
+            lambda val: f"color: {'red' if val < 0 else 'black'}"
+        ).highlight_max()
+
+        styled.to_excel(filename, engine="openpyxl")
+
+
+@pytest.mark.parametrize("read_only", [True, False])
+def test_read_workbook(datapath, ext, read_only):
+    # GH 39528
+    filename = datapath("io", "data", "excel", "test1" + ext)
+    with contextlib.closing(
+        openpyxl.load_workbook(filename, read_only=read_only)
+    ) as wb:
+        result = pd.read_excel(wb, engine="openpyxl")
+    expected = pd.read_excel(filename)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "header, expected_data",
+    [
+        (
+            0,
+            {
+                "Title": [np.nan, "A", 1, 2, 3],
+                "Unnamed: 1": [np.nan, "B", 4, 5, 6],
+                "Unnamed: 2": [np.nan, "C", 7, 8, 9],
+            },
+        ),
+        (2, {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}),
+    ],
+)
+@pytest.mark.parametrize(
+    "filename", ["dimension_missing", "dimension_small", "dimension_large"]
+)
+# When read_only is None, use read_excel instead of a workbook
+@pytest.mark.parametrize("read_only", [True, False, None])
+def test_read_with_bad_dimension(
+    datapath, ext, header, expected_data, filename, read_only
+):
+    # GH 38956, 39001 - no/incorrect dimension information
+    path = datapath("io", "data", "excel", f"{filename}{ext}")
+    if read_only is None:
+        result = pd.read_excel(path, header=header)
+    else:
+        with contextlib.closing(
+            openpyxl.load_workbook(path, read_only=read_only)
+        ) as wb:
+            result = pd.read_excel(wb, engine="openpyxl", header=header)
+    expected = DataFrame(expected_data)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_append_mode_file(ext):
+    # GH 39576
+    df = DataFrame()
+
+    with tm.ensure_clean(ext) as f:
+        df.to_excel(f, engine="openpyxl")
+
+        with ExcelWriter(
+            f, mode="a", engine="openpyxl", if_sheet_exists="new"
+        ) as writer:
+            df.to_excel(writer)
+
+        # make sure that zip files are not concatenated by making sure that
+        # "docProps/app.xml" only occurs twice in the file
+        data = Path(f).read_bytes()
+        first = data.find(b"docProps/app.xml")
+        second = data.find(b"docProps/app.xml", first + 1)
+        third = data.find(b"docProps/app.xml", second + 1)
+        assert second != -1 and third == -1
+
+
+# When read_only is None, use read_excel instead of a workbook
+@pytest.mark.parametrize("read_only", [True, False, None])
+def test_read_with_empty_trailing_rows(datapath, ext, read_only):
+    # GH 39181
+    path = datapath("io", "data", "excel", f"empty_trailing_rows{ext}")
+    if read_only is None:
+        result = pd.read_excel(path)
+    else:
+        with contextlib.closing(
+            openpyxl.load_workbook(path, read_only=read_only)
+        ) as wb:
+            result = pd.read_excel(wb, engine="openpyxl")
+    expected = DataFrame(
+        {
+            "Title": [np.nan, "A", 1, 2, 3],
+            "Unnamed: 1": [np.nan, "B", 4, 5, 6],
+            "Unnamed: 2": [np.nan, "C", 7, 8, 9],
+        }
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+# When read_only is None, use read_excel instead of a workbook
+@pytest.mark.parametrize("read_only", [True, False, None])
+def test_read_empty_with_blank_row(datapath, ext, read_only):
+    # GH 39547 - empty excel file with a row that has no data
+    path = datapath("io", "data", "excel", f"empty_with_blank_row{ext}")
+    if read_only is None:
+        result = pd.read_excel(path)
+    else:
+        with contextlib.closing(
+            openpyxl.load_workbook(path, read_only=read_only)
+        ) as wb:
+            result = pd.read_excel(wb, engine="openpyxl")
+    expected = DataFrame()
+    tm.assert_frame_equal(result, expected)
+
+
+def test_book_and_sheets_consistent(ext):
+    # GH#45687 - Ensure sheets is updated if user modifies book
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="openpyxl") as writer:
+            assert writer.sheets == {}
+            sheet = writer.book.create_sheet("test_name", 0)
+            assert writer.sheets == {"test_name": sheet}
+
+
+def test_ints_spelled_with_decimals(datapath, ext):
+    # GH 46988 - openpyxl returns this sheet with floats
+    path = datapath("io", "data", "excel", f"ints_spelled_with_decimals{ext}")
+    result = pd.read_excel(path)
+    expected = DataFrame(range(2, 12), columns=[1])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_multiindex_header_no_index_names(datapath, ext):
+    # GH#47487
+    path = datapath("io", "data", "excel", f"multiindex_no_index_names{ext}")
+    result = pd.read_excel(path, index_col=[0, 1, 2], header=[0, 1, 2])
+    expected = DataFrame(
+        [[np.nan, "x", "x", "x"], ["x", np.nan, np.nan, np.nan]],
+        columns=pd.MultiIndex.from_tuples(
+            [("X", "Y", "A1"), ("X", "Y", "A2"), ("XX", "YY", "B1"), ("XX", "YY", "B2")]
+        ),
+        index=pd.MultiIndex.from_tuples([("A", "AA", "AAA"), ("A", "BB", "BBB")]),
+    )
+    tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py
ADDED
@@ -0,0 +1,1751 @@
+from __future__ import annotations
+
+from datetime import (
+    datetime,
+    time,
+)
+from functools import partial
+from io import BytesIO
+import os
+from pathlib import Path
+import platform
+import re
+from urllib.error import URLError
+from zipfile import BadZipFile
+
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas.compat import is_platform_windows
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+    MultiIndex,
+    Series,
+    read_csv,
+)
+import pandas._testing as tm
+from pandas.core.arrays import (
+    ArrowStringArray,
+    StringArray,
+)
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"]
+engine_params = [
+    # Add any engines to test here
+    # When defusedxml is installed it triggers deprecation warnings for
+    # xlrd and openpyxl, so catch those here
+    pytest.param(
+        "xlrd",
+        marks=[
+            td.skip_if_no("xlrd"),
+        ],
+    ),
+    pytest.param(
+        "openpyxl",
+        marks=[
+            td.skip_if_no("openpyxl"),
+        ],
+    ),
+    pytest.param(
+        None,
+        marks=[
+            td.skip_if_no("xlrd"),
+        ],
+    ),
+    pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")),
+    pytest.param("odf", marks=td.skip_if_no("odf")),
+    pytest.param("calamine", marks=td.skip_if_no("python_calamine")),
+]
+
+
+def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
+    """
+    Filter out invalid (engine, ext) pairs instead of skipping, as that
+    produces 500+ pytest.skips.
+    """
+    engine = engine.values[0]
+    if engine == "openpyxl" and read_ext == ".xls":
+        return False
+    if engine == "odf" and read_ext != ".ods":
+        return False
+    if read_ext == ".ods" and engine not in {"odf", "calamine"}:
+        return False
+    if engine == "pyxlsb" and read_ext != ".xlsb":
+        return False
+    if read_ext == ".xlsb" and engine not in {"pyxlsb", "calamine"}:
+        return False
+    if engine == "xlrd" and read_ext != ".xls":
+        return False
+    return True
+
+
+def _transfer_marks(engine, read_ext):
+    """
+    engine gives us a pytest.param object with some marks, read_ext is just
+    a string.  We need to generate a new pytest.param inheriting the marks.
+    """
+    values = engine.values + (read_ext,)
+    new_param = pytest.param(values, marks=engine.marks)
+    return new_param
+
+
+@pytest.fixture(
+    params=[
+        _transfer_marks(eng, ext)
+        for eng in engine_params
+        for ext in read_ext_params
+        if _is_valid_engine_ext_pair(eng, ext)
+    ],
+    ids=str,
+)
+def engine_and_read_ext(request):
+    """
+    Fixture for Excel reader engine and read_ext, only including valid pairs.
+    """
+    return request.param
+
+
+@pytest.fixture
+def engine(engine_and_read_ext):
+    engine, read_ext = engine_and_read_ext
+    return engine
+
+
+@pytest.fixture
+def read_ext(engine_and_read_ext):
+    engine, read_ext = engine_and_read_ext
+    return read_ext
+
+
+@pytest.fixture
+def df_ref(datapath):
+    """
+    Obtain the reference data from read_csv with the Python engine.
+    """
+    filepath = datapath("io", "data", "csv", "test1.csv")
+    df_ref = read_csv(filepath, index_col=0, parse_dates=True, engine="python")
+    return df_ref
+
+
+def get_exp_unit(read_ext: str, engine: str | None) -> str:
+    return "ns"
+
+
+def adjust_expected(expected: DataFrame, read_ext: str, engine: str) -> None:
+    expected.index.name = None
+    unit = get_exp_unit(read_ext, engine)
+    # error: "Index" has no attribute "as_unit"
+    expected.index = expected.index.as_unit(unit)  # type: ignore[attr-defined]
+
+
+def xfail_datetimes_with_pyxlsb(engine, request):
+    if engine == "pyxlsb":
+        request.applymarker(
+            pytest.mark.xfail(
+                reason="Sheets containing datetimes not supported by pyxlsb"
+            )
+        )
+
+
+class TestReaders:
+    @pytest.fixture(autouse=True)
+    def cd_and_set_engine(self, engine, datapath, monkeypatch):
+        """
+        Change directory and set engine for read_excel calls.
+        """
+        func = partial(pd.read_excel, engine=engine)
+        monkeypatch.chdir(datapath("io", "data", "excel"))
+        monkeypatch.setattr(pd, "read_excel", func)
+
+    def test_engine_used(self, read_ext, engine, monkeypatch):
+        # GH 38884
+        def parser(self, *args, **kwargs):
+            return self.engine
+
+        monkeypatch.setattr(pd.ExcelFile, "parse", parser)
+
+        expected_defaults = {
+            "xlsx": "openpyxl",
+            "xlsm": "openpyxl",
+            "xlsb": "pyxlsb",
+            "xls": "xlrd",
+            "ods": "odf",
+        }
+
+        with open("test1" + read_ext, "rb") as f:
+            result = pd.read_excel(f)
+
+        if engine is not None:
+            expected = engine
+        else:
+            expected = expected_defaults[read_ext[1:]]
+        assert result == expected
+
+    def test_engine_kwargs(self, read_ext, engine):
+        # GH#52214
+        expected_defaults = {
+            "xlsx": {"foo": "abcd"},
+            "xlsm": {"foo": 123},
+            "xlsb": {"foo": "True"},
+            "xls": {"foo": True},
+            "ods": {"foo": "abcd"},
+        }
+
+        if engine in {"xlrd", "pyxlsb"}:
+            msg = re.escape(r"open_workbook() got an unexpected keyword argument 'foo'")
+        elif engine == "odf":
+            msg = re.escape(r"load() got an unexpected keyword argument 'foo'")
+        else:
+            msg = re.escape(r"load_workbook() got an unexpected keyword argument 'foo'")
+
+        if engine is not None:
+            with pytest.raises(TypeError, match=msg):
+                pd.read_excel(
+                    "test1" + read_ext,
+                    sheet_name="Sheet1",
+                    index_col=0,
+                    engine_kwargs=expected_defaults[read_ext[1:]],
+                )
+
+    def test_usecols_int(self, read_ext):
+        # usecols as int
+        msg = "Passing an integer for `usecols`"
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel(
+                "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3
+            )
+
+        # usecols as int
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel(
+                "test1" + read_ext,
+                sheet_name="Sheet2",
+                skiprows=[1],
+                index_col=0,
+                usecols=3,
+            )
+
+    def test_usecols_list(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref[["B", "C"]]
+        adjust_expected(expected, read_ext, engine)
+
+        df1 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=[0, 2, 3]
+        )
+        df2 = pd.read_excel(
+            "test1" + read_ext,
+            sheet_name="Sheet2",
+            skiprows=[1],
+            index_col=0,
+            usecols=[0, 2, 3],
+        )
+
+        # TODO add index to xls file)
+        tm.assert_frame_equal(df1, expected)
+        tm.assert_frame_equal(df2, expected)
+
+    def test_usecols_str(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref[["A", "B", "C"]]
+        adjust_expected(expected, read_ext, engine)
+
+        df2 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A:D"
+        )
+        df3 = pd.read_excel(
+            "test1" + read_ext,
+            sheet_name="Sheet2",
+            skiprows=[1],
+            index_col=0,
+            usecols="A:D",
+        )
+
+        # TODO add index to xls, read xls ignores index name ?
+        tm.assert_frame_equal(df2, expected)
+        tm.assert_frame_equal(df3, expected)
+
+        expected = df_ref[["B", "C"]]
+        adjust_expected(expected, read_ext, engine)
+
+        df2 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C,D"
+        )
+        df3 = pd.read_excel(
+            "test1" + read_ext,
+            sheet_name="Sheet2",
+            skiprows=[1],
+            index_col=0,
+            usecols="A,C,D",
+        )
+        # TODO add index to xls file
+        tm.assert_frame_equal(df2, expected)
+        tm.assert_frame_equal(df3, expected)
+
+        df2 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C:D"
+        )
+        df3 = pd.read_excel(
+            "test1" + read_ext,
+            sheet_name="Sheet2",
+            skiprows=[1],
+            index_col=0,
+            usecols="A,C:D",
+        )
+        tm.assert_frame_equal(df2, expected)
+        tm.assert_frame_equal(df3, expected)
+
+    @pytest.mark.parametrize(
+        "usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
+    )
+    def test_usecols_diff_positional_int_columns_order(
+        self, request, engine, read_ext, usecols, df_ref
+    ):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref[["A", "C"]]
+        adjust_expected(expected, read_ext, engine)
+
+        result = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=usecols
+        )
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
+    def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
+        expected = df_ref[["B", "D"]]
+        expected.index = range(len(expected))
+
+        result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols=usecols)
+        tm.assert_frame_equal(result, expected)
+
+    def test_read_excel_without_slicing(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref
+        adjust_expected(expected, read_ext, engine)
+
+        result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
+        tm.assert_frame_equal(result, expected)
+
+    def test_usecols_excel_range_str(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref[["C", "D"]]
+        adjust_expected(expected, read_ext, engine)
+
+        result = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,D:E"
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_usecols_excel_range_str_invalid(self, read_ext):
+        msg = "Invalid column name: E1"
+
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols="D:E1")
+
+    def test_index_col_label_error(self, read_ext):
+        msg = "list indices must be integers.*, not str"
+
+        with pytest.raises(TypeError, match=msg):
+            pd.read_excel(
+                "test1" + read_ext,
+                sheet_name="Sheet1",
+                index_col=["A"],
+                usecols=["A", "C"],
+            )
+
+    def test_index_col_str(self, read_ext):
+        # see gh-52716
+        result = pd.read_excel("test1" + read_ext, sheet_name="Sheet3", index_col="A")
+        expected = DataFrame(
+            columns=["B", "C", "D", "E", "F"], index=Index([], name="A")
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_index_col_empty(self, read_ext):
+        # see gh-9208
+        result = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet3", index_col=["A", "B", "C"]
+        )
+        expected = DataFrame(
+            columns=["D", "E", "F"],
+            index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
+        )
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("index_col", [None, 2])
+    def test_index_col_with_unnamed(self, read_ext, index_col):
+        # see gh-18792
+        result = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet4", index_col=index_col
+        )
+        expected = DataFrame(
+            [["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
+        )
+        if index_col:
+            expected = expected.set_index(expected.columns[index_col])
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_usecols_pass_non_existent_column(self, read_ext):
+        msg = (
+            "Usecols do not match columns, "
+            "columns expected but not found: "
+            r"\['E'\]"
+        )
+
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel("test1" + read_ext, usecols=["E"])
+
+    def test_usecols_wrong_type(self, read_ext):
+        msg = (
+            "'usecols' must either be list-like of "
+            "all strings, all unicode, all integers or a callable."
+        )
+
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel("test1" + read_ext, usecols=["E1", 0])
+
+    def test_excel_stop_iterator(self, read_ext):
+        parsed = pd.read_excel("test2" + read_ext, sheet_name="Sheet1")
+        expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
+        tm.assert_frame_equal(parsed, expected)
+
+    def test_excel_cell_error_na(self, request, engine, read_ext):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        # https://github.com/tafia/calamine/issues/355
+        if engine == "calamine" and read_ext == ".ods":
+            request.applymarker(
+                pytest.mark.xfail(reason="Calamine can't extract error from ods files")
+            )
+
+        parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1")
+        expected = DataFrame([[np.nan]], columns=["Test"])
+        tm.assert_frame_equal(parsed, expected)
+
+    def test_excel_table(self, request, engine, read_ext, df_ref):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        expected = df_ref
+        adjust_expected(expected, read_ext, engine)
+
+        df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0)
+        df2 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet2", skiprows=[1], index_col=0
+        )
+        # TODO add index to file
+        tm.assert_frame_equal(df1, expected)
+        tm.assert_frame_equal(df2, expected)
+
+        df3 = pd.read_excel(
+            "test1" + read_ext, sheet_name="Sheet1", index_col=0, skipfooter=1
+        )
+        tm.assert_frame_equal(df3, df1.iloc[:-1])
+
+    def test_reader_special_dtypes(self, request, engine, read_ext):
+        xfail_datetimes_with_pyxlsb(engine, request)
+
+        unit = get_exp_unit(read_ext, engine)
+        expected = DataFrame.from_dict(
+            {
+                "IntCol": [1, 2, -3, 4, 0],
+                "FloatCol": [1.25, 2.25, 1.83, 1.92, 0.0000000005],
+                "BoolCol": [True, False, True, True, False],
+                "StrCol": [1, 2, 3, 4, 5],
+                "Str2Col": ["a", 3, "c", "d", "e"],
+                "DateCol": Index(
+                    [
+                        datetime(2013, 10, 30),
+                        datetime(2013, 10, 31),
+                        datetime(1905, 1, 1),
+                        datetime(2013, 12, 14),
+                        datetime(2015, 3, 14),
+                    ],
+                    dtype=f"M8[{unit}]",
+                ),
+            },
+        )
+        basename = "test_types"
+
+        # should read in correctly and infer types
+        actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1")
+        tm.assert_frame_equal(actual, expected)
+
+        # if not coercing number, then int comes in as float
+        float_expected = expected.copy()
+        float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
+        actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1")
+        tm.assert_frame_equal(actual, float_expected)
+
+        # check setting Index (assuming xls and xlsx are the same here)
+        for icol, name in enumerate(expected.columns):
+            actual = pd.read_excel(
+                basename + read_ext, sheet_name="Sheet1", index_col=icol
+            )
+            exp = expected.set_index(name)
+            tm.assert_frame_equal(actual, exp)
+
+        expected["StrCol"] = expected["StrCol"].apply(str)
+        actual = pd.read_excel(
+            basename + read_ext, sheet_name="Sheet1", converters={"StrCol": str}
+        )
+        tm.assert_frame_equal(actual, expected)
+
+    # GH8212 - support for converters and missing values
+    def test_reader_converters(self, read_ext):
+        basename = "test_converters"
+
+        expected = DataFrame.from_dict(
+            {
+                "IntCol": [1, 2, -3, -1000, 0],
+                "FloatCol": [12.5, np.nan, 18.3, 19.2, 0.000000005],
+                "BoolCol": ["Found", "Found", "Found", "Not found", "Found"],
+                "StrCol": ["1", np.nan, "3", "4", "5"],
+            }
+        )
+
+        converters = {
+            "IntCol": lambda x: int(x) if x != "" else -1000,
+            "FloatCol": lambda x: 10 * x if x else np.nan,
+            2: lambda x: "Found" if x != "" else "Not found",
+            3: lambda x: str(x) if x else "",
+        }
+
+        # should read in correctly and set types of single cells (not array
+        # dtypes)
+        actual = pd.read_excel(
+            basename + read_ext, sheet_name="Sheet1", converters=converters
+        )
+        tm.assert_frame_equal(actual, expected)
+
+    def test_reader_dtype(self, read_ext):
+        # GH 8212
+        basename = "testdtype"
+        actual = pd.read_excel(basename + read_ext)
+
+        expected = DataFrame(
+            {
+                "a": [1, 2, 3, 4],
+                "b": [2.5, 3.5, 4.5, 5.5],
+                "c": [1, 2, 3, 4],
+                "d": [1.0, 2.0, np.nan, 4.0],
+            }
+        )
+
+        tm.assert_frame_equal(actual, expected)
+
+        actual = pd.read_excel(
+            basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
+        )
+
+        expected["a"] = expected["a"].astype("float64")
+        expected["b"] = expected["b"].astype("float32")
+        expected["c"] = Series(["001", "002", "003", "004"], dtype=object)
+        tm.assert_frame_equal(actual, expected)
+
+        msg = "Unable to convert column d to type int64"
+        with pytest.raises(ValueError, match=msg):
+            pd.read_excel(basename + read_ext, dtype={"d": "int64"})
+
+    @pytest.mark.parametrize(
+        "dtype,expected",
+        [
+            (
+                None,
+                DataFrame(
+                    {
+                        "a": [1, 2, 3, 4],
+                        "b": [2.5, 3.5, 4.5, 5.5],
+                        "c": [1, 2, 3, 4],
+                        "d": [1.0, 2.0, np.nan, 4.0],
+                    }
+                ),
+            ),
+            (
+                {"a": "float64", "b": "float32", "c": str, "d": str},
+                DataFrame(
+                    {
+                        "a": Series([1, 2, 3, 4], dtype="float64"),
+                        "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
+                        "c": Series(["001", "002", "003", "004"], dtype=object),
+                        "d": Series(["1", "2", np.nan, "4"], dtype=object),
+                    }
+                ),
+            ),
+        ],
+
)
|
591 |
+
def test_reader_dtype_str(self, read_ext, dtype, expected):
|
592 |
+
# see gh-20377
|
593 |
+
basename = "testdtype"
|
594 |
+
|
595 |
+
actual = pd.read_excel(basename + read_ext, dtype=dtype)
|
596 |
+
tm.assert_frame_equal(actual, expected)
|
597 |
+
|
598 |
+
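    # The dtype_backend tests below round-trip a frame of nullable-dtype
    # columns through to_excel/read_excel and compare the result against
    # either the original nullable frame or its pyarrow-backed equivalent,
    # depending on the backend requested.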
    def test_dtype_backend(self, read_ext, dtype_backend, engine):
        # GH#36712
        if read_ext in (".xlsb", ".xls"):
            pytest.skip(f"No engine for filetype: '{read_ext}'")

        df = DataFrame(
            {
                "a": Series([1, 3], dtype="Int64"),
                "b": Series([2.5, 4.5], dtype="Float64"),
                "c": Series([True, False], dtype="boolean"),
                "d": Series(["a", "b"], dtype="string"),
                "e": Series([pd.NA, 6], dtype="Int64"),
                "f": Series([pd.NA, 7.5], dtype="Float64"),
                "g": Series([pd.NA, True], dtype="boolean"),
                "h": Series([pd.NA, "a"], dtype="string"),
                "i": Series([pd.Timestamp("2019-12-31")] * 2),
                "j": Series([pd.NA, pd.NA], dtype="Int64"),
            }
        )
        with tm.ensure_clean(read_ext) as file_path:
            df.to_excel(file_path, sheet_name="test", index=False)
            result = pd.read_excel(
                file_path, sheet_name="test", dtype_backend=dtype_backend
            )
        if dtype_backend == "pyarrow":
            import pyarrow as pa

            from pandas.arrays import ArrowExtensionArray

            expected = DataFrame(
                {
                    col: ArrowExtensionArray(pa.array(df[col], from_pandas=True))
                    for col in df.columns
                }
            )
            # pyarrow by default infers timestamp resolution as us, not ns
            expected["i"] = ArrowExtensionArray(
                expected["i"].array._pa_array.cast(pa.timestamp(unit="us"))
            )
            # pyarrow supports a null type, so don't have to default to Int64
            expected["j"] = ArrowExtensionArray(pa.array([None, None]))
        else:
            expected = df
            unit = get_exp_unit(read_ext, engine)
            expected["i"] = expected["i"].astype(f"M8[{unit}]")

        tm.assert_frame_equal(result, expected)

    def test_dtype_backend_and_dtype(self, read_ext):
        # GH#36712
        if read_ext in (".xlsb", ".xls"):
            pytest.skip(f"No engine for filetype: '{read_ext}'")

        df = DataFrame({"a": [np.nan, 1.0], "b": [2.5, np.nan]})
        with tm.ensure_clean(read_ext) as file_path:
            df.to_excel(file_path, sheet_name="test", index=False)
            result = pd.read_excel(
                file_path,
                sheet_name="test",
                dtype_backend="numpy_nullable",
                dtype="float64",
            )
        tm.assert_frame_equal(result, df)

    @pytest.mark.xfail(
        using_pyarrow_string_dtype(), reason="infer_string takes precedence"
    )
    def test_dtype_backend_string(self, read_ext, string_storage):
        # GH#36712
        if read_ext in (".xlsb", ".xls"):
            pytest.skip(f"No engine for filetype: '{read_ext}'")

        pa = pytest.importorskip("pyarrow")

        with pd.option_context("mode.string_storage", string_storage):
            df = DataFrame(
                {
                    "a": np.array(["a", "b"], dtype=np.object_),
                    "b": np.array(["x", pd.NA], dtype=np.object_),
                }
            )
            with tm.ensure_clean(read_ext) as file_path:
                df.to_excel(file_path, sheet_name="test", index=False)
                result = pd.read_excel(
                    file_path, sheet_name="test", dtype_backend="numpy_nullable"
                )

            if string_storage == "python":
                expected = DataFrame(
                    {
                        "a": StringArray(np.array(["a", "b"], dtype=np.object_)),
                        "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)),
                    }
                )
            else:
                expected = DataFrame(
                    {
                        "a": ArrowStringArray(pa.array(["a", "b"])),
                        "b": ArrowStringArray(pa.array(["x", None])),
                    }
                )
            tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("dtypes, exp_value", [({}, 1), ({"a.1": "int64"}, 1)])
    def test_dtype_mangle_dup_cols(self, read_ext, dtypes, exp_value):
        # GH#35211
        basename = "df_mangle_dup_col_dtypes"
        dtype_dict = {"a": object, **dtypes}
        dtype_dict_copy = dtype_dict.copy()
        # GH#42462
        result = pd.read_excel(basename + read_ext, dtype=dtype_dict)
        expected = DataFrame(
            {
                "a": Series([1], dtype=object),
                "a.1": Series([exp_value], dtype=object if not dtypes else None),
            }
        )
        assert dtype_dict == dtype_dict_copy, "dtype dict changed"
        tm.assert_frame_equal(result, expected)

    def test_reader_spaces(self, read_ext):
        # see gh-32207
        basename = "test_spaces"

        actual = pd.read_excel(basename + read_ext)
        expected = DataFrame(
            {
                "testcol": [
                    "this is great",
                    "4 spaces",
                    "1 trailing ",
                    " 1 leading",
                    "2 spaces multiple times",
                ]
            }
        )
        tm.assert_frame_equal(actual, expected)

    # gh-36122, gh-35802
    @pytest.mark.parametrize(
        "basename,expected",
        [
            ("gh-35802", DataFrame({"COLUMN": ["Test (1)"]})),
            ("gh-36122", DataFrame(columns=["got 2nd sa"])),
        ],
    )
    def test_read_excel_ods_nested_xml(self, engine, read_ext, basename, expected):
        # see gh-35802
        if engine != "odf":
            pytest.skip(f"Skipped for engine: {engine}")

        actual = pd.read_excel(basename + read_ext)
        tm.assert_frame_equal(actual, expected)

    def test_reading_all_sheets(self, read_ext):
        # Test reading all sheet names by setting sheet_name to None,
        # Ensure a dict is returned.
        # See PR #9450
        basename = "test_multisheet"
        dfs = pd.read_excel(basename + read_ext, sheet_name=None)
        # ensure this is not alphabetical to test order preservation
        expected_keys = ["Charlie", "Alpha", "Beta"]
        tm.assert_contains_all(expected_keys, dfs.keys())
        # Issue 9930
        # Ensure sheet order is preserved
        assert expected_keys == list(dfs.keys())

    def test_reading_multiple_specific_sheets(self, read_ext):
        # Test reading specific sheet names by specifying a mixed list
        # of integers and strings, and confirm that duplicated sheet
        # references (positions/names) are removed properly.
        # Ensure a dict is returned
        # See PR #9450
        basename = "test_multisheet"
        # Explicitly request duplicates. Only the set should be returned.
        expected_keys = [2, "Charlie", "Charlie"]
        dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
        expected_keys = list(set(expected_keys))
        tm.assert_contains_all(expected_keys, dfs.keys())
        assert len(expected_keys) == len(dfs.keys())

    def test_reading_all_sheets_with_blank(self, read_ext):
        # Test reading all sheet names by setting sheet_name to None,
        # In the case where some sheets are blank.
        # Issue #11711
        basename = "blank_with_header"
        dfs = pd.read_excel(basename + read_ext, sheet_name=None)
        expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
        tm.assert_contains_all(expected_keys, dfs.keys())

    # GH6403
    def test_read_excel_blank(self, read_ext):
        actual = pd.read_excel("blank" + read_ext, sheet_name="Sheet1")
        tm.assert_frame_equal(actual, DataFrame())

    def test_read_excel_blank_with_header(self, read_ext):
        expected = DataFrame(columns=["col_1", "col_2"])
        actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1")
        tm.assert_frame_equal(actual, expected)

    def test_exception_message_includes_sheet_name(self, read_ext):
        # GH 48706
        with pytest.raises(ValueError, match=r" \(sheet: Sheet1\)$"):
            pd.read_excel("blank_with_header" + read_ext, header=[1], sheet_name=None)
        with pytest.raises(ZeroDivisionError, match=r" \(sheet: Sheet1\)$"):
            pd.read_excel("test1" + read_ext, usecols=lambda x: 1 / 0, sheet_name=None)

    @pytest.mark.filterwarnings("ignore:Cell A4 is marked:UserWarning:openpyxl")
    def test_date_conversion_overflow(self, request, engine, read_ext):
        # GH 10001 : pandas.ExcelFile ignore parse_dates=False
        xfail_datetimes_with_pyxlsb(engine, request)

        expected = DataFrame(
            [
                [pd.Timestamp("2016-03-12"), "Marc Johnson"],
                [pd.Timestamp("2016-03-16"), "Jack Black"],
                [1e20, "Timothy Brown"],
            ],
            columns=["DateColWithBigInt", "StringCol"],
        )

        if engine == "openpyxl":
            request.applymarker(
                pytest.mark.xfail(reason="Maybe not supported by openpyxl")
            )

        if engine is None and read_ext in (".xlsx", ".xlsm"):
            # GH 35029
            request.applymarker(
                pytest.mark.xfail(reason="Defaults to openpyxl, maybe not supported")
            )

        result = pd.read_excel("testdateoverflow" + read_ext)
        tm.assert_frame_equal(result, expected)

    def test_sheet_name(self, request, read_ext, engine, df_ref):
        xfail_datetimes_with_pyxlsb(engine, request)

        filename = "test1"
        sheet_name = "Sheet1"

        expected = df_ref
        adjust_expected(expected, read_ext, engine)

        df1 = pd.read_excel(
            filename + read_ext, sheet_name=sheet_name, index_col=0
        )  # doc
        df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name)

        tm.assert_frame_equal(df1, expected)
        tm.assert_frame_equal(df2, expected)

    def test_excel_read_buffer(self, read_ext):
        pth = "test1" + read_ext
        expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0)
        with open(pth, "rb") as f:
            actual = pd.read_excel(f, sheet_name="Sheet1", index_col=0)
            tm.assert_frame_equal(expected, actual)

    def test_bad_engine_raises(self):
        bad_engine = "foo"
        with pytest.raises(ValueError, match="Unknown engine: foo"):
            pd.read_excel("", engine=bad_engine)

    @pytest.mark.parametrize(
        "sheet_name",
        [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]],
    )
    def test_bad_sheetname_raises(self, read_ext, sheet_name):
        # GH 39250
        msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found"
        with pytest.raises(ValueError, match=msg):
            pd.read_excel("blank" + read_ext, sheet_name=sheet_name)

    def test_missing_file_raises(self, read_ext):
        bad_file = f"foo{read_ext}"
        # CI tests with other languages, translates to "No such file or directory"
        match = "|".join(
            [
                "(No such file or directory",
                "没有那个文件或目录",
                "File o directory non esistente)",
            ]
        )
        with pytest.raises(FileNotFoundError, match=match):
            pd.read_excel(bad_file)

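    # Each engine surfaces its own exception type for undecodable input, so
    # the test below selects the expected error per engine before feeding a
    # junk byte stream in through BytesIO.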
    def test_corrupt_bytes_raises(self, engine):
        bad_stream = b"foo"
        if engine is None:
            error = ValueError
            msg = (
                "Excel file format cannot be determined, you must "
                "specify an engine manually."
            )
        elif engine == "xlrd":
            from xlrd import XLRDError

            error = XLRDError
            msg = (
                "Unsupported format, or corrupt file: Expected BOF "
                "record; found b'foo'"
            )
        elif engine == "calamine":
            from python_calamine import CalamineError

            error = CalamineError
            msg = "Cannot detect file format"
        else:
            error = BadZipFile
            msg = "File is not a zip file"
        with pytest.raises(error, match=msg):
            pd.read_excel(BytesIO(bad_stream))

    @pytest.mark.network
    @pytest.mark.single_cpu
    def test_read_from_http_url(self, httpserver, read_ext):
        with open("test1" + read_ext, "rb") as f:
            httpserver.serve_content(content=f.read())
        url_table = pd.read_excel(httpserver.url)
        local_table = pd.read_excel("test1" + read_ext)
        tm.assert_frame_equal(url_table, local_table)

    @td.skip_if_not_us_locale
    @pytest.mark.single_cpu
    def test_read_from_s3_url(self, read_ext, s3_public_bucket, s3so):
        # Bucket created in tests/io/conftest.py
        with open("test1" + read_ext, "rb") as f:
            s3_public_bucket.put_object(Key="test1" + read_ext, Body=f)

        url = f"s3://{s3_public_bucket.name}/test1" + read_ext

        url_table = pd.read_excel(url, storage_options=s3so)
        local_table = pd.read_excel("test1" + read_ext)
        tm.assert_frame_equal(url_table, local_table)

    @pytest.mark.single_cpu
    def test_read_from_s3_object(self, read_ext, s3_public_bucket, s3so):
        # GH 38788
        # Bucket created in tests/io/conftest.py
        with open("test1" + read_ext, "rb") as f:
            s3_public_bucket.put_object(Key="test1" + read_ext, Body=f)

        import s3fs

        s3 = s3fs.S3FileSystem(**s3so)

        with s3.open(f"s3://{s3_public_bucket.name}/test1" + read_ext) as f:
            url_table = pd.read_excel(f)

        local_table = pd.read_excel("test1" + read_ext)
        tm.assert_frame_equal(url_table, local_table)

    @pytest.mark.slow
    def test_read_from_file_url(self, read_ext, datapath):
        # FILE
        localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext)
        local_table = pd.read_excel(localtable)

        try:
            url_table = pd.read_excel("file://localhost/" + localtable)
        except URLError:
            # fails on some systems
            platform_info = " ".join(platform.uname()).strip()
            pytest.skip(f"failing on {platform_info}")

        tm.assert_frame_equal(url_table, local_table)

    def test_read_from_pathlib_path(self, read_ext):
        # GH12655
        str_path = "test1" + read_ext
        expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0)

        path_obj = Path("test1" + read_ext)
        actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0)

        tm.assert_frame_equal(expected, actual)

    @td.skip_if_no("py.path")
    def test_read_from_py_localpath(self, read_ext):
        # GH12655
        from py.path import local as LocalPath

        str_path = os.path.join("test1" + read_ext)
        expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0)

        path_obj = LocalPath().join("test1" + read_ext)
        actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0)

        tm.assert_frame_equal(expected, actual)

    def test_close_from_py_localpath(self, read_ext):
        # GH31467
        str_path = os.path.join("test1" + read_ext)
        with open(str_path, "rb") as f:
            x = pd.read_excel(f, sheet_name="Sheet1", index_col=0)
            del x
            # should not throw an exception because the passed file was closed
            f.read()

    def test_reader_seconds(self, request, engine, read_ext):
        xfail_datetimes_with_pyxlsb(engine, request)

        # GH 55045
        if engine == "calamine" and read_ext == ".ods":
            request.applymarker(
                pytest.mark.xfail(
                    reason="ODS file contains bad datetime (seconds as text)"
                )
            )

        # Test reading times with and without milliseconds. GH5945.
        expected = DataFrame.from_dict(
            {
                "Time": [
                    time(1, 2, 3),
                    time(2, 45, 56, 100000),
                    time(4, 29, 49, 200000),
                    time(6, 13, 42, 300000),
                    time(7, 57, 35, 400000),
                    time(9, 41, 28, 500000),
                    time(11, 25, 21, 600000),
                    time(13, 9, 14, 700000),
                    time(14, 53, 7, 800000),
                    time(16, 37, 0, 900000),
                    time(18, 20, 54),
                ]
            }
        )

        actual = pd.read_excel("times_1900" + read_ext, sheet_name="Sheet1")
        tm.assert_frame_equal(actual, expected)

        actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1")
        tm.assert_frame_equal(actual, expected)

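    # The MultiIndex tests below all read from the shared "testmultiindex"
    # workbook, whose sheets lay out the same 4x4 frame with the MultiIndex
    # on the columns, on the index, on both, and with or without level names.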
    def test_read_excel_multiindex(self, request, engine, read_ext):
        # see gh-4679
        xfail_datetimes_with_pyxlsb(engine, request)

        unit = get_exp_unit(read_ext, engine)

        mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
        mi_file = "testmultiindex" + read_ext

        # "mi_column" sheet
        expected = DataFrame(
            [
                [1, 2.5, pd.Timestamp("2015-01-01"), True],
                [2, 3.5, pd.Timestamp("2015-01-02"), False],
                [3, 4.5, pd.Timestamp("2015-01-03"), False],
                [4, 5.5, pd.Timestamp("2015-01-04"), True],
            ],
            columns=mi,
        )
        expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]")

        actual = pd.read_excel(
            mi_file, sheet_name="mi_column", header=[0, 1], index_col=0
        )
        tm.assert_frame_equal(actual, expected)

        # "mi_index" sheet
        expected.index = mi
        expected.columns = ["a", "b", "c", "d"]

        actual = pd.read_excel(mi_file, sheet_name="mi_index", index_col=[0, 1])
        tm.assert_frame_equal(actual, expected)

        # "both" sheet
        expected.columns = mi

        actual = pd.read_excel(
            mi_file, sheet_name="both", index_col=[0, 1], header=[0, 1]
        )
        tm.assert_frame_equal(actual, expected)

        # "mi_index_name" sheet
        expected.columns = ["a", "b", "c", "d"]
        expected.index = mi.set_names(["ilvl1", "ilvl2"])

        actual = pd.read_excel(mi_file, sheet_name="mi_index_name", index_col=[0, 1])
        tm.assert_frame_equal(actual, expected)

        # "mi_column_name" sheet
        expected.index = list(range(4))
        expected.columns = mi.set_names(["c1", "c2"])
        actual = pd.read_excel(
            mi_file, sheet_name="mi_column_name", header=[0, 1], index_col=0
        )
        tm.assert_frame_equal(actual, expected)

        # see gh-11317
        # "name_with_int" sheet
        expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"])

        actual = pd.read_excel(
            mi_file, sheet_name="name_with_int", index_col=0, header=[0, 1]
        )
        tm.assert_frame_equal(actual, expected)

        # "both_name" sheet
        expected.columns = mi.set_names(["c1", "c2"])
        expected.index = mi.set_names(["ilvl1", "ilvl2"])

        actual = pd.read_excel(
            mi_file, sheet_name="both_name", index_col=[0, 1], header=[0, 1]
        )
        tm.assert_frame_equal(actual, expected)

        # "both_skiprows" sheet
        actual = pd.read_excel(
            mi_file,
            sheet_name="both_name_skiprows",
            index_col=[0, 1],
            header=[0, 1],
            skiprows=2,
        )
        tm.assert_frame_equal(actual, expected)

    @pytest.mark.parametrize(
        "sheet_name,idx_lvl2",
        [
            ("both_name_blank_after_mi_name", [np.nan, "b", "a", "b"]),
            ("both_name_multiple_blanks", [np.nan] * 4),
        ],
    )
    def test_read_excel_multiindex_blank_after_name(
        self, request, engine, read_ext, sheet_name, idx_lvl2
    ):
        # GH34673
        xfail_datetimes_with_pyxlsb(engine, request)

        mi_file = "testmultiindex" + read_ext
        mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]], names=["c1", "c2"])

        unit = get_exp_unit(read_ext, engine)

        expected = DataFrame(
            [
                [1, 2.5, pd.Timestamp("2015-01-01"), True],
                [2, 3.5, pd.Timestamp("2015-01-02"), False],
                [3, 4.5, pd.Timestamp("2015-01-03"), False],
                [4, 5.5, pd.Timestamp("2015-01-04"), True],
            ],
            columns=mi,
            index=MultiIndex.from_arrays(
                (["foo", "foo", "bar", "bar"], idx_lvl2),
                names=["ilvl1", "ilvl2"],
            ),
        )
        expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]")
        result = pd.read_excel(
            mi_file,
            sheet_name=sheet_name,
            index_col=[0, 1],
            header=[0, 1],
        )
        tm.assert_frame_equal(result, expected)

    def test_read_excel_multiindex_header_only(self, read_ext):
        # see gh-11733.
        #
        # Don't try to parse a header name if there isn't one.
        mi_file = "testmultiindex" + read_ext
        result = pd.read_excel(mi_file, sheet_name="index_col_none", header=[0, 1])

        exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
        expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
        tm.assert_frame_equal(result, expected)

    def test_excel_old_index_format(self, read_ext):
        # see gh-4679
        filename = "test_index_name_pre17" + read_ext

        # We detect headers to determine if index names exist, so
        # that "index" name in the "names" version of the data will
        # now be interpreted as rows that include null data.
        data = np.array(
            [
                [np.nan, np.nan, np.nan, np.nan, np.nan],
                ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
                ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
                ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
                ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
                ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
            ],
            dtype=object,
        )
        columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
        mi = MultiIndex(
            levels=[
                ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
                ["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
            ],
            codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
            names=[None, None],
        )
        si = Index(
            ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None
        )

        expected = DataFrame(data, index=si, columns=columns)

        actual = pd.read_excel(filename, sheet_name="single_names", index_col=0)
        tm.assert_frame_equal(actual, expected)

        expected.index = mi

        actual = pd.read_excel(filename, sheet_name="multi_names", index_col=[0, 1])
        tm.assert_frame_equal(actual, expected)

        # The analogous versions of the "names" version data
        # where there are explicitly no names for the indices.
        data = np.array(
            [
                ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
                ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
                ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
                ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
                ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
            ]
        )
        columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
        mi = MultiIndex(
            levels=[
                ["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
                ["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
            ],
            codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
            names=[None, None],
        )
        si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None)

        expected = DataFrame(data, index=si, columns=columns)

        actual = pd.read_excel(filename, sheet_name="single_no_names", index_col=0)
        tm.assert_frame_equal(actual, expected)

        expected.index = mi

        actual = pd.read_excel(filename, sheet_name="multi_no_names", index_col=[0, 1])
        tm.assert_frame_equal(actual, expected)

    def test_read_excel_bool_header_arg(self, read_ext):
        # GH 6114
        msg = "Passing a bool to header is invalid"
        for arg in [True, False]:
            with pytest.raises(TypeError, match=msg):
                pd.read_excel("test1" + read_ext, header=arg)

    def test_read_excel_skiprows(self, request, engine, read_ext):
        # GH 4903
        xfail_datetimes_with_pyxlsb(engine, request)

        unit = get_exp_unit(read_ext, engine)

        actual = pd.read_excel(
            "testskiprows" + read_ext, sheet_name="skiprows_list", skiprows=[0, 2]
        )
        expected = DataFrame(
            [
                [1, 2.5, pd.Timestamp("2015-01-01"), True],
                [2, 3.5, pd.Timestamp("2015-01-02"), False],
                [3, 4.5, pd.Timestamp("2015-01-03"), False],
                [4, 5.5, pd.Timestamp("2015-01-04"), True],
            ],
            columns=["a", "b", "c", "d"],
        )
        expected["c"] = expected["c"].astype(f"M8[{unit}]")
        tm.assert_frame_equal(actual, expected)

        actual = pd.read_excel(
            "testskiprows" + read_ext,
            sheet_name="skiprows_list",
            skiprows=np.array([0, 2]),
        )
        tm.assert_frame_equal(actual, expected)

        # GH36435
        actual = pd.read_excel(
            "testskiprows" + read_ext,
            sheet_name="skiprows_list",
            skiprows=lambda x: x in [0, 2],
        )
        tm.assert_frame_equal(actual, expected)

        actual = pd.read_excel(
            "testskiprows" + read_ext,
            sheet_name="skiprows_list",
            skiprows=3,
            names=["a", "b", "c", "d"],
        )
        expected = DataFrame(
            [
                # [1, 2.5, pd.Timestamp("2015-01-01"), True],
                [2, 3.5, pd.Timestamp("2015-01-02"), False],
                [3, 4.5, pd.Timestamp("2015-01-03"), False],
                [4, 5.5, pd.Timestamp("2015-01-04"), True],
            ],
            columns=["a", "b", "c", "d"],
        )
        expected["c"] = expected["c"].astype(f"M8[{unit}]")
        tm.assert_frame_equal(actual, expected)

    def test_read_excel_skiprows_callable_not_in(self, request, engine, read_ext):
        # GH 4903
        xfail_datetimes_with_pyxlsb(engine, request)
        unit = get_exp_unit(read_ext, engine)

        actual = pd.read_excel(
            "testskiprows" + read_ext,
            sheet_name="skiprows_list",
            skiprows=lambda x: x not in [1, 3, 5],
        )
        expected = DataFrame(
            [
                [1, 2.5, pd.Timestamp("2015-01-01"), True],
                # [2, 3.5, pd.Timestamp("2015-01-02"), False],
                [3, 4.5, pd.Timestamp("2015-01-03"), False],
                # [4, 5.5, pd.Timestamp("2015-01-04"), True],
            ],
            columns=["a", "b", "c", "d"],
        )
        expected["c"] = expected["c"].astype(f"M8[{unit}]")
        tm.assert_frame_equal(actual, expected)

    def test_read_excel_nrows(self, read_ext):
        # GH 16645
        num_rows_to_pull = 5
        actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
        expected = pd.read_excel("test1" + read_ext)
        expected = expected[:num_rows_to_pull]
        tm.assert_frame_equal(actual, expected)

    def test_read_excel_nrows_greater_than_nrows_in_file(self, read_ext):
        # GH 16645
        expected = pd.read_excel("test1" + read_ext)
        num_records_in_file = len(expected)
        num_rows_to_pull = num_records_in_file + 10
        actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
        tm.assert_frame_equal(actual, expected)

    def test_read_excel_nrows_non_integer_parameter(self, read_ext):
        # GH 16645
        msg = "'nrows' must be an integer >=0"
        with pytest.raises(ValueError, match=msg):
            pd.read_excel("test1" + read_ext, nrows="5")

    @pytest.mark.parametrize(
        "filename,sheet_name,header,index_col,skiprows",
        [
            ("testmultiindex", "mi_column", [0, 1], 0, None),
            ("testmultiindex", "mi_index", None, [0, 1], None),
            ("testmultiindex", "both", [0, 1], [0, 1], None),
            ("testmultiindex", "mi_column_name", [0, 1], 0, None),
            ("testskiprows", "skiprows_list", None, None, [0, 2]),
            ("testskiprows", "skiprows_list", None, None, lambda x: x in (0, 2)),
        ],
    )
    def test_read_excel_nrows_params(
        self, read_ext, filename, sheet_name, header, index_col, skiprows
    ):
        """
        For various parameters, we should get the same result whether we
        limit the rows during load (nrows=3) or after (df.iloc[:3]).
        """
        # GH 46894
        expected = pd.read_excel(
            filename + read_ext,
            sheet_name=sheet_name,
            header=header,
            index_col=index_col,
            skiprows=skiprows,
        ).iloc[:3]
        actual = pd.read_excel(
            filename + read_ext,
            sheet_name=sheet_name,
            header=header,
            index_col=index_col,
            skiprows=skiprows,
            nrows=3,
        )
        tm.assert_frame_equal(actual, expected)

    def test_deprecated_kwargs(self, read_ext):
        with pytest.raises(TypeError, match="but 3 positional arguments"):
            pd.read_excel("test1" + read_ext, "Sheet1", 0)

    def test_no_header_with_list_index_col(self, read_ext):
        # GH 31783
        file_name = "testmultiindex" + read_ext
        data = [("B", "B"), ("key", "val"), (3, 4), (3, 4)]
        idx = MultiIndex.from_tuples(
            [("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1)
        )
        expected = DataFrame(data, index=idx, columns=(2, 3))
        result = pd.read_excel(
            file_name, sheet_name="index_col_none", index_col=[0, 1], header=None
        )
        tm.assert_frame_equal(expected, result)

    def test_one_col_noskip_blank_line(self, read_ext):
        # GH 39808
        file_name = "one_col_blank_line" + read_ext
        data = [0.5, np.nan, 1, 2]
        expected = DataFrame(data, columns=["numbers"])
        result = pd.read_excel(file_name)
        tm.assert_frame_equal(result, expected)

    def test_multiheader_two_blank_lines(self, read_ext):
        # GH 40442
        file_name = "testmultiindex" + read_ext
        columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")])
        data = [[np.nan, np.nan], [np.nan, np.nan], [1, 3], [2, 4]]
        expected = DataFrame(data, columns=columns)
        result = pd.read_excel(
            file_name, sheet_name="mi_column_empty_rows", header=[0, 1]
        )
        tm.assert_frame_equal(result, expected)

    def test_trailing_blanks(self, read_ext):
        """
        Sheets can contain blank cells with no data. Some of our readers
        were including those cells, creating many empty rows and columns
        """
        file_name = "trailing_blanks" + read_ext
        result = pd.read_excel(file_name)
        assert result.shape == (3, 3)

    def test_ignore_chartsheets_by_str(self, request, engine, read_ext):
        # GH 41448
        if read_ext == ".ods":
            pytest.skip("chartsheets do not exist in the ODF format")
        if engine == "pyxlsb":
            request.applymarker(
                pytest.mark.xfail(
                    reason="pyxlsb can't distinguish chartsheets from worksheets"
                )
            )
        with pytest.raises(ValueError, match="Worksheet named 'Chart1' not found"):
            pd.read_excel("chartsheet" + read_ext, sheet_name="Chart1")

    def test_ignore_chartsheets_by_int(self, request, engine, read_ext):
        # GH 41448
        if read_ext == ".ods":
            pytest.skip("chartsheets do not exist in the ODF format")
        if engine == "pyxlsb":
            request.applymarker(
                pytest.mark.xfail(
                    reason="pyxlsb can't distinguish chartsheets from worksheets"
                )
            )
        with pytest.raises(
            ValueError, match="Worksheet index 1 is invalid, 1 worksheets found"
        ):
            pd.read_excel("chartsheet" + read_ext, sheet_name=1)

    def test_euro_decimal_format(self, read_ext):
        # copied from read_csv
        result = pd.read_excel("test_decimal" + read_ext, decimal=",", skiprows=1)
        expected = DataFrame(
            [
                [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
                [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
                [3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
            ],
            columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
        )
        tm.assert_frame_equal(result, expected)


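# The tests above read through pd.read_excel directly; the class below
# repeats the key scenarios through pd.ExcelFile and ExcelFile.parse so both
# entry points stay covered.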
class TestExcelFileRead:
    def test_deprecate_bytes_input(self, engine, read_ext):
        # GH 53830
        msg = (
            "Passing bytes to 'read_excel' is deprecated and "
            "will be removed in a future version. To read from a "
            "byte string, wrap it in a `BytesIO` object."
        )

        with tm.assert_produces_warning(
            FutureWarning, match=msg, raise_on_extra_warnings=False
        ):
            with open("test1" + read_ext, "rb") as f:
                pd.read_excel(f.read(), engine=engine)

    @pytest.fixture(autouse=True)
    def cd_and_set_engine(self, engine, datapath, monkeypatch):
        """
        Change directory and set engine for ExcelFile objects.
        """
        func = partial(pd.ExcelFile, engine=engine)
        monkeypatch.chdir(datapath("io", "data", "excel"))
        monkeypatch.setattr(pd, "ExcelFile", func)

    def test_engine_used(self, read_ext, engine):
        expected_defaults = {
            "xlsx": "openpyxl",
            "xlsm": "openpyxl",
            "xlsb": "pyxlsb",
            "xls": "xlrd",
            "ods": "odf",
        }

        with pd.ExcelFile("test1" + read_ext) as excel:
            result = excel.engine

        if engine is not None:
            expected = engine
        else:
            expected = expected_defaults[read_ext[1:]]
        assert result == expected

    def test_excel_passes_na(self, read_ext):
        with pd.ExcelFile("test4" + read_ext) as excel:
            parsed = pd.read_excel(
                excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"]
            )
        expected = DataFrame(
            [["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"]
        )
        tm.assert_frame_equal(parsed, expected)

        with pd.ExcelFile("test4" + read_ext) as excel:
            parsed = pd.read_excel(
                excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"]
            )
        expected = DataFrame(
            [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
        )
        tm.assert_frame_equal(parsed, expected)

        # 13967
        with pd.ExcelFile("test5" + read_ext) as excel:
            parsed = pd.read_excel(
                excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"]
            )
        expected = DataFrame(
            [["1.#QNAN"], [1], ["nan"], [np.nan], ["rabbit"]], columns=["Test"]
        )
        tm.assert_frame_equal(parsed, expected)

        with pd.ExcelFile("test5" + read_ext) as excel:
            parsed = pd.read_excel(
                excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"]
            )
        expected = DataFrame(
            [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"]
        )
        tm.assert_frame_equal(parsed, expected)

    @pytest.mark.parametrize("na_filter", [None, True, False])
    def test_excel_passes_na_filter(self, read_ext, na_filter):
        # gh-25453
        kwargs = {}

        if na_filter is not None:
            kwargs["na_filter"] = na_filter

        with pd.ExcelFile("test5" + read_ext) as excel:
            parsed = pd.read_excel(
                excel,
                sheet_name="Sheet1",
                keep_default_na=True,
                na_values=["apple"],
                **kwargs,
            )

        if na_filter is False:
            expected = [["1.#QNAN"], [1], ["nan"], ["apple"], ["rabbit"]]
        else:
            expected = [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]]

        expected = DataFrame(expected, columns=["Test"])
        tm.assert_frame_equal(parsed, expected)

    def test_excel_table_sheet_by_index(self, request, engine, read_ext, df_ref):
        xfail_datetimes_with_pyxlsb(engine, request)

        expected = df_ref
        adjust_expected(expected, read_ext, engine)

        with pd.ExcelFile("test1" + read_ext) as excel:
            df1 = pd.read_excel(excel, sheet_name=0, index_col=0)
            df2 = pd.read_excel(excel, sheet_name=1, skiprows=[1], index_col=0)
        tm.assert_frame_equal(df1, expected)
        tm.assert_frame_equal(df2, expected)

        with pd.ExcelFile("test1" + read_ext) as excel:
            df1 = excel.parse(0, index_col=0)
            df2 = excel.parse(1, skiprows=[1], index_col=0)
        tm.assert_frame_equal(df1, expected)
        tm.assert_frame_equal(df2, expected)

        with pd.ExcelFile("test1" + read_ext) as excel:
            df3 = pd.read_excel(excel, sheet_name=0, index_col=0, skipfooter=1)
        tm.assert_frame_equal(df3, df1.iloc[:-1])

        with pd.ExcelFile("test1" + read_ext) as excel:
            df3 = excel.parse(0, index_col=0, skipfooter=1)

        tm.assert_frame_equal(df3, df1.iloc[:-1])

    def test_sheet_name(self, request, engine, read_ext, df_ref):
        xfail_datetimes_with_pyxlsb(engine, request)

        expected = df_ref
        adjust_expected(expected, read_ext, engine)

        filename = "test1"
        sheet_name = "Sheet1"

        with pd.ExcelFile(filename + read_ext) as excel:
            df1_parse = excel.parse(sheet_name=sheet_name, index_col=0)  # doc

        with pd.ExcelFile(filename + read_ext) as excel:
            df2_parse = excel.parse(index_col=0, sheet_name=sheet_name)

        tm.assert_frame_equal(df1_parse, expected)
        tm.assert_frame_equal(df2_parse, expected)

    @pytest.mark.parametrize(
        "sheet_name",
        [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]],
    )
    def test_bad_sheetname_raises(self, read_ext, sheet_name):
        # GH 39250
        msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found"
        with pytest.raises(ValueError, match=msg):
            with pd.ExcelFile("blank" + read_ext) as excel:
                excel.parse(sheet_name=sheet_name)

    def test_excel_read_buffer(self, engine, read_ext):
        pth = "test1" + read_ext
        expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0, engine=engine)

        with open(pth, "rb") as f:
            with pd.ExcelFile(f) as xls:
                actual = pd.read_excel(xls, sheet_name="Sheet1", index_col=0)

        tm.assert_frame_equal(expected, actual)

    def test_reader_closes_file(self, engine, read_ext):
        with open("test1" + read_ext, "rb") as f:
            with pd.ExcelFile(f) as xlsx:
                # parses okay
                pd.read_excel(xlsx, sheet_name="Sheet1", index_col=0, engine=engine)

        assert f.closed

    def test_conflicting_excel_engines(self, read_ext):
        # GH 26566
        msg = "Engine should not be specified when passing an ExcelFile"

        with pd.ExcelFile("test1" + read_ext) as xl:
            with pytest.raises(ValueError, match=msg):
                pd.read_excel(xl, engine="foo")

    def test_excel_read_binary(self, engine, read_ext):
        # GH 15914
        expected = pd.read_excel("test1" + read_ext, engine=engine)

        with open("test1" + read_ext, "rb") as f:
            data = f.read()

        actual = pd.read_excel(BytesIO(data), engine=engine)
        tm.assert_frame_equal(expected, actual)

    def test_excel_read_binary_via_read_excel(self, read_ext, engine):
        # GH 38424
        with open("test1" + read_ext, "rb") as f:
            result = pd.read_excel(f, engine=engine)
        expected = pd.read_excel("test1" + read_ext, engine=engine)
        tm.assert_frame_equal(result, expected)

    def test_read_excel_header_index_out_of_range(self, engine):
        # GH#43143
        with open("df_header_oob.xlsx", "rb") as f:
            with pytest.raises(ValueError, match="exceeds maximum"):
                pd.read_excel(f, header=[0, 1])

    @pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"])
    def test_header_with_index_col(self, filename):
        # GH 33476
        idx = Index(["Z"], name="I2")
        cols = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])
        expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64")
        result = pd.read_excel(
            filename, sheet_name="Sheet1", index_col=0, header=[0, 1]
        )
        tm.assert_frame_equal(expected, result)

    def test_read_datetime_multiindex(self, request, engine, read_ext):
        # GH 34748
        xfail_datetimes_with_pyxlsb(engine, request)

        f = "test_datetime_mi" + read_ext
        with pd.ExcelFile(f) as excel:
            actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine)

        unit = get_exp_unit(read_ext, engine)
        dti = pd.DatetimeIndex(["2020-02-29", "2020-03-01"], dtype=f"M8[{unit}]")
        expected_column_index = MultiIndex.from_arrays(
            [dti[:1], dti[1:]],
            names=[
                dti[0].to_pydatetime(),
                dti[1].to_pydatetime(),
            ],
        )
        expected = DataFrame([], index=[], columns=expected_column_index)

        tm.assert_frame_equal(expected, actual)

    def test_engine_invalid_option(self, read_ext):
        # read_ext includes the '.' hence the weird formatting
        with pytest.raises(ValueError, match="Value must be one of *"):
            with pd.option_context(f"io.excel{read_ext}.reader", "abc"):
                pass

    def test_ignore_chartsheets(self, request, engine, read_ext):
        # GH 41448
        if read_ext == ".ods":
            pytest.skip("chartsheets do not exist in the ODF format")
        if engine == "pyxlsb":
            request.applymarker(
                pytest.mark.xfail(
                    reason="pyxlsb can't distinguish chartsheets from worksheets"
                )
            )
        with pd.ExcelFile("chartsheet" + read_ext) as excel:
            assert excel.sheet_names == ["Sheet1"]

    def test_corrupt_files_closed(self, engine, read_ext):
        # GH41778
        errors = (BadZipFile,)
        if engine is None:
            pytest.skip(f"Invalid test for engine={engine}")
        elif engine == "xlrd":
            import xlrd

            errors = (BadZipFile, xlrd.biffh.XLRDError)
        elif engine == "calamine":
            from python_calamine import CalamineError

            errors = (CalamineError,)

        with tm.ensure_clean(f"corrupt{read_ext}") as file:
            Path(file).write_text("corrupt", encoding="utf-8")
            with tm.assert_produces_warning(False):
                try:
                    pd.ExcelFile(file, engine=engine)
                except errors:
                    pass
venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py
ADDED
@@ -0,0 +1,298 @@
import contextlib
import time

import numpy as np
import pytest

from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td

from pandas import (
    DataFrame,
    read_excel,
)
import pandas._testing as tm

from pandas.io.excel import ExcelWriter
from pandas.io.formats.excel import ExcelFormatter

pytest.importorskip("jinja2")
# jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel
# could compute styles and render to excel without jinja2, since there is no
# 'template' file, but this needs the import error to be delayed until render time.

if is_platform_windows():
    pytestmark = pytest.mark.single_cpu


def assert_equal_cell_styles(cell1, cell2):
    # TODO: should find a better way to check equality
    assert cell1.alignment.__dict__ == cell2.alignment.__dict__
    assert cell1.border.__dict__ == cell2.border.__dict__
    assert cell1.fill.__dict__ == cell2.fill.__dict__
    assert cell1.font.__dict__ == cell2.font.__dict__
    assert cell1.number_format == cell2.number_format
    assert cell1.protection.__dict__ == cell2.protection.__dict__


@pytest.mark.parametrize(
    "engine",
    ["xlsxwriter", "openpyxl"],
)
def test_styler_to_excel_unstyled(engine):
    # compare DataFrame.to_excel and Styler.to_excel when no styles applied
    pytest.importorskip(engine)
    df = DataFrame(np.random.default_rng(2).standard_normal((2, 2)))
    with tm.ensure_clean(".xlsx") as path:
        with ExcelWriter(path, engine=engine) as writer:
            df.to_excel(writer, sheet_name="dataframe")
            df.style.to_excel(writer, sheet_name="unstyled")

        openpyxl = pytest.importorskip("openpyxl")  # test loading only with openpyxl
        with contextlib.closing(openpyxl.load_workbook(path)) as wb:
            for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns):
                assert len(col1) == len(col2)
                for cell1, cell2 in zip(col1, col2):
                    assert cell1.value == cell2.value
                    assert_equal_cell_styles(cell1, cell2)


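# Each entry below is (CSS declaration, attribute path into the openpyxl cell
# object, expected value); where the two writer engines disagree, the expected
# value is a dict keyed by engine name.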
shared_style_params = [
|
61 |
+
(
|
62 |
+
"background-color: #111222",
|
63 |
+
["fill", "fgColor", "rgb"],
|
64 |
+
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
|
65 |
+
),
|
66 |
+
(
|
67 |
+
"color: #111222",
|
68 |
+
["font", "color", "value"],
|
69 |
+
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
|
70 |
+
),
|
71 |
+
("font-family: Arial;", ["font", "name"], "arial"),
|
72 |
+
("font-weight: bold;", ["font", "b"], True),
|
73 |
+
("font-style: italic;", ["font", "i"], True),
|
74 |
+
("text-decoration: underline;", ["font", "u"], "single"),
|
75 |
+
("number-format: $??,???.00;", ["number_format"], "$??,???.00"),
|
76 |
+
("text-align: left;", ["alignment", "horizontal"], "left"),
|
77 |
+
(
|
78 |
+
"vertical-align: bottom;",
|
79 |
+
["alignment", "vertical"],
|
80 |
+
{"xlsxwriter": None, "openpyxl": "bottom"}, # xlsxwriter Fails
|
81 |
+
),
|
82 |
+
("vertical-align: middle;", ["alignment", "vertical"], "center"),
|
83 |
+
# Border widths
|
84 |
+
("border-left: 2pt solid red", ["border", "left", "style"], "medium"),
|
85 |
+
("border-left: 1pt dotted red", ["border", "left", "style"], "dotted"),
|
86 |
+
("border-left: 2pt dotted red", ["border", "left", "style"], "mediumDashDotDot"),
|
87 |
+
("border-left: 1pt dashed red", ["border", "left", "style"], "dashed"),
|
88 |
+
("border-left: 2pt dashed red", ["border", "left", "style"], "mediumDashed"),
|
89 |
+
("border-left: 1pt solid red", ["border", "left", "style"], "thin"),
|
90 |
+
("border-left: 3pt solid red", ["border", "left", "style"], "thick"),
|
91 |
+
# Border expansion
|
92 |
+
(
|
93 |
+
"border-left: 2pt solid #111222",
|
94 |
+
["border", "left", "color", "rgb"],
|
95 |
+
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
|
96 |
+
),
|
97 |
+
("border: 1pt solid red", ["border", "top", "style"], "thin"),
|
98 |
+
(
|
99 |
+
"border: 1pt solid #111222",
|
100 |
+
["border", "top", "color", "rgb"],
|
101 |
+
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
|
102 |
+
),
|
103 |
+
("border: 1pt solid red", ["border", "right", "style"], "thin"),
|
104 |
+
(
|
105 |
+
"border: 1pt solid #111222",
|
106 |
+
["border", "right", "color", "rgb"],
|
107 |
+
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
|
108 |
+
),
|
109 |
+
("border: 1pt solid red", ["border", "bottom", "style"], "thin"),
|
110 |
+
(
|
111 |
+
"border: 1pt solid #111222",
|
112 |
+
["border", "bottom", "color", "rgb"],
|
113 |
+
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
|
114 |
+
),
|
115 |
+
("border: 1pt solid red", ["border", "left", "style"], "thin"),
|
116 |
+
(
|
117 |
+
"border: 1pt solid #111222",
|
118 |
+
["border", "left", "color", "rgb"],
|
119 |
+
{"xlsxwriter": "FF111222", "openpyxl": "00111222"},
|
120 |
+
),
|
121 |
+
# Border styles
|
122 |
+
(
|
123 |
+
"border-left-style: hair; border-left-color: black",
|
124 |
+
["border", "left", "style"],
|
125 |
+
"hair",
|
126 |
+
),
|
127 |
+
]
|
128 |
+
|
129 |
+
|
130 |
+
@pytest.mark.parametrize(
    "engine",
    ["xlsxwriter", "openpyxl"],
)
@pytest.mark.parametrize("css, attrs, expected", shared_style_params)
def test_styler_to_excel_basic(engine, css, attrs, expected):
    pytest.importorskip(engine)
    df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
    styler = df.style.map(lambda x: css)

    with tm.ensure_clean(".xlsx") as path:
        with ExcelWriter(path, engine=engine) as writer:
            df.to_excel(writer, sheet_name="dataframe")
            styler.to_excel(writer, sheet_name="styled")

        openpyxl = pytest.importorskip("openpyxl")  # test loading only with openpyxl
        with contextlib.closing(openpyxl.load_workbook(path)) as wb:
            # test unstyled data cell does not have expected styles
            # test styled cell has expected styles
            u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2)
            for attr in attrs:
                # walk the attribute path, e.g. cell -> border -> left -> style
                u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr)

            if isinstance(expected, dict):
                assert u_cell is None or u_cell != expected[engine]
                assert s_cell == expected[engine]
            else:
                assert u_cell is None or u_cell != expected
                assert s_cell == expected


@pytest.mark.parametrize(
    "engine",
    ["xlsxwriter", "openpyxl"],
)
@pytest.mark.parametrize("css, attrs, expected", shared_style_params)
def test_styler_to_excel_basic_indexes(engine, css, attrs, expected):
    pytest.importorskip(engine)
    df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))

    styler = df.style
    styler.map_index(lambda x: css, axis=0)
    styler.map_index(lambda x: css, axis=1)

    null_styler = df.style
    null_styler.map(lambda x: "null: css;")
    null_styler.map_index(lambda x: "null: css;", axis=0)
    null_styler.map_index(lambda x: "null: css;", axis=1)

    with tm.ensure_clean(".xlsx") as path:
        with ExcelWriter(path, engine=engine) as writer:
            null_styler.to_excel(writer, sheet_name="null_styled")
            styler.to_excel(writer, sheet_name="styled")

        openpyxl = pytest.importorskip("openpyxl")  # test loading only with openpyxl
        with contextlib.closing(openpyxl.load_workbook(path)) as wb:
            # test null-styled index cells do not have expected styles
            # test styled cells have expected styles
            ui_cell, si_cell = wb["null_styled"].cell(2, 1), wb["styled"].cell(2, 1)
            uc_cell, sc_cell = wb["null_styled"].cell(1, 2), wb["styled"].cell(1, 2)
            for attr in attrs:
                ui_cell, si_cell = getattr(ui_cell, attr, None), getattr(si_cell, attr)
                uc_cell, sc_cell = getattr(uc_cell, attr, None), getattr(sc_cell, attr)

            if isinstance(expected, dict):
                assert ui_cell is None or ui_cell != expected[engine]
                assert si_cell == expected[engine]
                assert uc_cell is None or uc_cell != expected[engine]
                assert sc_cell == expected[engine]
            else:
                assert ui_cell is None or ui_cell != expected
                assert si_cell == expected
                assert uc_cell is None or uc_cell != expected
                assert sc_cell == expected


# From https://openpyxl.readthedocs.io/en/stable/api/openpyxl.styles.borders.html
# Note: Leaving behavior of "width"-type styles undefined; user should use
# border-width instead.
excel_border_styles = [
    # "thin",
    "dashed",
    "mediumDashDot",
    "dashDotDot",
    "hair",
    "dotted",
    "mediumDashDotDot",
    # "medium",
    "double",
    "dashDot",
    "slantDashDot",
    # "thick",
    "mediumDashed",
]


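# "thin", "medium", and "thick" are commented out above because CSS treats them
# as width keywords rather than line styles; their mapping is already covered by
# the border-width cases in shared_style_params.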
@pytest.mark.parametrize(
    "engine",
    ["xlsxwriter", "openpyxl"],
)
@pytest.mark.parametrize("border_style", excel_border_styles)
def test_styler_to_excel_border_style(engine, border_style):
    css = f"border-left: {border_style} black thin"
    attrs = ["border", "left", "style"]
    expected = border_style

    pytest.importorskip(engine)
    df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
    styler = df.style.map(lambda x: css)

    with tm.ensure_clean(".xlsx") as path:
        with ExcelWriter(path, engine=engine) as writer:
            df.to_excel(writer, sheet_name="dataframe")
            styler.to_excel(writer, sheet_name="styled")

        openpyxl = pytest.importorskip("openpyxl")  # test loading only with openpyxl
        with contextlib.closing(openpyxl.load_workbook(path)) as wb:
            # test unstyled data cell does not have expected styles
            # test styled cell has expected styles
            u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2)
            for attr in attrs:
                u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr)

            if isinstance(expected, dict):
                assert u_cell is None or u_cell != expected[engine]
                assert s_cell == expected[engine]
            else:
                assert u_cell is None or u_cell != expected
                assert s_cell == expected


def test_styler_custom_converter():
    openpyxl = pytest.importorskip("openpyxl")

    def custom_converter(css):
        # ignore the incoming CSS and always emit a fixed font color
        return {"font": {"color": {"rgb": "111222"}}}

    df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
    styler = df.style.map(lambda x: "color: #888999")
    with tm.ensure_clean(".xlsx") as path:
        with ExcelWriter(path, engine="openpyxl") as writer:
            ExcelFormatter(styler, style_converter=custom_converter).write(
                writer, sheet_name="custom"
            )

        with contextlib.closing(openpyxl.load_workbook(path)) as wb:
            assert wb["custom"].cell(2, 2).font.color.value == "00111222"


@pytest.mark.single_cpu
@td.skip_if_not_us_locale
def test_styler_to_s3(s3_public_bucket, s3so):
    # GH#46381

    mock_bucket_name, target_file = s3_public_bucket.name, "test.xlsx"
    df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
    styler = df.style.set_sticky(axis="index")
    styler.to_excel(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so)
    # poll for up to ~5 seconds until the uploaded object is visible on moto
    timeout = 5
    while True:
        if target_file in (obj.key for obj in s3_public_bucket.objects.all()):
            break
        time.sleep(0.1)
        timeout -= 0.1
        assert timeout > 0, "Timed out waiting for file to appear on moto"
    result = read_excel(
        f"s3://{mock_bucket_name}/{target_file}", index_col=0, storage_options=s3so
    )
    tm.assert_frame_equal(result, df)
venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py
ADDED
@@ -0,0 +1,1511 @@
from datetime import (
    date,
    datetime,
    timedelta,
)
from functools import partial
from io import BytesIO
import os
import re

import numpy as np
import pytest

from pandas.compat import is_platform_windows
from pandas.compat._constants import PY310
from pandas.compat._optional import import_optional_dependency
import pandas.util._test_decorators as td

import pandas as pd
from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    date_range,
    option_context,
)
import pandas._testing as tm

from pandas.io.excel import (
    ExcelFile,
    ExcelWriter,
    _OpenpyxlWriter,
    _XlsxWriter,
    register_writer,
)
from pandas.io.excel._util import _writers

if is_platform_windows():
    pytestmark = pytest.mark.single_cpu


def get_exp_unit(path: str) -> str:
    # datetimes currently round-trip through Excel at nanosecond resolution
    return "ns"


@pytest.fixture
def frame(float_frame):
    """
    Returns the first ten items in fixture "float_frame".
    """
    return float_frame[:10]


@pytest.fixture(params=[True, False])
def merge_cells(request):
    return request.param


@pytest.fixture
def path(ext):
    """
    Fixture to open file for use in each test case.
    """
    with tm.ensure_clean(ext) as file_path:
        yield file_path


@pytest.fixture
def set_engine(engine, ext):
    """
    Fixture to set engine for use in each test case.

    Rather than requiring `engine=...` to be provided explicitly as an
    argument in each test, this fixture sets a global option to dictate
    which engine should be used to write Excel files. After executing
    the test it rolls back said change to the global option.
    """
    option_name = f"io.excel.{ext.strip('.')}.writer"
    with option_context(option_name, engine):
        yield


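# Run each round-trip test once per supported extension, skipping combinations
# whose optional reader/writer dependency is not installed.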
@pytest.mark.parametrize(
    "ext",
    [
        pytest.param(".xlsx", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]),
        pytest.param(".xlsm", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]),
        pytest.param(
            ".xlsx", marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")]
        ),
        pytest.param(".ods", marks=td.skip_if_no("odf")),
    ],
)
class TestRoundTrip:
    @pytest.mark.parametrize(
        "header,expected",
        [(None, DataFrame([np.nan] * 4)), (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))],
    )
    def test_read_one_empty_col_no_header(self, ext, header, expected):
        # xref gh-12292
        filename = "no_header"
        df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]])

        with tm.ensure_clean(ext) as path:
            df.to_excel(path, sheet_name=filename, index=False, header=False)
            result = pd.read_excel(
                path, sheet_name=filename, usecols=[0], header=header
            )

        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "header,expected",
        [(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))],
    )
    def test_read_one_empty_col_with_header(self, ext, header, expected):
        filename = "with_header"
        df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]])

        with tm.ensure_clean(ext) as path:
            df.to_excel(path, sheet_name="with_header", index=False, header=True)
            result = pd.read_excel(
                path, sheet_name=filename, usecols=[0], header=header
            )

        tm.assert_frame_equal(result, expected)

    def test_set_column_names_in_parameter(self, ext):
        # GH 12870 : pass down column names associated with
        # keyword argument names
        refdf = DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"])

        with tm.ensure_clean(ext) as pth:
            with ExcelWriter(pth) as writer:
                refdf.to_excel(
                    writer, sheet_name="Data_no_head", header=False, index=False
                )
                refdf.to_excel(writer, sheet_name="Data_with_head", index=False)

            refdf.columns = ["A", "B"]

            with ExcelFile(pth) as reader:
                xlsdf_no_head = pd.read_excel(
                    reader, sheet_name="Data_no_head", header=None, names=["A", "B"]
                )
                xlsdf_with_head = pd.read_excel(
                    reader,
                    sheet_name="Data_with_head",
                    index_col=None,
                    names=["A", "B"],
                )

            tm.assert_frame_equal(xlsdf_no_head, refdf)
            tm.assert_frame_equal(xlsdf_with_head, refdf)

    def test_creating_and_reading_multiple_sheets(self, ext):
        # see gh-9450
        #
        # Test reading multiple sheets from a runtime-created
        # Excel file with multiple sheets.
        def tdf(col_sheet_name):
            d, i = [11, 22, 33], [1, 2, 3]
            return DataFrame(d, i, columns=[col_sheet_name])

        sheets = ["AAA", "BBB", "CCC"]

        dfs = [tdf(s) for s in sheets]
        dfs = dict(zip(sheets, dfs))

        with tm.ensure_clean(ext) as pth:
            with ExcelWriter(pth) as ew:
                for sheetname, df in dfs.items():
                    df.to_excel(ew, sheet_name=sheetname)

            dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0)

            for s in sheets:
                tm.assert_frame_equal(dfs[s], dfs_returned[s])

    def test_read_excel_multiindex_empty_level(self, ext):
        # see gh-12453
        with tm.ensure_clean(ext) as path:
            df = DataFrame(
                {
                    ("One", "x"): {0: 1},
                    ("Two", "X"): {0: 3},
                    ("Two", "Y"): {0: 7},
                    ("Zero", ""): {0: 0},
                }
            )

            expected = DataFrame(
                {
                    ("One", "x"): {0: 1},
                    ("Two", "X"): {0: 3},
                    ("Two", "Y"): {0: 7},
                    ("Zero", "Unnamed: 4_level_1"): {0: 0},
                }
            )

            df.to_excel(path)
            actual = pd.read_excel(path, header=[0, 1], index_col=0)
            tm.assert_frame_equal(actual, expected)

            df = DataFrame(
                {
                    ("Beg", ""): {0: 0},
                    ("Middle", "x"): {0: 1},
                    ("Tail", "X"): {0: 3},
                    ("Tail", "Y"): {0: 7},
                }
            )

            expected = DataFrame(
                {
                    ("Beg", "Unnamed: 1_level_1"): {0: 0},
                    ("Middle", "x"): {0: 1},
                    ("Tail", "X"): {0: 3},
                    ("Tail", "Y"): {0: 7},
                }
            )

            df.to_excel(path)
            actual = pd.read_excel(path, header=[0, 1], index_col=0)
            tm.assert_frame_equal(actual, expected)

|
228 |
+
@pytest.mark.parametrize("r_idx_names", ["b", None])
|
229 |
+
@pytest.mark.parametrize("c_idx_levels", [1, 3])
|
230 |
+
@pytest.mark.parametrize("r_idx_levels", [1, 3])
|
231 |
+
def test_excel_multindex_roundtrip(
|
232 |
+
self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels, request
|
233 |
+
):
|
234 |
+
# see gh-4679
|
235 |
+
with tm.ensure_clean(ext) as pth:
|
236 |
+
# Empty name case current read in as
|
237 |
+
# unnamed levels, not Nones.
|
238 |
+
check_names = bool(r_idx_names) or r_idx_levels <= 1
|
239 |
+
|
240 |
+
if c_idx_levels == 1:
|
241 |
+
columns = Index(list("abcde"))
|
242 |
+
else:
|
243 |
+
columns = MultiIndex.from_arrays(
|
244 |
+
[range(5) for _ in range(c_idx_levels)],
|
245 |
+
names=[f"{c_idx_names}-{i}" for i in range(c_idx_levels)],
|
246 |
+
)
|
247 |
+
if r_idx_levels == 1:
|
248 |
+
index = Index(list("ghijk"))
|
249 |
+
else:
|
250 |
+
index = MultiIndex.from_arrays(
|
251 |
+
[range(5) for _ in range(r_idx_levels)],
|
252 |
+
names=[f"{r_idx_names}-{i}" for i in range(r_idx_levels)],
|
253 |
+
)
|
254 |
+
df = DataFrame(
|
255 |
+
1.1 * np.ones((5, 5)),
|
256 |
+
columns=columns,
|
257 |
+
index=index,
|
258 |
+
)
|
259 |
+
df.to_excel(pth)
|
260 |
+
|
261 |
+
act = pd.read_excel(
|
262 |
+
pth,
|
263 |
+
index_col=list(range(r_idx_levels)),
|
264 |
+
header=list(range(c_idx_levels)),
|
265 |
+
)
|
266 |
+
tm.assert_frame_equal(df, act, check_names=check_names)
|
267 |
+
|
268 |
+
df.iloc[0, :] = np.nan
|
269 |
+
df.to_excel(pth)
|
270 |
+
|
271 |
+
act = pd.read_excel(
|
272 |
+
pth,
|
273 |
+
index_col=list(range(r_idx_levels)),
|
274 |
+
header=list(range(c_idx_levels)),
|
275 |
+
)
|
276 |
+
tm.assert_frame_equal(df, act, check_names=check_names)
|
277 |
+
|
278 |
+
df.iloc[-1, :] = np.nan
|
279 |
+
df.to_excel(pth)
|
280 |
+
act = pd.read_excel(
|
281 |
+
pth,
|
282 |
+
index_col=list(range(r_idx_levels)),
|
283 |
+
header=list(range(c_idx_levels)),
|
284 |
+
)
|
285 |
+
tm.assert_frame_equal(df, act, check_names=check_names)
|
286 |
+
|
287 |
+
    def test_read_excel_parse_dates(self, ext):
        # see gh-11544, gh-12051
        df = DataFrame(
            {"col": [1, 2, 3], "date_strings": date_range("2012-01-01", periods=3)}
        )
        df2 = df.copy()
        df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")

        with tm.ensure_clean(ext) as pth:
            df2.to_excel(pth)

            res = pd.read_excel(pth, index_col=0)
            tm.assert_frame_equal(df2, res)

            res = pd.read_excel(pth, parse_dates=["date_strings"], index_col=0)
            tm.assert_frame_equal(df, res)

            date_parser = lambda x: datetime.strptime(x, "%m/%d/%Y")
            with tm.assert_produces_warning(
                FutureWarning,
                match="use 'date_format' instead",
                raise_on_extra_warnings=False,
            ):
                res = pd.read_excel(
                    pth,
                    parse_dates=["date_strings"],
                    date_parser=date_parser,
                    index_col=0,
                )
            tm.assert_frame_equal(df, res)
            res = pd.read_excel(
                pth, parse_dates=["date_strings"], date_format="%m/%d/%Y", index_col=0
            )
            tm.assert_frame_equal(df, res)

    def test_multiindex_interval_datetimes(self, ext):
        # GH 30986
        midx = MultiIndex.from_arrays(
            [
                range(4),
                pd.interval_range(
                    start=pd.Timestamp("2020-01-01"), periods=4, freq="6ME"
                ),
            ]
        )
        df = DataFrame(range(4), index=midx)
        with tm.ensure_clean(ext) as pth:
            df.to_excel(pth)
            result = pd.read_excel(pth, index_col=[0, 1])
            expected = DataFrame(
                range(4),
                MultiIndex.from_arrays(
                    [
                        range(4),
                        [
                            "(2020-01-31 00:00:00, 2020-07-31 00:00:00]",
                            "(2020-07-31 00:00:00, 2021-01-31 00:00:00]",
                            "(2021-01-31 00:00:00, 2021-07-31 00:00:00]",
                            "(2021-07-31 00:00:00, 2022-01-31 00:00:00]",
                        ],
                    ]
                ),
            )
            tm.assert_frame_equal(result, expected)


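# Writer-focused tests; the set_engine fixture routes DataFrame.to_excel through
# each (engine, extension) pair below.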
@pytest.mark.parametrize(
    "engine,ext",
    [
        pytest.param(
            "openpyxl",
            ".xlsx",
            marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")],
        ),
        pytest.param(
            "openpyxl",
            ".xlsm",
            marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")],
        ),
        pytest.param(
            "xlsxwriter",
            ".xlsx",
            marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")],
        ),
        pytest.param("odf", ".ods", marks=td.skip_if_no("odf")),
    ],
)
@pytest.mark.usefixtures("set_engine")
class TestExcelWriter:
    def test_excel_sheet_size(self, path):
        # GH 26080
        breaking_row_count = 2**20 + 1
        breaking_col_count = 2**14 + 1
        # purposely using two arrays to prevent memory issues while testing
        row_arr = np.zeros(shape=(breaking_row_count, 1))
        col_arr = np.zeros(shape=(1, breaking_col_count))
        row_df = DataFrame(row_arr)
        col_df = DataFrame(col_arr)

        msg = "sheet is too large"
        with pytest.raises(ValueError, match=msg):
            row_df.to_excel(path)

        with pytest.raises(ValueError, match=msg):
            col_df.to_excel(path)

    def test_excel_sheet_by_name_raise(self, path):
        gt = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
        gt.to_excel(path)

        with ExcelFile(path) as xl:
            df = pd.read_excel(xl, sheet_name=0, index_col=0)

            tm.assert_frame_equal(gt, df)

            msg = "Worksheet named '0' not found"
            with pytest.raises(ValueError, match=msg):
                pd.read_excel(xl, "0")

    def test_excel_writer_context_manager(self, frame, path):
        with ExcelWriter(path) as writer:
            frame.to_excel(writer, sheet_name="Data1")
            frame2 = frame.copy()
            frame2.columns = frame.columns[::-1]
            frame2.to_excel(writer, sheet_name="Data2")

        with ExcelFile(path) as reader:
            found_df = pd.read_excel(reader, sheet_name="Data1", index_col=0)
            found_df2 = pd.read_excel(reader, sheet_name="Data2", index_col=0)

            tm.assert_frame_equal(found_df, frame)
            tm.assert_frame_equal(found_df2, frame2)

    def test_roundtrip(self, frame, path):
        frame = frame.copy()
        frame.iloc[:5, frame.columns.get_loc("A")] = np.nan

        frame.to_excel(path, sheet_name="test1")
        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
        frame.to_excel(path, sheet_name="test1", header=False)
        frame.to_excel(path, sheet_name="test1", index=False)

        # test roundtrip
        frame.to_excel(path, sheet_name="test1")
        recons = pd.read_excel(path, sheet_name="test1", index_col=0)
        tm.assert_frame_equal(frame, recons)

        frame.to_excel(path, sheet_name="test1", index=False)
        recons = pd.read_excel(path, sheet_name="test1", index_col=None)
        recons.index = frame.index
        tm.assert_frame_equal(frame, recons)

        frame.to_excel(path, sheet_name="test1", na_rep="NA")
        recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["NA"])
        tm.assert_frame_equal(frame, recons)

        # GH 3611
        frame.to_excel(path, sheet_name="test1", na_rep="88")
        recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["88"])
        tm.assert_frame_equal(frame, recons)

        frame.to_excel(path, sheet_name="test1", na_rep="88")
        recons = pd.read_excel(
            path, sheet_name="test1", index_col=0, na_values=[88, 88.0]
        )
        tm.assert_frame_equal(frame, recons)

        # GH 6573
        frame.to_excel(path, sheet_name="Sheet1")
        recons = pd.read_excel(path, index_col=0)
        tm.assert_frame_equal(frame, recons)

        frame.to_excel(path, sheet_name="0")
        recons = pd.read_excel(path, index_col=0)
        tm.assert_frame_equal(frame, recons)

        # GH 8825 Pandas Series should provide to_excel method
        s = frame["A"]
        s.to_excel(path)
        recons = pd.read_excel(path, index_col=0)
        tm.assert_frame_equal(s.to_frame(), recons)

    def test_mixed(self, frame, path):
        mixed_frame = frame.copy()
        mixed_frame["foo"] = "bar"

        mixed_frame.to_excel(path, sheet_name="test1")
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
        tm.assert_frame_equal(mixed_frame, recons)

    def test_ts_frame(self, path):
        unit = get_exp_unit(path)
        df = DataFrame(
            np.random.default_rng(2).standard_normal((5, 4)),
            columns=Index(list("ABCD")),
            index=date_range("2000-01-01", periods=5, freq="B"),
        )

        # freq doesn't round-trip
        index = pd.DatetimeIndex(np.asarray(df.index), freq=None)
        df.index = index

        expected = df[:]
        expected.index = expected.index.as_unit(unit)

        df.to_excel(path, sheet_name="test1")
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
        tm.assert_frame_equal(expected, recons)

    def test_basics_with_nan(self, frame, path):
        frame = frame.copy()
        frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
        frame.to_excel(path, sheet_name="test1")
        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
        frame.to_excel(path, sheet_name="test1", header=False)
        frame.to_excel(path, sheet_name="test1", index=False)

@pytest.mark.parametrize("np_type", [np.int8, np.int16, np.int32, np.int64])
|
507 |
+
def test_int_types(self, np_type, path):
|
508 |
+
# Test np.int values read come back as int
|
509 |
+
# (rather than float which is Excel's format).
|
510 |
+
df = DataFrame(
|
511 |
+
np.random.default_rng(2).integers(-10, 10, size=(10, 2)), dtype=np_type
|
512 |
+
)
|
513 |
+
df.to_excel(path, sheet_name="test1")
|
514 |
+
|
515 |
+
with ExcelFile(path) as reader:
|
516 |
+
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
|
517 |
+
|
518 |
+
int_frame = df.astype(np.int64)
|
519 |
+
tm.assert_frame_equal(int_frame, recons)
|
520 |
+
|
521 |
+
recons2 = pd.read_excel(path, sheet_name="test1", index_col=0)
|
522 |
+
tm.assert_frame_equal(int_frame, recons2)
|
523 |
+
|
524 |
+
@pytest.mark.parametrize("np_type", [np.float16, np.float32, np.float64])
|
525 |
+
def test_float_types(self, np_type, path):
|
526 |
+
# Test np.float values read come back as float.
|
527 |
+
df = DataFrame(np.random.default_rng(2).random(10), dtype=np_type)
|
528 |
+
df.to_excel(path, sheet_name="test1")
|
529 |
+
|
530 |
+
with ExcelFile(path) as reader:
|
531 |
+
recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
|
532 |
+
np_type
|
533 |
+
)
|
534 |
+
|
535 |
+
tm.assert_frame_equal(df, recons)
|
536 |
+
|
537 |
+
def test_bool_types(self, path):
|
538 |
+
# Test np.bool_ values read come back as float.
|
539 |
+
df = DataFrame([1, 0, True, False], dtype=np.bool_)
|
540 |
+
df.to_excel(path, sheet_name="test1")
|
541 |
+
|
542 |
+
with ExcelFile(path) as reader:
|
543 |
+
recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
|
544 |
+
np.bool_
|
545 |
+
)
|
546 |
+
|
547 |
+
tm.assert_frame_equal(df, recons)
|
548 |
+
|
549 |
+
def test_inf_roundtrip(self, path):
|
550 |
+
df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
|
551 |
+
df.to_excel(path, sheet_name="test1")
|
552 |
+
|
553 |
+
with ExcelFile(path) as reader:
|
554 |
+
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
|
555 |
+
|
556 |
+
tm.assert_frame_equal(df, recons)
|
557 |
+
|
558 |
+
def test_sheets(self, frame, path):
|
559 |
+
# freq doesn't round-trip
|
560 |
+
unit = get_exp_unit(path)
|
561 |
+
tsframe = DataFrame(
|
562 |
+
np.random.default_rng(2).standard_normal((5, 4)),
|
563 |
+
columns=Index(list("ABCD")),
|
564 |
+
index=date_range("2000-01-01", periods=5, freq="B"),
|
565 |
+
)
|
566 |
+
index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
|
567 |
+
tsframe.index = index
|
568 |
+
|
569 |
+
expected = tsframe[:]
|
570 |
+
expected.index = expected.index.as_unit(unit)
|
571 |
+
|
572 |
+
frame = frame.copy()
|
573 |
+
frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
|
574 |
+
|
575 |
+
frame.to_excel(path, sheet_name="test1")
|
576 |
+
frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
|
577 |
+
frame.to_excel(path, sheet_name="test1", header=False)
|
578 |
+
frame.to_excel(path, sheet_name="test1", index=False)
|
579 |
+
|
580 |
+
# Test writing to separate sheets
|
581 |
+
with ExcelWriter(path) as writer:
|
582 |
+
frame.to_excel(writer, sheet_name="test1")
|
583 |
+
tsframe.to_excel(writer, sheet_name="test2")
|
584 |
+
with ExcelFile(path) as reader:
|
585 |
+
recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
|
586 |
+
tm.assert_frame_equal(frame, recons)
|
587 |
+
recons = pd.read_excel(reader, sheet_name="test2", index_col=0)
|
588 |
+
tm.assert_frame_equal(expected, recons)
|
589 |
+
assert 2 == len(reader.sheet_names)
|
590 |
+
assert "test1" == reader.sheet_names[0]
|
591 |
+
assert "test2" == reader.sheet_names[1]
|
592 |
+
|
593 |
+
def test_colaliases(self, frame, path):
|
594 |
+
frame = frame.copy()
|
595 |
+
frame.iloc[:5, frame.columns.get_loc("A")] = np.nan
|
596 |
+
|
597 |
+
frame.to_excel(path, sheet_name="test1")
|
598 |
+
frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
|
599 |
+
frame.to_excel(path, sheet_name="test1", header=False)
|
600 |
+
frame.to_excel(path, sheet_name="test1", index=False)
|
601 |
+
|
602 |
+
# column aliases
|
603 |
+
col_aliases = Index(["AA", "X", "Y", "Z"])
|
604 |
+
frame.to_excel(path, sheet_name="test1", header=col_aliases)
|
605 |
+
with ExcelFile(path) as reader:
|
606 |
+
rs = pd.read_excel(reader, sheet_name="test1", index_col=0)
|
607 |
+
xp = frame.copy()
|
608 |
+
xp.columns = col_aliases
|
609 |
+
tm.assert_frame_equal(xp, rs)
|
610 |
+
|
611 |
+
    def test_roundtrip_indexlabels(self, merge_cells, frame, path):
        frame = frame.copy()
        frame.iloc[:5, frame.columns.get_loc("A")] = np.nan

        frame.to_excel(path, sheet_name="test1")
        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])
        frame.to_excel(path, sheet_name="test1", header=False)
        frame.to_excel(path, sheet_name="test1", index=False)

        # test index_label
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0
        df.to_excel(
            path, sheet_name="test1", index_label=["test"], merge_cells=merge_cells
        )
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
                np.int64
            )
        df.index.names = ["test"]
        assert df.index.names == recons.index.names

        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0
        df.to_excel(
            path,
            sheet_name="test1",
            index_label=["test", "dummy", "dummy2"],
            merge_cells=merge_cells,
        )
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
                np.int64
            )
        df.index.names = ["test"]
        assert df.index.names == recons.index.names

        df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0
        df.to_excel(
            path, sheet_name="test1", index_label="test", merge_cells=merge_cells
        )
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype(
                np.int64
            )
        df.index.names = ["test"]
        tm.assert_frame_equal(df, recons.astype(bool))

        frame.to_excel(
            path,
            sheet_name="test1",
            columns=["A", "B", "C", "D"],
            index=False,
            merge_cells=merge_cells,
        )
        # take 'A' and 'B' as indexes (same row as cols 'C', 'D')
        df = frame.copy()
        df = df.set_index(["A", "B"])

        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])
        tm.assert_frame_equal(df, recons)

    def test_excel_roundtrip_indexname(self, merge_cells, path):
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
        df.index.name = "foo"

        df.to_excel(path, merge_cells=merge_cells)

        with ExcelFile(path) as xf:
            result = pd.read_excel(xf, sheet_name=xf.sheet_names[0], index_col=0)

        tm.assert_frame_equal(result, df)
        assert result.index.name == "foo"

    def test_excel_roundtrip_datetime(self, merge_cells, path):
        # datetime.date, not sure what to test here exactly
        unit = get_exp_unit(path)

        # freq does not round-trip
        tsframe = DataFrame(
            np.random.default_rng(2).standard_normal((5, 4)),
            columns=Index(list("ABCD")),
            index=date_range("2000-01-01", periods=5, freq="B"),
        )
        index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None)
        tsframe.index = index

        tsf = tsframe.copy()

        tsf.index = [x.date() for x in tsframe.index]
        tsf.to_excel(path, sheet_name="test1", merge_cells=merge_cells)

        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)

        expected = tsframe[:]
        expected.index = expected.index.as_unit(unit)
        tm.assert_frame_equal(expected, recons)

    def test_excel_date_datetime_format(self, ext, path):
        # see gh-4133
        #
        # Excel output format strings
        unit = get_exp_unit(path)

        df = DataFrame(
            [
                [date(2014, 1, 31), date(1999, 9, 24)],
                [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
            ],
            index=["DATE", "DATETIME"],
            columns=["X", "Y"],
        )
        df_expected = DataFrame(
            [
                [datetime(2014, 1, 31), datetime(1999, 9, 24)],
                [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
            ],
            index=["DATE", "DATETIME"],
            columns=["X", "Y"],
        )
        df_expected = df_expected.astype(f"M8[{unit}]")

        with tm.ensure_clean(ext) as filename2:
            with ExcelWriter(path) as writer1:
                df.to_excel(writer1, sheet_name="test1")

            with ExcelWriter(
                filename2,
                date_format="DD.MM.YYYY",
                datetime_format="DD.MM.YYYY HH-MM-SS",
            ) as writer2:
                df.to_excel(writer2, sheet_name="test1")

            with ExcelFile(path) as reader1:
                rs1 = pd.read_excel(reader1, sheet_name="test1", index_col=0)

            with ExcelFile(filename2) as reader2:
                rs2 = pd.read_excel(reader2, sheet_name="test1", index_col=0)

            tm.assert_frame_equal(rs1, rs2)

            # Since the reader returns a datetime object for dates,
            # we need to use df_expected to check the result.
            tm.assert_frame_equal(rs2, df_expected)

    def test_to_excel_interval_no_labels(self, path, using_infer_string):
        # see gh-19242
        #
        # Test writing Interval without labels.
        df = DataFrame(
            np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64
        )
        expected = df.copy()

        df["new"] = pd.cut(df[0], 10)
        expected["new"] = pd.cut(expected[0], 10).astype(
            str if not using_infer_string else "string[pyarrow_numpy]"
        )

        df.to_excel(path, sheet_name="test1")
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
        tm.assert_frame_equal(expected, recons)

    def test_to_excel_interval_labels(self, path):
        # see gh-19242
        #
        # Test writing Interval with labels.
        df = DataFrame(
            np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64
        )
        expected = df.copy()
        intervals = pd.cut(
            df[0], 10, labels=["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"]
        )
        df["new"] = intervals
        expected["new"] = pd.Series(list(intervals))

        df.to_excel(path, sheet_name="test1")
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
        tm.assert_frame_equal(expected, recons)

    def test_to_excel_timedelta(self, path):
        # see gh-19242, gh-9155
        #
        # Test writing timedelta to xls.
        df = DataFrame(
            np.random.default_rng(2).integers(-10, 10, size=(20, 1)),
            columns=["A"],
            dtype=np.int64,
        )
        expected = df.copy()

        df["new"] = df["A"].apply(lambda x: timedelta(seconds=x))
        expected["new"] = expected["A"].apply(
            lambda x: timedelta(seconds=x).total_seconds() / 86400
        )

        df.to_excel(path, sheet_name="test1")
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=0)
        tm.assert_frame_equal(expected, recons)

    def test_to_excel_periodindex(self, path):
        # xp has a PeriodIndex
        df = DataFrame(
            np.random.default_rng(2).standard_normal((5, 4)),
            columns=Index(list("ABCD")),
            index=date_range("2000-01-01", periods=5, freq="B"),
        )
        xp = df.resample("ME").mean().to_period("M")

        xp.to_excel(path, sheet_name="sht1")

        with ExcelFile(path) as reader:
            rs = pd.read_excel(reader, sheet_name="sht1", index_col=0)
        tm.assert_frame_equal(xp, rs.to_period("M"))

    def test_to_excel_multiindex(self, merge_cells, frame, path):
        arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1)
        new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
        frame.index = new_index

        frame.to_excel(path, sheet_name="test1", header=False)
        frame.to_excel(path, sheet_name="test1", columns=["A", "B"])

        # round trip
        frame.to_excel(path, sheet_name="test1", merge_cells=merge_cells)
        with ExcelFile(path) as reader:
            df = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])
        tm.assert_frame_equal(frame, df)

    # GH13511
    def test_to_excel_multiindex_nan_label(self, merge_cells, path):
        df = DataFrame(
            {
                "A": [None, 2, 3],
                "B": [10, 20, 30],
                "C": np.random.default_rng(2).random(3),
            }
        )
        df = df.set_index(["A", "B"])

        df.to_excel(path, merge_cells=merge_cells)
        df1 = pd.read_excel(path, index_col=[0, 1])
        tm.assert_frame_equal(df, df1)

    # Test for Issue 11328. If column indices are integers, make
    # sure they are handled correctly for either setting of
    # merge_cells
    def test_to_excel_multiindex_cols(self, merge_cells, frame, path):
        arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1)
        new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
        frame.index = new_index

        new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)])
        frame.columns = new_cols_index
        header = [0, 1]
        if not merge_cells:
            header = 0

        # round trip
        frame.to_excel(path, sheet_name="test1", merge_cells=merge_cells)
        with ExcelFile(path) as reader:
            df = pd.read_excel(
                reader, sheet_name="test1", header=header, index_col=[0, 1]
            )
        if not merge_cells:
            fm = frame.columns._format_multi(sparsify=False, include_names=False)
            frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
        tm.assert_frame_equal(frame, df)

    def test_to_excel_multiindex_dates(self, merge_cells, path):
        # try multiindex with dates
        unit = get_exp_unit(path)
        tsframe = DataFrame(
            np.random.default_rng(2).standard_normal((5, 4)),
            columns=Index(list("ABCD")),
            index=date_range("2000-01-01", periods=5, freq="B"),
        )
        tsframe.index = MultiIndex.from_arrays(
            [
                tsframe.index.as_unit(unit),
                np.arange(len(tsframe.index), dtype=np.int64),
            ],
            names=["time", "foo"],
        )

        tsframe.to_excel(path, sheet_name="test1", merge_cells=merge_cells)
        with ExcelFile(path) as reader:
            recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1])

        tm.assert_frame_equal(tsframe, recons)
        assert recons.index.names == ("time", "foo")

    def test_to_excel_multiindex_no_write_index(self, path):
        # Test writing and re-reading a MI without the index. GH 5616.

        # Initial non-MI frame.
        frame1 = DataFrame({"a": [10, 20], "b": [30, 40], "c": [50, 60]})

        # Add a MI.
        frame2 = frame1.copy()
        multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
        frame2.index = multi_index

        # Write out to Excel without the index.
        frame2.to_excel(path, sheet_name="test1", index=False)

        # Read it back in.
        with ExcelFile(path) as reader:
            frame3 = pd.read_excel(reader, sheet_name="test1")

        # Test that it is the same as the initial frame.
        tm.assert_frame_equal(frame1, frame3)

    def test_to_excel_empty_multiindex(self, path):
        # GH 19543.
        expected = DataFrame([], columns=[0, 1, 2])

        df = DataFrame([], index=MultiIndex.from_tuples([], names=[0, 1]), columns=[2])
        df.to_excel(path, sheet_name="test1")

        with ExcelFile(path) as reader:
            result = pd.read_excel(reader, sheet_name="test1")
        tm.assert_frame_equal(
            result, expected, check_index_type=False, check_dtype=False
        )

    def test_to_excel_float_format(self, path):
        df = DataFrame(
            [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            index=["A", "B"],
            columns=["X", "Y", "Z"],
        )
        df.to_excel(path, sheet_name="test1", float_format="%.2f")

        with ExcelFile(path) as reader:
            result = pd.read_excel(reader, sheet_name="test1", index_col=0)

        expected = DataFrame(
            [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
            index=["A", "B"],
            columns=["X", "Y", "Z"],
        )
        tm.assert_frame_equal(result, expected)

    def test_to_excel_output_encoding(self, ext):
        # Avoid mixed inferred_type.
        df = DataFrame(
            [["\u0192", "\u0193", "\u0194"], ["\u0195", "\u0196", "\u0197"]],
            index=["A\u0192", "B"],
            columns=["X\u0193", "Y", "Z"],
        )

        with tm.ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
            df.to_excel(filename, sheet_name="TestSheet")
            result = pd.read_excel(filename, sheet_name="TestSheet", index_col=0)
            tm.assert_frame_equal(result, df)

    def test_to_excel_unicode_filename(self, ext):
        with tm.ensure_clean("\u0192u." + ext) as filename:
            try:
                with open(filename, "wb"):
                    pass
            except UnicodeEncodeError:
                pytest.skip("No unicode file names on this system")

            df = DataFrame(
                [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
                index=["A", "B"],
                columns=["X", "Y", "Z"],
            )
            df.to_excel(filename, sheet_name="test1", float_format="%.2f")

            with ExcelFile(filename) as reader:
                result = pd.read_excel(reader, sheet_name="test1", index_col=0)

            expected = DataFrame(
                [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]],
                index=["A", "B"],
                columns=["X", "Y", "Z"],
            )
            tm.assert_frame_equal(result, expected)

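    # Round-trip frames with 1-3 index/column levels, with and without headers,
    # and check the shape of what comes back.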
@pytest.mark.parametrize("use_headers", [True, False])
|
998 |
+
@pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
|
999 |
+
@pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
|
1000 |
+
def test_excel_010_hemstring(
|
1001 |
+
self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path
|
1002 |
+
):
|
1003 |
+
def roundtrip(data, header=True, parser_hdr=0, index=True):
|
1004 |
+
data.to_excel(path, header=header, merge_cells=merge_cells, index=index)
|
1005 |
+
|
1006 |
+
with ExcelFile(path) as xf:
|
1007 |
+
return pd.read_excel(
|
1008 |
+
xf, sheet_name=xf.sheet_names[0], header=parser_hdr
|
1009 |
+
)
|
1010 |
+
|
1011 |
+
# Basic test.
|
1012 |
+
parser_header = 0 if use_headers else None
|
1013 |
+
res = roundtrip(DataFrame([0]), use_headers, parser_header)
|
1014 |
+
|
1015 |
+
assert res.shape == (1, 2)
|
1016 |
+
assert res.iloc[0, 0] is not np.nan
|
1017 |
+
|
1018 |
+
# More complex tests with multi-index.
|
1019 |
+
nrows = 5
|
1020 |
+
ncols = 3
|
1021 |
+
|
1022 |
+
# ensure limited functionality in 0.10
|
1023 |
+
# override of gh-2370 until sorted out in 0.11
|
1024 |
+
|
1025 |
+
if c_idx_nlevels == 1:
|
1026 |
+
columns = Index([f"a-{i}" for i in range(ncols)], dtype=object)
|
1027 |
+
else:
|
1028 |
+
columns = MultiIndex.from_arrays(
|
1029 |
+
[range(ncols) for _ in range(c_idx_nlevels)],
|
1030 |
+
names=[f"i-{i}" for i in range(c_idx_nlevels)],
|
1031 |
+
)
|
1032 |
+
if r_idx_nlevels == 1:
|
1033 |
+
index = Index([f"b-{i}" for i in range(nrows)], dtype=object)
|
1034 |
+
else:
|
1035 |
+
index = MultiIndex.from_arrays(
|
1036 |
+
[range(nrows) for _ in range(r_idx_nlevels)],
|
1037 |
+
names=[f"j-{i}" for i in range(r_idx_nlevels)],
|
1038 |
+
)
|
1039 |
+
|
1040 |
+
df = DataFrame(
|
1041 |
+
np.ones((nrows, ncols)),
|
1042 |
+
columns=columns,
|
1043 |
+
index=index,
|
1044 |
+
)
|
1045 |
+
|
1046 |
+
# This if will be removed once multi-column Excel writing
|
1047 |
+
# is implemented. For now fixing gh-9794.
|
1048 |
+
if c_idx_nlevels > 1:
|
1049 |
+
msg = (
|
1050 |
+
"Writing to Excel with MultiIndex columns and no index "
|
1051 |
+
"\\('index'=False\\) is not yet implemented."
|
1052 |
+
)
|
1053 |
+
with pytest.raises(NotImplementedError, match=msg):
|
1054 |
+
roundtrip(df, use_headers, index=False)
|
1055 |
+
else:
|
1056 |
+
res = roundtrip(df, use_headers)
|
1057 |
+
|
1058 |
+
if use_headers:
|
1059 |
+
assert res.shape == (nrows, ncols + r_idx_nlevels)
|
1060 |
+
else:
|
1061 |
+
# First row taken as columns.
|
1062 |
+
assert res.shape == (nrows - 1, ncols + r_idx_nlevels)
|
1063 |
+
|
1064 |
+
# No NaNs.
|
1065 |
+
for r in range(len(res.index)):
|
1066 |
+
for c in range(len(res.columns)):
|
1067 |
+
assert res.iloc[r, c] is not np.nan
|
1068 |
+
|
1069 |
+
    def test_duplicated_columns(self, path):
        # see gh-5235
        df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B"])
        df.to_excel(path, sheet_name="test1")
        expected = DataFrame(
            [[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B.1"]
        )

        # By default, we mangle.
        result = pd.read_excel(path, sheet_name="test1", index_col=0)
        tm.assert_frame_equal(result, expected)

        # see gh-11007, gh-10970
        df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"])
        df.to_excel(path, sheet_name="test1")

        result = pd.read_excel(path, sheet_name="test1", index_col=0)
        expected = DataFrame(
            [[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A.1", "B.1"]
        )
        tm.assert_frame_equal(result, expected)

        # see gh-10982
        df.to_excel(path, sheet_name="test1", index=False, header=False)
        result = pd.read_excel(path, sheet_name="test1", header=None)

        expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
        tm.assert_frame_equal(result, expected)

    def test_swapped_columns(self, path):
        # Test for issue #5427.
        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})
        write_frame.to_excel(path, sheet_name="test1", columns=["B", "A"])

        read_frame = pd.read_excel(path, sheet_name="test1", header=0)

        tm.assert_series_equal(write_frame["A"], read_frame["A"])
        tm.assert_series_equal(write_frame["B"], read_frame["B"])

    def test_invalid_columns(self, path):
        # see gh-10982
        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]})

        with pytest.raises(KeyError, match="Not all names specified"):
            write_frame.to_excel(path, sheet_name="test1", columns=["B", "C"])

        with pytest.raises(
            KeyError, match="'passes columns are not ALL present dataframe'"
        ):
            write_frame.to_excel(path, sheet_name="test1", columns=["C", "D"])

    @pytest.mark.parametrize(
        "to_excel_index,read_excel_index_col",
        [
            (True, 0),  # Include index in write to file
            (False, None),  # Don't include index in write to file
        ],
    )
    def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col):
        # GH 31677
        write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]})
        write_frame.to_excel(
            path, sheet_name="col_subset_bug", columns=["A", "B"], index=to_excel_index
        )

        expected = write_frame[["A", "B"]]
        read_frame = pd.read_excel(
            path, sheet_name="col_subset_bug", index_col=read_excel_index_col
        )

        tm.assert_frame_equal(expected, read_frame)

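    # The next four tests exercise the `comment` keyword of pd.read_excel.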
    def test_comment_arg(self, path):
        # see gh-18735
        #
        # Test the comment argument functionality to pd.read_excel.

        # Create file to read in.
        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
        df.to_excel(path, sheet_name="test_c")

        # Read file without comment arg.
        result1 = pd.read_excel(path, sheet_name="test_c", index_col=0)

        result1.iloc[1, 0] = None
        result1.iloc[1, 1] = None
        result1.iloc[2, 1] = None

        result2 = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0)
        tm.assert_frame_equal(result1, result2)

    def test_comment_default(self, path):
        # Re issue #18735
        # Test the comment argument default to pd.read_excel

        # Create file to read in
        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
        df.to_excel(path, sheet_name="test_c")

        # Read file with default and explicit comment=None
        result1 = pd.read_excel(path, sheet_name="test_c")
        result2 = pd.read_excel(path, sheet_name="test_c", comment=None)
        tm.assert_frame_equal(result1, result2)

    def test_comment_used(self, path):
        # see gh-18735
        #
        # Test the comment argument is working as expected when used.

        # Create file to read in.
        df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]})
        df.to_excel(path, sheet_name="test_c")

        # Test read_frame_comment against manually produced expected output.
        expected = DataFrame({"A": ["one", None, "one"], "B": ["two", None, None]})
        result = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0)
        tm.assert_frame_equal(result, expected)

    def test_comment_empty_line(self, path):
        # Re issue #18735
        # Test that pd.read_excel ignores commented lines at the end of file

        df = DataFrame({"a": ["1", "#2"], "b": ["2", "3"]})
        df.to_excel(path, index=False)

        # Test that all-comment lines at EoF are ignored
        expected = DataFrame({"a": [1], "b": [2]})
        result = pd.read_excel(path, comment="#")
        tm.assert_frame_equal(result, expected)

def test_datetimes(self, path):
|
1200 |
+
# Test writing and reading datetimes. For issue #9139. (xref #9185)
|
1201 |
+
unit = get_exp_unit(path)
|
1202 |
+
datetimes = [
|
1203 |
+
datetime(2013, 1, 13, 1, 2, 3),
|
1204 |
+
datetime(2013, 1, 13, 2, 45, 56),
|
1205 |
+
datetime(2013, 1, 13, 4, 29, 49),
|
1206 |
+
datetime(2013, 1, 13, 6, 13, 42),
|
1207 |
+
datetime(2013, 1, 13, 7, 57, 35),
|
1208 |
+
datetime(2013, 1, 13, 9, 41, 28),
|
1209 |
+
datetime(2013, 1, 13, 11, 25, 21),
|
1210 |
+
datetime(2013, 1, 13, 13, 9, 14),
|
1211 |
+
datetime(2013, 1, 13, 14, 53, 7),
|
1212 |
+
datetime(2013, 1, 13, 16, 37, 0),
|
1213 |
+
datetime(2013, 1, 13, 18, 20, 52),
|
1214 |
+
]
|
1215 |
+
|
1216 |
+
write_frame = DataFrame({"A": datetimes})
|
1217 |
+
write_frame.to_excel(path, sheet_name="Sheet1")
|
1218 |
+
read_frame = pd.read_excel(path, sheet_name="Sheet1", header=0)
|
1219 |
+
|
1220 |
+
expected = write_frame.astype(f"M8[{unit}]")
|
1221 |
+
tm.assert_series_equal(expected["A"], read_frame["A"])
|
1222 |
+
|
1223 |
+
def test_bytes_io(self, engine):
|
1224 |
+
# see gh-7074
|
1225 |
+
with BytesIO() as bio:
|
1226 |
+
df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
|
1227 |
+
|
1228 |
+
# Pass engine explicitly, as there is no file path to infer from.
|
1229 |
+
with ExcelWriter(bio, engine=engine) as writer:
|
1230 |
+
df.to_excel(writer)
|
1231 |
+
|
1232 |
+
bio.seek(0)
|
1233 |
+
reread_df = pd.read_excel(bio, index_col=0)
|
1234 |
+
tm.assert_frame_equal(df, reread_df)
|
1235 |
+
|
1236 |
+
def test_engine_kwargs(self, engine, path):
|
1237 |
+
# GH#52368
|
1238 |
+
df = DataFrame([{"A": 1, "B": 2}, {"A": 3, "B": 4}])
|
1239 |
+
|
1240 |
+
msgs = {
|
1241 |
+
"odf": r"OpenDocumentSpreadsheet() got an unexpected keyword "
|
1242 |
+
r"argument 'foo'",
|
1243 |
+
"openpyxl": r"__init__() got an unexpected keyword argument 'foo'",
|
1244 |
+
"xlsxwriter": r"__init__() got an unexpected keyword argument 'foo'",
|
1245 |
+
}
|
1246 |
+
|
1247 |
+
if PY310:
|
1248 |
+
msgs[
|
1249 |
+
"openpyxl"
|
1250 |
+
] = "Workbook.__init__() got an unexpected keyword argument 'foo'"
|
1251 |
+
msgs[
|
1252 |
+
"xlsxwriter"
|
1253 |
+
] = "Workbook.__init__() got an unexpected keyword argument 'foo'"
|
1254 |
+
|
1255 |
+
# Handle change in error message for openpyxl (write and append mode)
|
1256 |
+
if engine == "openpyxl" and not os.path.exists(path):
|
1257 |
+
msgs[
|
1258 |
+
"openpyxl"
|
1259 |
+
] = r"load_workbook() got an unexpected keyword argument 'foo'"
|
1260 |
+
|
1261 |
+
with pytest.raises(TypeError, match=re.escape(msgs[engine])):
|
1262 |
+
df.to_excel(
|
1263 |
+
path,
|
1264 |
+
engine=engine,
|
1265 |
+
engine_kwargs={"foo": "bar"},
|
1266 |
+
)
|
1267 |
+
|
1268 |
+
def test_write_lists_dict(self, path):
|
1269 |
+
# see gh-8188.
|
1270 |
+
df = DataFrame(
|
1271 |
+
{
|
1272 |
+
"mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
|
1273 |
+
"numeric": [1, 2, 3.0],
|
1274 |
+
"str": ["apple", "banana", "cherry"],
|
1275 |
+
}
|
1276 |
+
)
|
1277 |
+
df.to_excel(path, sheet_name="Sheet1")
|
1278 |
+
read = pd.read_excel(path, sheet_name="Sheet1", header=0, index_col=0)
|
1279 |
+
|
1280 |
+
expected = df.copy()
|
1281 |
+
expected.mixed = expected.mixed.apply(str)
|
1282 |
+
expected.numeric = expected.numeric.astype("int64")
|
1283 |
+
|
1284 |
+
tm.assert_frame_equal(read, expected)
|
1285 |
+
|
1286 |
+
def test_render_as_column_name(self, path):
|
1287 |
+
# see gh-34331
|
1288 |
+
df = DataFrame({"render": [1, 2], "data": [3, 4]})
|
1289 |
+
df.to_excel(path, sheet_name="Sheet1")
|
1290 |
+
read = pd.read_excel(path, "Sheet1", index_col=0)
|
1291 |
+
expected = df
|
1292 |
+
tm.assert_frame_equal(read, expected)
|
1293 |
+
|
1294 |
+
def test_true_and_false_value_options(self, path):
|
1295 |
+
# see gh-13347
|
1296 |
+
df = DataFrame([["foo", "bar"]], columns=["col1", "col2"], dtype=object)
|
1297 |
+
with option_context("future.no_silent_downcasting", True):
|
1298 |
+
expected = df.replace({"foo": True, "bar": False}).astype("bool")
|
1299 |
+
|
1300 |
+
df.to_excel(path)
|
1301 |
+
read_frame = pd.read_excel(
|
1302 |
+
path, true_values=["foo"], false_values=["bar"], index_col=0
|
1303 |
+
)
|
1304 |
+
tm.assert_frame_equal(read_frame, expected)
|
1305 |
+
|
1306 |
+
def test_freeze_panes(self, path):
|
1307 |
+
# see gh-15160
|
1308 |
+
expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
|
1309 |
+
expected.to_excel(path, sheet_name="Sheet1", freeze_panes=(1, 1))
|
1310 |
+
|
1311 |
+
result = pd.read_excel(path, index_col=0)
|
1312 |
+
tm.assert_frame_equal(result, expected)
|
1313 |
+
|
1314 |
+
def test_path_path_lib(self, engine, ext):
|
1315 |
+
df = DataFrame(
|
1316 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
1317 |
+
columns=Index(list("ABCD")),
|
1318 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
1319 |
+
)
|
1320 |
+
writer = partial(df.to_excel, engine=engine)
|
1321 |
+
|
1322 |
+
reader = partial(pd.read_excel, index_col=0)
|
1323 |
+
result = tm.round_trip_pathlib(writer, reader, path=f"foo{ext}")
|
1324 |
+
tm.assert_frame_equal(result, df)
|
1325 |
+
|
1326 |
+
def test_path_local_path(self, engine, ext):
|
1327 |
+
df = DataFrame(
|
1328 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
1329 |
+
columns=Index(list("ABCD")),
|
1330 |
+
index=Index([f"i-{i}" for i in range(30)]),
|
1331 |
+
)
|
1332 |
+
writer = partial(df.to_excel, engine=engine)
|
1333 |
+
|
1334 |
+
reader = partial(pd.read_excel, index_col=0)
|
1335 |
+
result = tm.round_trip_localpath(writer, reader, path=f"foo{ext}")
|
1336 |
+
tm.assert_frame_equal(result, df)
|
1337 |
+
|
1338 |
+
def test_merged_cell_custom_objects(self, path):
|
1339 |
+
# see GH-27006
|
1340 |
+
mi = MultiIndex.from_tuples(
|
1341 |
+
[
|
1342 |
+
(pd.Period("2018"), pd.Period("2018Q1")),
|
1343 |
+
(pd.Period("2018"), pd.Period("2018Q2")),
|
1344 |
+
]
|
1345 |
+
)
|
1346 |
+
expected = DataFrame(np.ones((2, 2), dtype="int64"), columns=mi)
|
1347 |
+
expected.to_excel(path)
|
1348 |
+
result = pd.read_excel(path, header=[0, 1], index_col=0)
|
1349 |
+
# need to convert PeriodIndexes to standard Indexes for assert equal
|
1350 |
+
expected.columns = expected.columns.set_levels(
|
1351 |
+
[[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]],
|
1352 |
+
level=[0, 1],
|
1353 |
+
)
|
1354 |
+
tm.assert_frame_equal(result, expected)
|
1355 |
+
|
1356 |
+
@pytest.mark.parametrize("dtype", [None, object])
|
1357 |
+
def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path):
|
1358 |
+
# GH 27008, GH 7056
|
1359 |
+
tz = tz_aware_fixture
|
1360 |
+
data = pd.Timestamp("2019", tz=tz)
|
1361 |
+
df = DataFrame([data], dtype=dtype)
|
1362 |
+
with pytest.raises(ValueError, match="Excel does not support"):
|
1363 |
+
df.to_excel(path)
|
1364 |
+
|
1365 |
+
data = data.to_pydatetime()
|
1366 |
+
df = DataFrame([data], dtype=dtype)
|
1367 |
+
with pytest.raises(ValueError, match="Excel does not support"):
|
1368 |
+
df.to_excel(path)
|
1369 |
+
|
1370 |
+
def test_excel_duplicate_columns_with_names(self, path):
|
1371 |
+
# GH#39695
|
1372 |
+
df = DataFrame({"A": [0, 1], "B": [10, 11]})
|
1373 |
+
df.to_excel(path, columns=["A", "B", "A"], index=False)
|
1374 |
+
|
1375 |
+
result = pd.read_excel(path)
|
1376 |
+
expected = DataFrame([[0, 10, 0], [1, 11, 1]], columns=["A", "B", "A.1"])
|
1377 |
+
tm.assert_frame_equal(result, expected)
|
1378 |
+
|
1379 |
+
def test_if_sheet_exists_raises(self, ext):
|
1380 |
+
# GH 40230
|
1381 |
+
msg = "if_sheet_exists is only valid in append mode (mode='a')"
|
1382 |
+
|
1383 |
+
with tm.ensure_clean(ext) as f:
|
1384 |
+
with pytest.raises(ValueError, match=re.escape(msg)):
|
1385 |
+
ExcelWriter(f, if_sheet_exists="replace")
|
1386 |
+
|
1387 |
+
def test_excel_writer_empty_frame(self, engine, ext):
|
1388 |
+
# GH#45793
|
1389 |
+
with tm.ensure_clean(ext) as path:
|
1390 |
+
with ExcelWriter(path, engine=engine) as writer:
|
1391 |
+
DataFrame().to_excel(writer)
|
1392 |
+
result = pd.read_excel(path)
|
1393 |
+
expected = DataFrame()
|
1394 |
+
tm.assert_frame_equal(result, expected)
|
1395 |
+
|
1396 |
+
def test_to_excel_empty_frame(self, engine, ext):
|
1397 |
+
# GH#45793
|
1398 |
+
with tm.ensure_clean(ext) as path:
|
1399 |
+
DataFrame().to_excel(path, engine=engine)
|
1400 |
+
result = pd.read_excel(path)
|
1401 |
+
expected = DataFrame()
|
1402 |
+
tm.assert_frame_equal(result, expected)
|
1403 |
+
|
1404 |
+
|
1405 |
+
class TestExcelWriterEngineTests:
|
1406 |
+
@pytest.mark.parametrize(
|
1407 |
+
"klass,ext",
|
1408 |
+
[
|
1409 |
+
pytest.param(_XlsxWriter, ".xlsx", marks=td.skip_if_no("xlsxwriter")),
|
1410 |
+
pytest.param(_OpenpyxlWriter, ".xlsx", marks=td.skip_if_no("openpyxl")),
|
1411 |
+
],
|
1412 |
+
)
|
1413 |
+
def test_ExcelWriter_dispatch(self, klass, ext):
|
1414 |
+
with tm.ensure_clean(ext) as path:
|
1415 |
+
with ExcelWriter(path) as writer:
|
1416 |
+
if ext == ".xlsx" and bool(
|
1417 |
+
import_optional_dependency("xlsxwriter", errors="ignore")
|
1418 |
+
):
|
1419 |
+
# xlsxwriter has preference over openpyxl if both installed
|
1420 |
+
assert isinstance(writer, _XlsxWriter)
|
1421 |
+
else:
|
1422 |
+
assert isinstance(writer, klass)
|
1423 |
+
|
1424 |
+
def test_ExcelWriter_dispatch_raises(self):
|
1425 |
+
with pytest.raises(ValueError, match="No engine"):
|
1426 |
+
ExcelWriter("nothing")
|
1427 |
+
|
1428 |
+
def test_register_writer(self):
|
1429 |
+
class DummyClass(ExcelWriter):
|
1430 |
+
called_save = False
|
1431 |
+
called_write_cells = False
|
1432 |
+
called_sheets = False
|
1433 |
+
_supported_extensions = ("xlsx", "xls")
|
1434 |
+
_engine = "dummy"
|
1435 |
+
|
1436 |
+
def book(self):
|
1437 |
+
pass
|
1438 |
+
|
1439 |
+
def _save(self):
|
1440 |
+
type(self).called_save = True
|
1441 |
+
|
1442 |
+
def _write_cells(self, *args, **kwargs):
|
1443 |
+
type(self).called_write_cells = True
|
1444 |
+
|
1445 |
+
@property
|
1446 |
+
def sheets(self):
|
1447 |
+
type(self).called_sheets = True
|
1448 |
+
|
1449 |
+
@classmethod
|
1450 |
+
def assert_called_and_reset(cls):
|
1451 |
+
assert cls.called_save
|
1452 |
+
assert cls.called_write_cells
|
1453 |
+
assert not cls.called_sheets
|
1454 |
+
cls.called_save = False
|
1455 |
+
cls.called_write_cells = False
|
1456 |
+
|
1457 |
+
register_writer(DummyClass)
|
1458 |
+
|
1459 |
+
with option_context("io.excel.xlsx.writer", "dummy"):
|
1460 |
+
path = "something.xlsx"
|
1461 |
+
with tm.ensure_clean(path) as filepath:
|
1462 |
+
with ExcelWriter(filepath) as writer:
|
1463 |
+
assert isinstance(writer, DummyClass)
|
1464 |
+
df = DataFrame(
|
1465 |
+
["a"],
|
1466 |
+
columns=Index(["b"], name="foo"),
|
1467 |
+
index=Index(["c"], name="bar"),
|
1468 |
+
)
|
1469 |
+
df.to_excel(filepath)
|
1470 |
+
DummyClass.assert_called_and_reset()
|
1471 |
+
|
1472 |
+
with tm.ensure_clean("something.xls") as filepath:
|
1473 |
+
df.to_excel(filepath, engine="dummy")
|
1474 |
+
DummyClass.assert_called_and_reset()
|
1475 |
+
|
1476 |
+
|
1477 |
+
@td.skip_if_no("xlrd")
|
1478 |
+
@td.skip_if_no("openpyxl")
|
1479 |
+
class TestFSPath:
|
1480 |
+
def test_excelfile_fspath(self):
|
1481 |
+
with tm.ensure_clean("foo.xlsx") as path:
|
1482 |
+
df = DataFrame({"A": [1, 2]})
|
1483 |
+
df.to_excel(path)
|
1484 |
+
with ExcelFile(path) as xl:
|
1485 |
+
result = os.fspath(xl)
|
1486 |
+
assert result == path
|
1487 |
+
|
1488 |
+
def test_excelwriter_fspath(self):
|
1489 |
+
with tm.ensure_clean("foo.xlsx") as path:
|
1490 |
+
with ExcelWriter(path) as writer:
|
1491 |
+
assert os.fspath(writer) == str(path)
|
1492 |
+
|
1493 |
+
def test_to_excel_pos_args_deprecation(self):
|
1494 |
+
# GH-54229
|
1495 |
+
df = DataFrame({"a": [1, 2, 3]})
|
1496 |
+
msg = (
|
1497 |
+
r"Starting with pandas version 3.0 all arguments of to_excel except "
|
1498 |
+
r"for the argument 'excel_writer' will be keyword-only."
|
1499 |
+
)
|
1500 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
1501 |
+
buf = BytesIO()
|
1502 |
+
writer = ExcelWriter(buf)
|
1503 |
+
df.to_excel(writer, "Sheet_name_1")
|
1504 |
+
|
1505 |
+
|
1506 |
+
@pytest.mark.parametrize("klass", _writers.values())
|
1507 |
+
def test_subclass_attr(klass):
|
1508 |
+
# testing that subclasses of ExcelWriter don't have public attributes (issue 49602)
|
1509 |
+
attrs_base = {name for name in dir(ExcelWriter) if not name.startswith("_")}
|
1510 |
+
attrs_klass = {name for name in dir(klass) if not name.startswith("_")}
|
1511 |
+
assert not attrs_base.symmetric_difference(attrs_klass)
|
venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py
ADDED
@@ -0,0 +1,76 @@
+import io
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+import pandas._testing as tm
+
+from pandas.io.excel import ExcelFile
+from pandas.io.excel._base import inspect_excel_format
+
+xlrd = pytest.importorskip("xlrd")
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+@pytest.fixture(params=[".xls"])
+def read_ext_xlrd(request):
+    """
+    Valid extensions for reading Excel files with xlrd.
+
+    Similar to read_ext, but excludes .ods, .xlsb, and for xlrd>2 .xlsx, .xlsm
+    """
+    return request.param
+
+
+def test_read_xlrd_book(read_ext_xlrd, datapath):
+    engine = "xlrd"
+    sheet_name = "Sheet1"
+    pth = datapath("io", "data", "excel", "test1.xls")
+    with xlrd.open_workbook(pth) as book:
+        with ExcelFile(book, engine=engine) as xl:
+            result = pd.read_excel(xl, sheet_name=sheet_name, index_col=0)
+
+        expected = pd.read_excel(
+            book, sheet_name=sheet_name, engine=engine, index_col=0
+        )
+        tm.assert_frame_equal(result, expected)
+
+
+def test_read_xlsx_fails(datapath):
+    # GH 29375
+    from xlrd.biffh import XLRDError
+
+    path = datapath("io", "data", "excel", "test1.xlsx")
+    with pytest.raises(XLRDError, match="Excel xlsx file; not supported"):
+        pd.read_excel(path, engine="xlrd")
+
+
+def test_nan_in_xls(datapath):
+    # GH 54564
+    path = datapath("io", "data", "excel", "test6.xls")
+
+    expected = pd.DataFrame({0: np.r_[0, 2].astype("int64"), 1: np.r_[1, np.nan]})
+
+    result = pd.read_excel(path, header=None)
+
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "file_header",
+    [
+        b"\x09\x00\x04\x00\x07\x00\x10\x00",
+        b"\x09\x02\x06\x00\x00\x00\x10\x00",
+        b"\x09\x04\x06\x00\x00\x00\x10\x00",
+        b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1",
+    ],
+)
+def test_read_old_xls_files(file_header):
+    # GH 41226
+    f = io.BytesIO(file_header)
+    assert inspect_excel_format(f) == "xls"
venv/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py
ADDED
@@ -0,0 +1,86 @@
+import contextlib
+
+import pytest
+
+from pandas.compat import is_platform_windows
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.excel import ExcelWriter
+
+xlsxwriter = pytest.importorskip("xlsxwriter")
+
+if is_platform_windows():
+    pytestmark = pytest.mark.single_cpu
+
+
+@pytest.fixture
+def ext():
+    return ".xlsx"
+
+
+def test_column_format(ext):
+    # Test that column formats are applied to cells. Test for issue #9167.
+    # Applicable to xlsxwriter only.
+    openpyxl = pytest.importorskip("openpyxl")
+
+    with tm.ensure_clean(ext) as path:
+        frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]})
+
+        with ExcelWriter(path) as writer:
+            frame.to_excel(writer)
+
+            # Add a number format to col B and ensure it is applied to cells.
+            num_format = "#,##0"
+            write_workbook = writer.book
+            write_worksheet = write_workbook.worksheets()[0]
+            col_format = write_workbook.add_format({"num_format": num_format})
+            write_worksheet.set_column("B:B", None, col_format)
+
+        with contextlib.closing(openpyxl.load_workbook(path)) as read_workbook:
+            try:
+                read_worksheet = read_workbook["Sheet1"]
+            except TypeError:
+                # compat
+                read_worksheet = read_workbook.get_sheet_by_name(name="Sheet1")
+
+        # Get the number format from the cell.
+        try:
+            cell = read_worksheet["B2"]
+        except TypeError:
+            # compat
+            cell = read_worksheet.cell("B2")
+
+        try:
+            read_num_format = cell.number_format
+        except AttributeError:
+            read_num_format = cell.style.number_format._format_code
+
+        assert read_num_format == num_format
+
+
+def test_write_append_mode_raises(ext):
+    msg = "Append mode is not supported with xlsxwriter!"
+
+    with tm.ensure_clean(ext) as f:
+        with pytest.raises(ValueError, match=msg):
+            ExcelWriter(f, engine="xlsxwriter", mode="a")
+
+
+@pytest.mark.parametrize("nan_inf_to_errors", [True, False])
+def test_engine_kwargs(ext, nan_inf_to_errors):
+    # GH 42286
+    engine_kwargs = {"options": {"nan_inf_to_errors": nan_inf_to_errors}}
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="xlsxwriter", engine_kwargs=engine_kwargs) as writer:
+            assert writer.book.nan_inf_to_errors == nan_inf_to_errors
+
+
+def test_book_and_sheets_consistent(ext):
+    # GH#45687 - Ensure sheets is updated if user modifies book
+    with tm.ensure_clean(ext) as f:
+        with ExcelWriter(f, engine="xlsxwriter") as writer:
+            assert writer.sheets == {}
+            sheet = writer.book.add_worksheet("test_name")
+            assert writer.sheets == {"test_name": sheet}
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (188 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc
ADDED
Binary file (456 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc
ADDED
Binary file (4.45 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc
ADDED
Binary file (944 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc
ADDED
Binary file (23.8 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc
ADDED
Binary file (8.38 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc
ADDED
Binary file (20 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc
ADDED
Binary file (65.7 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc
ADDED
Binary file (15 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc
ADDED
Binary file (35.6 kB)
venv/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py
ADDED
@@ -0,0 +1,9 @@
+import pytest
+
+
+@pytest.fixture(params=["split", "records", "index", "columns", "values"])
+def orient(request):
+    """
+    Fixture for orients excluding the table format.
+    """
+    return request.param
venv/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py
ADDED
@@ -0,0 +1,317 @@
+"""Tests for ExtensionDtype Table Schema integration."""
+
+from collections import OrderedDict
+import datetime as dt
+import decimal
+from io import StringIO
+import json
+
+import pytest
+
+from pandas import (
+    NA,
+    DataFrame,
+    Index,
+    array,
+    read_json,
+)
+import pandas._testing as tm
+from pandas.core.arrays.integer import Int64Dtype
+from pandas.core.arrays.string_ import StringDtype
+from pandas.core.series import Series
+from pandas.tests.extension.date import (
+    DateArray,
+    DateDtype,
+)
+from pandas.tests.extension.decimal.array import (
+    DecimalArray,
+    DecimalDtype,
+)
+
+from pandas.io.json._table_schema import (
+    as_json_table_type,
+    build_table_schema,
+)
+
+
+class TestBuildSchema:
+    def test_build_table_schema(self):
+        df = DataFrame(
+            {
+                "A": DateArray([dt.date(2021, 10, 10)]),
+                "B": DecimalArray([decimal.Decimal(10)]),
+                "C": array(["pandas"], dtype="string"),
+                "D": array([10], dtype="Int64"),
+            }
+        )
+        result = build_table_schema(df, version=False)
+        expected = {
+            "fields": [
+                {"name": "index", "type": "integer"},
+                {"name": "A", "type": "any", "extDtype": "DateDtype"},
+                {"name": "B", "type": "number", "extDtype": "decimal"},
+                {"name": "C", "type": "any", "extDtype": "string"},
+                {"name": "D", "type": "integer", "extDtype": "Int64"},
+            ],
+            "primaryKey": ["index"],
+        }
+        assert result == expected
+        result = build_table_schema(df)
+        assert "pandas_version" in result
+
+
+class TestTableSchemaType:
+    @pytest.mark.parametrize(
+        "date_data",
+        [
+            DateArray([dt.date(2021, 10, 10)]),
+            DateArray(dt.date(2021, 10, 10)),
+            Series(DateArray(dt.date(2021, 10, 10))),
+        ],
+    )
+    def test_as_json_table_type_ext_date_array_dtype(self, date_data):
+        assert as_json_table_type(date_data.dtype) == "any"
+
+    def test_as_json_table_type_ext_date_dtype(self):
+        assert as_json_table_type(DateDtype()) == "any"
+
+    @pytest.mark.parametrize(
+        "decimal_data",
+        [
+            DecimalArray([decimal.Decimal(10)]),
+            Series(DecimalArray([decimal.Decimal(10)])),
+        ],
+    )
+    def test_as_json_table_type_ext_decimal_array_dtype(self, decimal_data):
+        assert as_json_table_type(decimal_data.dtype) == "number"
+
+    def test_as_json_table_type_ext_decimal_dtype(self):
+        assert as_json_table_type(DecimalDtype()) == "number"
+
+    @pytest.mark.parametrize(
+        "string_data",
+        [
+            array(["pandas"], dtype="string"),
+            Series(array(["pandas"], dtype="string")),
+        ],
+    )
+    def test_as_json_table_type_ext_string_array_dtype(self, string_data):
+        assert as_json_table_type(string_data.dtype) == "any"
+
+    def test_as_json_table_type_ext_string_dtype(self):
+        assert as_json_table_type(StringDtype()) == "any"
+
+    @pytest.mark.parametrize(
+        "integer_data",
+        [
+            array([10], dtype="Int64"),
+            Series(array([10], dtype="Int64")),
+        ],
+    )
+    def test_as_json_table_type_ext_integer_array_dtype(self, integer_data):
+        assert as_json_table_type(integer_data.dtype) == "integer"
+
+    def test_as_json_table_type_ext_integer_dtype(self):
+        assert as_json_table_type(Int64Dtype()) == "integer"
+
+
+class TestTableOrient:
+    @pytest.fixture
+    def da(self):
+        return DateArray([dt.date(2021, 10, 10)])
+
+    @pytest.fixture
+    def dc(self):
+        return DecimalArray([decimal.Decimal(10)])
+
+    @pytest.fixture
+    def sa(self):
+        return array(["pandas"], dtype="string")
+
+    @pytest.fixture
+    def ia(self):
+        return array([10], dtype="Int64")
+
+    @pytest.fixture
+    def df(self, da, dc, sa, ia):
+        return DataFrame(
+            {
+                "A": da,
+                "B": dc,
+                "C": sa,
+                "D": ia,
+            }
+        )
+
+    def test_build_date_series(self, da):
+        s = Series(da, name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "id", "type": "integer"},
+            {"name": "a", "type": "any", "extDtype": "DateDtype"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                ("data", [OrderedDict([("id", 0), ("a", "2021-10-10T00:00:00.000")])]),
+            ]
+        )
+
+        assert result == expected
+
+    def test_build_decimal_series(self, dc):
+        s = Series(dc, name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "id", "type": "integer"},
+            {"name": "a", "type": "number", "extDtype": "decimal"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                ("data", [OrderedDict([("id", 0), ("a", 10.0)])]),
+            ]
+        )
+
+        assert result == expected
+
+    def test_build_string_series(self, sa):
+        s = Series(sa, name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "id", "type": "integer"},
+            {"name": "a", "type": "any", "extDtype": "string"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                ("data", [OrderedDict([("id", 0), ("a", "pandas")])]),
+            ]
+        )
+
+        assert result == expected
+
+    def test_build_int64_series(self, ia):
+        s = Series(ia, name="a")
+        s.index.name = "id"
+        result = s.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            {"name": "id", "type": "integer"},
+            {"name": "a", "type": "integer", "extDtype": "Int64"},
+        ]
+
+        schema = {"fields": fields, "primaryKey": ["id"]}
+
+        expected = OrderedDict(
+            [
+                ("schema", schema),
+                ("data", [OrderedDict([("id", 0), ("a", 10)])]),
+            ]
+        )
+
+        assert result == expected
+
+    def test_to_json(self, df):
+        df = df.copy()
+        df.index.name = "idx"
+        result = df.to_json(orient="table", date_format="iso")
+        result = json.loads(result, object_pairs_hook=OrderedDict)
+
+        assert "pandas_version" in result["schema"]
+        result["schema"].pop("pandas_version")
+
+        fields = [
+            OrderedDict({"name": "idx", "type": "integer"}),
+            OrderedDict({"name": "A", "type": "any", "extDtype": "DateDtype"}),
+            OrderedDict({"name": "B", "type": "number", "extDtype": "decimal"}),
+            OrderedDict({"name": "C", "type": "any", "extDtype": "string"}),
+            OrderedDict({"name": "D", "type": "integer", "extDtype": "Int64"}),
+        ]
+
+        schema = OrderedDict({"fields": fields, "primaryKey": ["idx"]})
+        data = [
+            OrderedDict(
+                [
+                    ("idx", 0),
+                    ("A", "2021-10-10T00:00:00.000"),
+                    ("B", 10.0),
+                    ("C", "pandas"),
+                    ("D", 10),
+                ]
+            )
+        ]
+        expected = OrderedDict([("schema", schema), ("data", data)])
+
+        assert result == expected
+
+    def test_json_ext_dtype_reading_roundtrip(self):
+        # GH#40255
+        df = DataFrame(
+            {
+                "a": Series([2, NA], dtype="Int64"),
+                "b": Series([1.5, NA], dtype="Float64"),
+                "c": Series([True, NA], dtype="boolean"),
+            },
+            index=Index([1, NA], dtype="Int64"),
+        )
+        expected = df.copy()
+        data_json = df.to_json(orient="table", indent=4)
+        result = read_json(StringIO(data_json), orient="table")
+        tm.assert_frame_equal(result, expected)
+
+    def test_json_ext_dtype_reading(self):
+        # GH#40255
+        data_json = """{
+            "schema":{
+                "fields":[
+                    {
+                        "name":"a",
+                        "type":"integer",
+                        "extDtype":"Int64"
+                    }
+                ],
+            },
+            "data":[
+                {
+                    "a":2
+                },
+                {
+                    "a":null
+                }
+            ]
+        }"""
+        result = read_json(StringIO(data_json), orient="table")
+        expected = DataFrame({"a": Series([2, NA], dtype="Int64")})
+        tm.assert_frame_equal(result, expected)
venv/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py
ADDED
@@ -0,0 +1,907 @@
1 |
+
import json
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
from pandas import (
|
7 |
+
DataFrame,
|
8 |
+
Index,
|
9 |
+
Series,
|
10 |
+
json_normalize,
|
11 |
+
)
|
12 |
+
import pandas._testing as tm
|
13 |
+
|
14 |
+
from pandas.io.json._normalize import nested_to_record
|
15 |
+
|
16 |
+
|
17 |
+
@pytest.fixture
|
18 |
+
def deep_nested():
|
19 |
+
# deeply nested data
|
20 |
+
return [
|
21 |
+
{
|
22 |
+
"country": "USA",
|
23 |
+
"states": [
|
24 |
+
{
|
25 |
+
"name": "California",
|
26 |
+
"cities": [
|
27 |
+
{"name": "San Francisco", "pop": 12345},
|
28 |
+
{"name": "Los Angeles", "pop": 12346},
|
29 |
+
],
|
30 |
+
},
|
31 |
+
{
|
32 |
+
"name": "Ohio",
|
33 |
+
"cities": [
|
34 |
+
{"name": "Columbus", "pop": 1234},
|
35 |
+
{"name": "Cleveland", "pop": 1236},
|
36 |
+
],
|
37 |
+
},
|
38 |
+
],
|
39 |
+
},
|
40 |
+
{
|
41 |
+
"country": "Germany",
|
42 |
+
"states": [
|
43 |
+
{"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},
|
44 |
+
{
|
45 |
+
"name": "Nordrhein-Westfalen",
|
46 |
+
"cities": [
|
47 |
+
{"name": "Duesseldorf", "pop": 1238},
|
48 |
+
{"name": "Koeln", "pop": 1239},
|
49 |
+
],
|
50 |
+
},
|
51 |
+
],
|
52 |
+
},
|
53 |
+
]
|
54 |
+
|
55 |
+
|
56 |
+
@pytest.fixture
|
57 |
+
def state_data():
|
58 |
+
return [
|
59 |
+
{
|
60 |
+
"counties": [
|
61 |
+
{"name": "Dade", "population": 12345},
|
62 |
+
{"name": "Broward", "population": 40000},
|
63 |
+
{"name": "Palm Beach", "population": 60000},
|
64 |
+
],
|
65 |
+
"info": {"governor": "Rick Scott"},
|
66 |
+
"shortname": "FL",
|
67 |
+
"state": "Florida",
|
68 |
+
},
|
69 |
+
{
|
70 |
+
"counties": [
|
71 |
+
{"name": "Summit", "population": 1234},
|
72 |
+
{"name": "Cuyahoga", "population": 1337},
|
73 |
+
],
|
74 |
+
"info": {"governor": "John Kasich"},
|
75 |
+
"shortname": "OH",
|
76 |
+
"state": "Ohio",
|
77 |
+
},
|
78 |
+
]
|
79 |
+
|
80 |
+
|
81 |
+
@pytest.fixture
|
82 |
+
def author_missing_data():
|
83 |
+
return [
|
84 |
+
{"info": None},
|
85 |
+
{
|
86 |
+
"info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
|
87 |
+
"author_name": {"first": "Jane", "last_name": "Doe"},
|
88 |
+
},
|
89 |
+
]
|
90 |
+
|
91 |
+
|
92 |
+
@pytest.fixture
|
93 |
+
def missing_metadata():
|
94 |
+
return [
|
95 |
+
{
|
96 |
+
"name": "Alice",
|
97 |
+
"addresses": [
|
98 |
+
{
|
99 |
+
"number": 9562,
|
100 |
+
"street": "Morris St.",
|
101 |
+
"city": "Massillon",
|
102 |
+
"state": "OH",
|
103 |
+
"zip": 44646,
|
104 |
+
}
|
105 |
+
],
|
106 |
+
"previous_residences": {"cities": [{"city_name": "Foo York City"}]},
|
107 |
+
},
|
108 |
+
{
|
109 |
+
"addresses": [
|
110 |
+
{
|
111 |
+
"number": 8449,
|
112 |
+
"street": "Spring St.",
|
113 |
+
"city": "Elizabethton",
|
114 |
+
"state": "TN",
|
115 |
+
"zip": 37643,
|
116 |
+
}
|
117 |
+
],
|
118 |
+
"previous_residences": {"cities": [{"city_name": "Barmingham"}]},
|
119 |
+
},
|
120 |
+
]
|
121 |
+
|
122 |
+
|
123 |
+
@pytest.fixture
|
124 |
+
def max_level_test_input_data():
|
125 |
+
"""
|
126 |
+
input data to test json_normalize with max_level param
|
127 |
+
"""
|
128 |
+
return [
|
129 |
+
{
|
130 |
+
"CreatedBy": {"Name": "User001"},
|
131 |
+
"Lookup": {
|
132 |
+
"TextField": "Some text",
|
133 |
+
"UserField": {"Id": "ID001", "Name": "Name001"},
|
134 |
+
},
|
135 |
+
"Image": {"a": "b"},
|
136 |
+
}
|
137 |
+
]
|
138 |
+
|
139 |
+
|
140 |
+
class TestJSONNormalize:
|
141 |
+
def test_simple_records(self):
|
142 |
+
recs = [
|
143 |
+
{"a": 1, "b": 2, "c": 3},
|
144 |
+
{"a": 4, "b": 5, "c": 6},
|
145 |
+
{"a": 7, "b": 8, "c": 9},
|
146 |
+
{"a": 10, "b": 11, "c": 12},
|
147 |
+
]
|
148 |
+
|
149 |
+
result = json_normalize(recs)
|
150 |
+
expected = DataFrame(recs)
|
151 |
+
|
152 |
+
tm.assert_frame_equal(result, expected)
|
153 |
+
|
154 |
+
def test_simple_normalize(self, state_data):
|
155 |
+
result = json_normalize(state_data[0], "counties")
|
156 |
+
expected = DataFrame(state_data[0]["counties"])
|
157 |
+
tm.assert_frame_equal(result, expected)
|
158 |
+
|
159 |
+
result = json_normalize(state_data, "counties")
|
160 |
+
|
161 |
+
expected = []
|
162 |
+
for rec in state_data:
|
163 |
+
expected.extend(rec["counties"])
|
164 |
+
expected = DataFrame(expected)
|
165 |
+
|
166 |
+
tm.assert_frame_equal(result, expected)
|
167 |
+
|
168 |
+
result = json_normalize(state_data, "counties", meta="state")
|
169 |
+
expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
|
170 |
+
|
171 |
+
tm.assert_frame_equal(result, expected)
|
172 |
+
|
173 |
+
def test_fields_list_type_normalize(self):
|
174 |
+
parse_metadata_fields_list_type = [
|
175 |
+
{"values": [1, 2, 3], "metadata": {"listdata": [1, 2]}}
|
176 |
+
]
|
177 |
+
result = json_normalize(
|
178 |
+
parse_metadata_fields_list_type,
|
179 |
+
record_path=["values"],
|
180 |
+
meta=[["metadata", "listdata"]],
|
181 |
+
)
|
182 |
+
expected = DataFrame(
|
183 |
+
{0: [1, 2, 3], "metadata.listdata": [[1, 2], [1, 2], [1, 2]]}
|
184 |
+
)
|
185 |
+
tm.assert_frame_equal(result, expected)
|
186 |
+
|
187 |
+
def test_empty_array(self):
|
188 |
+
result = json_normalize([])
|
189 |
+
expected = DataFrame()
|
190 |
+
tm.assert_frame_equal(result, expected)
|
191 |
+
|
192 |
+
@pytest.mark.parametrize(
|
193 |
+
"data, record_path, exception_type",
|
194 |
+
[
|
195 |
+
([{"a": 0}, {"a": 1}], None, None),
|
196 |
+
({"a": [{"a": 0}, {"a": 1}]}, "a", None),
|
197 |
+
('{"a": [{"a": 0}, {"a": 1}]}', None, NotImplementedError),
|
198 |
+
(None, None, NotImplementedError),
|
199 |
+
],
|
200 |
+
)
|
201 |
+
def test_accepted_input(self, data, record_path, exception_type):
|
202 |
+
if exception_type is not None:
|
203 |
+
with pytest.raises(exception_type, match=""):
|
204 |
+
json_normalize(data, record_path=record_path)
|
205 |
+
else:
|
206 |
+
result = json_normalize(data, record_path=record_path)
|
207 |
+
expected = DataFrame([0, 1], columns=["a"])
|
208 |
+
tm.assert_frame_equal(result, expected)
|
209 |
+
|
210 |
+
def test_simple_normalize_with_separator(self, deep_nested):
|
211 |
+
# GH 14883
|
212 |
+
result = json_normalize({"A": {"A": 1, "B": 2}})
|
213 |
+
expected = DataFrame([[1, 2]], columns=["A.A", "A.B"])
|
214 |
+
tm.assert_frame_equal(result.reindex_like(expected), expected)
|
215 |
+
|
216 |
+
result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_")
|
217 |
+
expected = DataFrame([[1, 2]], columns=["A_A", "A_B"])
|
218 |
+
tm.assert_frame_equal(result.reindex_like(expected), expected)
|
219 |
+
|
220 |
+
result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3")
|
221 |
+
expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"])
|
222 |
+
tm.assert_frame_equal(result.reindex_like(expected), expected)
|
223 |
+
|
224 |
+
result = json_normalize(
|
225 |
+
deep_nested,
|
226 |
+
["states", "cities"],
|
227 |
+
meta=["country", ["states", "name"]],
|
228 |
+
sep="_",
|
229 |
+
)
|
230 |
+
expected = Index(["name", "pop", "country", "states_name"]).sort_values()
|
231 |
+
assert result.columns.sort_values().equals(expected)
|
232 |
+
|
233 |
+
def test_normalize_with_multichar_separator(self):
|
234 |
+
# GH #43831
|
235 |
+
data = {"a": [1, 2], "b": {"b_1": 2, "b_2": (3, 4)}}
|
236 |
+
result = json_normalize(data, sep="__")
|
237 |
+
expected = DataFrame([[[1, 2], 2, (3, 4)]], columns=["a", "b__b_1", "b__b_2"])
|
238 |
+
tm.assert_frame_equal(result, expected)
|
239 |
+
|
240 |
+
def test_value_array_record_prefix(self):
|
241 |
+
# GH 21536
|
242 |
+
result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.")
|
243 |
+
expected = DataFrame([[1], [2]], columns=["Prefix.0"])
|
244 |
+
tm.assert_frame_equal(result, expected)
|
245 |
+
|
246 |
+
def test_nested_object_record_path(self):
|
247 |
+
# GH 22706
|
248 |
+
data = {
|
249 |
+
"state": "Florida",
|
250 |
+
"info": {
|
251 |
+
"governor": "Rick Scott",
|
252 |
+
"counties": [
|
253 |
+
{"name": "Dade", "population": 12345},
|
254 |
+
{"name": "Broward", "population": 40000},
|
255 |
+
{"name": "Palm Beach", "population": 60000},
|
256 |
+
],
|
257 |
+
},
|
258 |
+
}
|
259 |
+
result = json_normalize(data, record_path=["info", "counties"])
|
260 |
+
expected = DataFrame(
|
261 |
+
[["Dade", 12345], ["Broward", 40000], ["Palm Beach", 60000]],
|
262 |
+
columns=["name", "population"],
|
263 |
+
)
|
264 |
+
tm.assert_frame_equal(result, expected)
|
265 |
+
|
266 |
+
def test_more_deeply_nested(self, deep_nested):
|
267 |
+
result = json_normalize(
|
268 |
+
deep_nested, ["states", "cities"], meta=["country", ["states", "name"]]
|
269 |
+
)
|
270 |
+
ex_data = {
|
271 |
+
"country": ["USA"] * 4 + ["Germany"] * 3,
|
272 |
+
"states.name": [
|
273 |
+
"California",
|
274 |
+
"California",
|
275 |
+
"Ohio",
|
276 |
+
"Ohio",
|
277 |
+
"Bayern",
|
278 |
+
"Nordrhein-Westfalen",
|
279 |
+
"Nordrhein-Westfalen",
|
280 |
+
],
|
281 |
+
"name": [
|
282 |
+
"San Francisco",
|
283 |
+
"Los Angeles",
|
284 |
+
"Columbus",
|
285 |
+
"Cleveland",
|
286 |
+
"Munich",
|
287 |
+
"Duesseldorf",
|
288 |
+
"Koeln",
|
289 |
+
],
|
290 |
+
"pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239],
|
291 |
+
}
|
292 |
+
|
293 |
+
expected = DataFrame(ex_data, columns=result.columns)
|
294 |
+
tm.assert_frame_equal(result, expected)
|
295 |
+
|
296 |
+
def test_shallow_nested(self):
|
297 |
+
data = [
|
298 |
+
{
|
299 |
+
"state": "Florida",
|
300 |
+
"shortname": "FL",
|
301 |
+
"info": {"governor": "Rick Scott"},
|
302 |
+
"counties": [
|
303 |
+
{"name": "Dade", "population": 12345},
|
304 |
+
{"name": "Broward", "population": 40000},
|
305 |
+
{"name": "Palm Beach", "population": 60000},
|
306 |
+
],
|
307 |
+
},
|
308 |
+
{
|
309 |
+
"state": "Ohio",
|
310 |
+
"shortname": "OH",
|
311 |
+
"info": {"governor": "John Kasich"},
|
312 |
+
"counties": [
|
313 |
+
{"name": "Summit", "population": 1234},
|
314 |
+
{"name": "Cuyahoga", "population": 1337},
|
315 |
+
],
|
316 |
+
},
|
317 |
+
]
|
318 |
+
|
319 |
+
result = json_normalize(
|
320 |
+
data, "counties", ["state", "shortname", ["info", "governor"]]
|
321 |
+
)
|
322 |
+
ex_data = {
|
323 |
+
"name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"],
|
324 |
+
"state": ["Florida"] * 3 + ["Ohio"] * 2,
|
325 |
+
"shortname": ["FL", "FL", "FL", "OH", "OH"],
|
326 |
+
"info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2,
|
327 |
+
"population": [12345, 40000, 60000, 1234, 1337],
|
328 |
+
}
|
329 |
+
expected = DataFrame(ex_data, columns=result.columns)
|
330 |
+
tm.assert_frame_equal(result, expected)
|
331 |
+
|
332 |
+
def test_nested_meta_path_with_nested_record_path(self, state_data):
|
333 |
+
# GH 27220
|
334 |
+
result = json_normalize(
|
335 |
+
data=state_data,
|
336 |
+
record_path=["counties"],
|
337 |
+
meta=["state", "shortname", ["info", "governor"]],
|
338 |
+
errors="ignore",
|
339 |
+
)
|
340 |
+
|
341 |
+
ex_data = {
|
342 |
+
"name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"],
|
343 |
+
"population": [12345, 40000, 60000, 1234, 1337],
|
344 |
+
"state": ["Florida"] * 3 + ["Ohio"] * 2,
|
345 |
+
"shortname": ["FL"] * 3 + ["OH"] * 2,
|
346 |
+
"info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2,
|
347 |
+
}
|
348 |
+
|
349 |
+
expected = DataFrame(ex_data)
|
350 |
+
tm.assert_frame_equal(result, expected)
|
351 |
+
|
352 |
+
def test_meta_name_conflict(self):
|
353 |
+
data = [
|
354 |
+
{
|
355 |
+
"foo": "hello",
|
356 |
+
"bar": "there",
|
357 |
+
"data": [
|
358 |
+
{"foo": "something", "bar": "else"},
|
359 |
+
{"foo": "something2", "bar": "else2"},
|
360 |
+
],
|
361 |
+
}
|
362 |
+
]
|
363 |
+
|
364 |
+
msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix"
|
365 |
+
with pytest.raises(ValueError, match=msg):
|
366 |
+
json_normalize(data, "data", meta=["foo", "bar"])
|
367 |
+
|
368 |
+
result = json_normalize(data, "data", meta=["foo", "bar"], meta_prefix="meta")
|
369 |
+
|
370 |
+
for val in ["metafoo", "metabar", "foo", "bar"]:
|
371 |
+
assert val in result
|
372 |
+
|
373 |
+
def test_meta_parameter_not_modified(self):
|
374 |
+
# GH 18610
|
375 |
+
data = [
|
376 |
+
{
|
377 |
+
"foo": "hello",
|
378 |
+
"bar": "there",
|
379 |
+
"data": [
|
380 |
+
{"foo": "something", "bar": "else"},
|
381 |
+
{"foo": "something2", "bar": "else2"},
|
382 |
+
],
|
383 |
+
}
|
384 |
+
]
|
385 |
+
|
386 |
+
COLUMNS = ["foo", "bar"]
|
387 |
+
result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta")
|
388 |
+
|
389 |
+
assert COLUMNS == ["foo", "bar"]
|
390 |
+
for val in ["metafoo", "metabar", "foo", "bar"]:
|
391 |
+
assert val in result
|
392 |
+
|
393 |
+
def test_record_prefix(self, state_data):
|
394 |
+
result = json_normalize(state_data[0], "counties")
|
395 |
+
expected = DataFrame(state_data[0]["counties"])
|
396 |
+
tm.assert_frame_equal(result, expected)
|
397 |
+
|
398 |
+
result = json_normalize(
|
399 |
+
state_data, "counties", meta="state", record_prefix="county_"
|
400 |
+
)
|
401 |
+
|
402 |
+
expected = []
|
403 |
+
for rec in state_data:
|
404 |
+
expected.extend(rec["counties"])
|
405 |
+
expected = DataFrame(expected)
|
406 |
+
expected = expected.rename(columns=lambda x: "county_" + x)
|
407 |
+
expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
|
408 |
+
|
409 |
+
tm.assert_frame_equal(result, expected)
|
410 |
+
|
411 |
+
def test_non_ascii_key(self):
|
412 |
+
testjson = (
|
413 |
+
b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
|
414 |
+
b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
|
415 |
+
).decode("utf8")
|
416 |
+
|
417 |
+
testdata = {
|
418 |
+
b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1],
|
419 |
+
"sub.A": [1, 3],
|
420 |
+
"sub.B": [2, 4],
|
421 |
+
}
|
422 |
+
expected = DataFrame(testdata)
|
423 |
+
|
424 |
+
result = json_normalize(json.loads(testjson))
|
425 |
+
tm.assert_frame_equal(result, expected)
|
426 |
+
|
427 |
+
def test_missing_field(self, author_missing_data):
|
428 |
+
# GH20030:
|
429 |
+
result = json_normalize(author_missing_data)
|
430 |
+
ex_data = [
|
431 |
+
{
|
432 |
+
"info": np.nan,
|
433 |
+
"info.created_at": np.nan,
|
434 |
+
"info.last_updated": np.nan,
|
435 |
+
"author_name.first": np.nan,
|
436 |
+
"author_name.last_name": np.nan,
|
437 |
+
},
|
438 |
+
{
|
439 |
+
"info": None,
|
440 |
+
"info.created_at": "11/08/1993",
|
441 |
+
"info.last_updated": "26/05/2012",
|
442 |
+
"author_name.first": "Jane",
|
443 |
+
"author_name.last_name": "Doe",
|
444 |
+
},
|
445 |
+
]
|
446 |
+
expected = DataFrame(ex_data)
|
447 |
+
tm.assert_frame_equal(result, expected)
|
448 |
+
|
449 |
+
@pytest.mark.parametrize(
|
450 |
+
"max_level,expected",
|
451 |
+
[
|
452 |
+
(
|
453 |
+
0,
|
454 |
+
[
|
455 |
+
{
|
456 |
+
"TextField": "Some text",
|
457 |
+
"UserField": {"Id": "ID001", "Name": "Name001"},
|
458 |
+
"CreatedBy": {"Name": "User001"},
|
459 |
+
"Image": {"a": "b"},
|
460 |
+
},
|
461 |
+
{
|
462 |
+
"TextField": "Some text",
|
463 |
+
"UserField": {"Id": "ID001", "Name": "Name001"},
|
464 |
+
"CreatedBy": {"Name": "User001"},
|
465 |
+
"Image": {"a": "b"},
|
466 |
+
},
|
467 |
+
],
|
468 |
+
),
|
469 |
+
(
|
470 |
+
1,
|
471 |
+
[
|
472 |
+
{
|
473 |
+
"TextField": "Some text",
|
474 |
+
"UserField.Id": "ID001",
|
475 |
+
"UserField.Name": "Name001",
|
476 |
+
"CreatedBy": {"Name": "User001"},
|
477 |
+
"Image": {"a": "b"},
|
478 |
+
},
|
479 |
+
{
|
480 |
+
"TextField": "Some text",
|
481 |
+
"UserField.Id": "ID001",
|
482 |
+
"UserField.Name": "Name001",
|
483 |
+
"CreatedBy": {"Name": "User001"},
|
484 |
+
"Image": {"a": "b"},
|
485 |
+
},
|
486 |
+
],
|
487 |
+
),
|
488 |
+
],
|
489 |
+
)
|
490 |
+
def test_max_level_with_records_path(self, max_level, expected):
|
491 |
+
        # GH23843: Enhanced JSON normalize
        test_input = [
            {
                "CreatedBy": {"Name": "User001"},
                "Lookup": [
                    {
                        "TextField": "Some text",
                        "UserField": {"Id": "ID001", "Name": "Name001"},
                    },
                    {
                        "TextField": "Some text",
                        "UserField": {"Id": "ID001", "Name": "Name001"},
                    },
                ],
                "Image": {"a": "b"},
                "tags": [
                    {"foo": "something", "bar": "else"},
                    {"foo": "something2", "bar": "else2"},
                ],
            }
        ]

        result = json_normalize(
            test_input,
            record_path=["Lookup"],
            meta=[["CreatedBy"], ["Image"]],
            max_level=max_level,
        )
        expected_df = DataFrame(data=expected, columns=result.columns.values)
        tm.assert_equal(expected_df, result)

    def test_nested_flattening_consistent(self):
        # see gh-21537
        df1 = json_normalize([{"A": {"B": 1}}])
        df2 = json_normalize({"dummy": [{"A": {"B": 1}}]}, "dummy")

        # They should be the same.
        tm.assert_frame_equal(df1, df2)

    def test_nonetype_record_path(self, nulls_fixture):
        # see gh-30148
        # should not raise TypeError
        result = json_normalize(
            [
                {"state": "Texas", "info": nulls_fixture},
                {"state": "Florida", "info": [{"i": 2}]},
            ],
            record_path=["info"],
        )
        expected = DataFrame({"i": 2}, index=[0])
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize("value", ["false", "true", "{}", "1", '"text"'])
    def test_non_list_record_path_errors(self, value):
        # see gh-30148, GH 26284
        parsed_value = json.loads(value)
        test_input = {"state": "Texas", "info": parsed_value}
        test_path = "info"
        msg = (
            f"{test_input} has non list value {parsed_value} for path {test_path}. "
            "Must be list or null."
        )
        with pytest.raises(TypeError, match=msg):
            json_normalize([test_input], record_path=[test_path])

    def test_meta_non_iterable(self):
        # GH 31507
        data = """[{"id": 99, "data": [{"one": 1, "two": 2}]}]"""

        result = json_normalize(json.loads(data), record_path=["data"], meta=["id"])
        expected = DataFrame(
            {"one": [1], "two": [2], "id": np.array([99], dtype=object)}
        )
        tm.assert_frame_equal(result, expected)

    def test_generator(self, state_data):
        # GH35923 Fix pd.json_normalize to not skip the first element of a
        # generator input
        def generator_data():
            yield from state_data[0]["counties"]

        result = json_normalize(generator_data())
        expected = DataFrame(state_data[0]["counties"])

        tm.assert_frame_equal(result, expected)

    def test_top_column_with_leading_underscore(self):
        # 49861
        data = {"_id": {"a1": 10, "l2": {"l3": 0}}, "gg": 4}
        result = json_normalize(data, sep="_")
        expected = DataFrame([[4, 10, 0]], columns=["gg", "_id_a1", "_id_l2_l3"])

        tm.assert_frame_equal(result, expected)


class TestNestedToRecord:
    def test_flat_stays_flat(self):
        recs = [{"flat1": 1, "flat2": 2}, {"flat3": 3, "flat2": 4}]
        result = nested_to_record(recs)
        expected = recs
        assert result == expected

    def test_one_level_deep_flattens(self):
        data = {"flat1": 1, "dict1": {"c": 1, "d": 2}}

        result = nested_to_record(data)
        expected = {"dict1.c": 1, "dict1.d": 2, "flat1": 1}

        assert result == expected

    def test_nested_flattens(self):
        data = {
            "flat1": 1,
            "dict1": {"c": 1, "d": 2},
            "nested": {"e": {"c": 1, "d": 2}, "d": 2},
        }

        result = nested_to_record(data)
        expected = {
            "dict1.c": 1,
            "dict1.d": 2,
            "flat1": 1,
            "nested.d": 2,
            "nested.e.c": 1,
            "nested.e.d": 2,
        }

        assert result == expected

    def test_json_normalize_errors(self, missing_metadata):
        # GH14583:
        # If meta keys are not always present a new option to set
        # errors='ignore' has been implemented

        msg = (
            "Key 'name' not found. To replace missing values of "
            "'name' with np.nan, pass in errors='ignore'"
        )
        with pytest.raises(KeyError, match=msg):
            json_normalize(
                data=missing_metadata,
                record_path="addresses",
                meta="name",
                errors="raise",
            )

    def test_missing_meta(self, missing_metadata):
        # GH25468
        # If metadata is nullable with errors set to ignore, the null values
        # should be numpy.nan values
        result = json_normalize(
            data=missing_metadata, record_path="addresses", meta="name", errors="ignore"
        )
        ex_data = [
            [9562, "Morris St.", "Massillon", "OH", 44646, "Alice"],
            [8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan],
        ]
        columns = ["number", "street", "city", "state", "zip", "name"]
        expected = DataFrame(ex_data, columns=columns)
        tm.assert_frame_equal(result, expected)

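The errors="ignore" behaviour these tests pin down is easy to see in isolation with the public API (a minimal sketch; the records below are invented for illustration):

import pandas as pd

data = [
    {"name": "Alice", "addresses": [{"city": "Massillon"}]},
    {"addresses": [{"city": "Elizabethton"}]},  # no "name" meta key
]

# With errors="ignore", the missing meta value becomes NaN instead of the
# KeyError raised under errors="raise".
out = pd.json_normalize(data, record_path="addresses", meta="name", errors="ignore")
print(out)  # columns: city, name -- "name" is NaN for the second record
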
    def test_missing_nested_meta(self):
        # GH44312
        # If errors="ignore" and nested metadata is null, we should return nan
        data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]}
        result = json_normalize(
            data,
            record_path="value",
            meta=["meta", ["nested_meta", "leaf"]],
            errors="ignore",
        )
        ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]]
        columns = ["rec", "meta", "nested_meta.leaf"]
        expected = DataFrame(ex_data, columns=columns).astype(
            {"nested_meta.leaf": object}
        )
        tm.assert_frame_equal(result, expected)

        # If errors="raise" and nested metadata is null, we should raise with the
        # key of the first missing level
        with pytest.raises(KeyError, match="'leaf' not found"):
            json_normalize(
                data,
                record_path="value",
                meta=["meta", ["nested_meta", "leaf"]],
                errors="raise",
            )

    def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata):
        # GH41876
        # Ensure errors='raise' works as intended even when a record_path of length
        # greater than one is passed in
        msg = (
            "Key 'name' not found. To replace missing values of "
            "'name' with np.nan, pass in errors='ignore'"
        )
        with pytest.raises(KeyError, match=msg):
            json_normalize(
                data=missing_metadata,
                record_path=["previous_residences", "cities"],
                meta="name",
                errors="raise",
            )

    def test_missing_meta_multilevel_record_path_errors_ignore(self, missing_metadata):
        # GH41876
        # Ensure errors='ignore' works as intended even when a record_path of length
        # greater than one is passed in
        result = json_normalize(
            data=missing_metadata,
            record_path=["previous_residences", "cities"],
            meta="name",
            errors="ignore",
        )
        ex_data = [
            ["Foo York City", "Alice"],
            ["Barmingham", np.nan],
        ]
        columns = ["city_name", "name"]
        expected = DataFrame(ex_data, columns=columns)
        tm.assert_frame_equal(result, expected)

    def test_donot_drop_nonevalues(self):
        # GH21356
        data = [
            {"info": None, "author_name": {"first": "Smith", "last_name": "Appleseed"}},
            {
                "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
                "author_name": {"first": "Jane", "last_name": "Doe"},
            },
        ]
        result = nested_to_record(data)
        expected = [
            {
                "info": None,
                "author_name.first": "Smith",
                "author_name.last_name": "Appleseed",
            },
            {
                "author_name.first": "Jane",
                "author_name.last_name": "Doe",
                "info.created_at": "11/08/1993",
                "info.last_updated": "26/05/2012",
            },
        ]

        assert result == expected

    def test_nonetype_top_level_bottom_level(self):
        # GH21158: If inner level json has a key with a null value
        # make sure it does not do a new_d.pop twice and except
        data = {
            "id": None,
            "location": {
                "country": {
                    "state": {
                        "id": None,
                        "town.info": {
                            "id": None,
                            "region": None,
                            "x": 49.151580810546875,
                            "y": -33.148521423339844,
                            "z": 27.572303771972656,
                        },
                    }
                }
            },
        }
        result = nested_to_record(data)
        expected = {
            "id": None,
            "location.country.state.id": None,
            "location.country.state.town.info.id": None,
            "location.country.state.town.info.region": None,
            "location.country.state.town.info.x": 49.151580810546875,
            "location.country.state.town.info.y": -33.148521423339844,
            "location.country.state.town.info.z": 27.572303771972656,
        }
        assert result == expected

    def test_nonetype_multiple_levels(self):
        # GH21158: If inner level json has a key with a null value
        # make sure it does not do a new_d.pop twice and except
        data = {
            "id": None,
            "location": {
                "id": None,
                "country": {
                    "id": None,
                    "state": {
                        "id": None,
                        "town.info": {
                            "region": None,
                            "x": 49.151580810546875,
                            "y": -33.148521423339844,
                            "z": 27.572303771972656,
                        },
                    },
                },
            },
        }
        result = nested_to_record(data)
        expected = {
            "id": None,
            "location.id": None,
            "location.country.id": None,
            "location.country.state.id": None,
            "location.country.state.town.info.region": None,
            "location.country.state.town.info.x": 49.151580810546875,
            "location.country.state.town.info.y": -33.148521423339844,
            "location.country.state.town.info.z": 27.572303771972656,
        }
        assert result == expected

    @pytest.mark.parametrize(
        "max_level, expected",
        [
            (
                None,
                [
                    {
                        "CreatedBy.Name": "User001",
                        "Lookup.TextField": "Some text",
                        "Lookup.UserField.Id": "ID001",
                        "Lookup.UserField.Name": "Name001",
                        "Image.a": "b",
                    }
                ],
            ),
            (
                0,
                [
                    {
                        "CreatedBy": {"Name": "User001"},
                        "Lookup": {
                            "TextField": "Some text",
                            "UserField": {"Id": "ID001", "Name": "Name001"},
                        },
                        "Image": {"a": "b"},
                    }
                ],
            ),
            (
                1,
                [
                    {
                        "CreatedBy.Name": "User001",
                        "Lookup.TextField": "Some text",
                        "Lookup.UserField": {"Id": "ID001", "Name": "Name001"},
                        "Image.a": "b",
                    }
                ],
            ),
        ],
    )
    def test_with_max_level(self, max_level, expected, max_level_test_input_data):
        # GH23843: Enhanced JSON normalize
        output = nested_to_record(max_level_test_input_data, max_level=max_level)
        assert output == expected

    def test_with_large_max_level(self):
        # GH23843: Enhanced JSON normalize
        max_level = 100
        input_data = [
            {
                "CreatedBy": {
                    "user": {
                        "name": {"firstname": "Leo", "LastName": "Thomson"},
                        "family_tree": {
                            "father": {
                                "name": "Father001",
                                "father": {
                                    "Name": "Father002",
                                    "father": {
                                        "name": "Father003",
                                        "father": {"Name": "Father004"},
                                    },
                                },
                            }
                        },
                    }
                }
            }
        ]
        expected = [
            {
                "CreatedBy.user.name.firstname": "Leo",
                "CreatedBy.user.name.LastName": "Thomson",
                "CreatedBy.user.family_tree.father.name": "Father001",
                "CreatedBy.user.family_tree.father.father.Name": "Father002",
                "CreatedBy.user.family_tree.father.father.father.name": "Father003",
                "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004",  # noqa: E501
            }
        ]
        output = nested_to_record(input_data, max_level=max_level)
        assert output == expected

    def test_series_non_zero_index(self):
        # GH 19020
        data = {
            0: {"id": 1, "name": "Foo", "elements": {"a": 1}},
            1: {"id": 2, "name": "Bar", "elements": {"b": 2}},
            2: {"id": 3, "name": "Baz", "elements": {"c": 3}},
        }
        s = Series(data)
        s.index = [1, 2, 3]
        result = json_normalize(s)
        expected = DataFrame(
            {
                "id": [1, 2, 3],
                "name": ["Foo", "Bar", "Baz"],
                "elements.a": [1.0, np.nan, np.nan],
                "elements.b": [np.nan, 2.0, np.nan],
                "elements.c": [np.nan, np.nan, 3.0],
            }
        )
        tm.assert_frame_equal(result, expected)
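Taken together, the max_level tests above pin down how flattening depth is limited. The same behaviour in isolation with the public json_normalize (a minimal sketch; the record is invented for illustration):

import pandas as pd

record = [{"state": "TX", "info": {"pop": 30, "detail": {"area": 695662}}}]

# max_level=None (the default) flattens all the way down
print(pd.json_normalize(record).columns.tolist())
# ['state', 'info.pop', 'info.detail.area']

# max_level=1 flattens one level; deeper dicts are kept as objects
print(pd.json_normalize(record, max_level=1).columns.tolist())
# ['state', 'info.pop', 'info.detail']
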
venv/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py
ADDED
@@ -0,0 +1,2202 @@
import datetime
from datetime import timedelta
from decimal import Decimal
from io import (
    BytesIO,
    StringIO,
)
import json
import os
import sys
import time

import numpy as np
import pytest

from pandas._config import using_pyarrow_string_dtype

from pandas.compat import IS64
import pandas.util._test_decorators as td

import pandas as pd
from pandas import (
    NA,
    DataFrame,
    DatetimeIndex,
    Index,
    RangeIndex,
    Series,
    Timestamp,
    date_range,
    read_json,
)
import pandas._testing as tm
from pandas.core.arrays import (
    ArrowStringArray,
    StringArray,
)
from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics

from pandas.io.json import ujson_dumps


def test_literal_json_deprecation():
    # PR 53409
    expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])

    jsonl = """{"a": 1, "b": 2}
        {"a": 3, "b": 4}
        {"a": 5, "b": 6}
        {"a": 7, "b": 8}"""

    msg = (
        "Passing literal json to 'read_json' is deprecated and "
        "will be removed in a future version. To read from a "
        "literal string, wrap it in a 'StringIO' object."
    )

    with tm.assert_produces_warning(FutureWarning, match=msg):
        try:
            read_json(jsonl, lines=False)
        except ValueError:
            pass

    with tm.assert_produces_warning(FutureWarning, match=msg):
        read_json(expected.to_json(), lines=False)

    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
    tm.assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, match=msg):
        try:
            result = read_json(
                '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n',
                lines=False,
            )
        except ValueError:
            pass

    with tm.assert_produces_warning(FutureWarning, match=msg):
        try:
            result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=False)
        except ValueError:
            pass
    tm.assert_frame_equal(result, expected)


def assert_json_roundtrip_equal(result, expected, orient):
    if orient in ("records", "values"):
        expected = expected.reset_index(drop=True)
    if orient == "values":
        expected.columns = range(len(expected.columns))
    tm.assert_frame_equal(result, expected)

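This helper underlies most of the round-trip tests in the class below; the pattern in isolation, using only the public to_json/read_json API (a minimal sketch):

from io import StringIO

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.5, 4.5]}, index=["x", "y"])

# Each orient serializes a different JSON shape; "records" and "values"
# drop the index, which is why the helper resets it before comparing.
for orient in ["split", "records", "index", "columns", "values"]:
    payload = df.to_json(orient=orient)
    result = pd.read_json(StringIO(payload), orient=orient)
    print(orient, "->", payload)
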
class TestPandasContainer:
    @pytest.fixture
    def categorical_frame(self):
        data = {
            c: np.random.default_rng(i).standard_normal(30)
            for i, c in enumerate(list("ABCD"))
        }
        cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * 15
        data["E"] = list(reversed(cat))
        data["sort"] = np.arange(30, dtype="int64")
        return DataFrame(data, index=pd.CategoricalIndex(cat, name="E"))

    @pytest.fixture
    def datetime_series(self):
        # Same as usual datetime_series, but with index freq set to None,
        # since that doesn't round-trip, see GH#33711
        ser = Series(
            1.1 * np.arange(10, dtype=np.float64),
            index=date_range("2020-01-01", periods=10),
            name="ts",
        )
        ser.index = ser.index._with_freq(None)
        return ser

    @pytest.fixture
    def datetime_frame(self):
        # Same as usual datetime_frame, but with index freq set to None,
        # since that doesn't round-trip, see GH#33711
        df = DataFrame(
            np.random.default_rng(2).standard_normal((30, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=30, freq="B"),
        )
        df.index = df.index._with_freq(None)
        return df

    def test_frame_double_encoded_labels(self, orient):
        df = DataFrame(
            [["a", "b"], ["c", "d"]],
            index=['index " 1', "index / 2"],
            columns=["a \\ b", "y / z"],
        )

        data = StringIO(df.to_json(orient=orient))
        result = read_json(data, orient=orient)
        expected = df.copy()
        assert_json_roundtrip_equal(result, expected, orient)

    @pytest.mark.parametrize("orient", ["split", "records", "values"])
    def test_frame_non_unique_index(self, orient):
        df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
        data = StringIO(df.to_json(orient=orient))
        result = read_json(data, orient=orient)
        expected = df.copy()

        assert_json_roundtrip_equal(result, expected, orient)

    @pytest.mark.parametrize("orient", ["index", "columns"])
    def test_frame_non_unique_index_raises(self, orient):
        df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
        msg = f"DataFrame index must be unique for orient='{orient}'"
        with pytest.raises(ValueError, match=msg):
            df.to_json(orient=orient)

    @pytest.mark.parametrize("orient", ["split", "values"])
    @pytest.mark.parametrize(
        "data",
        [
            [["a", "b"], ["c", "d"]],
            [[1.5, 2.5], [3.5, 4.5]],
            [[1, 2.5], [3, 4.5]],
            [[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
        ],
    )
    def test_frame_non_unique_columns(self, orient, data):
        df = DataFrame(data, index=[1, 2], columns=["x", "x"])

        result = read_json(
            StringIO(df.to_json(orient=orient)), orient=orient, convert_dates=["x"]
        )
        if orient == "values":
            expected = DataFrame(data)
            if expected.iloc[:, 0].dtype == "datetime64[ns]":
                # orient == "values" by default will write Timestamp objects out
                # in milliseconds; these are internally stored in nanosecond,
                # so divide to get where we need
                # TODO: a to_epoch method would also solve; see GH 14772
                expected.isetitem(0, expected.iloc[:, 0].astype(np.int64) // 1000000)
        elif orient == "split":
            expected = df
            expected.columns = ["x", "x.1"]

        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("orient", ["index", "columns", "records"])
    def test_frame_non_unique_columns_raises(self, orient):
        df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])

        msg = f"DataFrame columns must be unique for orient='{orient}'"
        with pytest.raises(ValueError, match=msg):
            df.to_json(orient=orient)

    def test_frame_default_orient(self, float_frame):
        assert float_frame.to_json() == float_frame.to_json(orient="columns")

    @pytest.mark.parametrize("dtype", [False, float])
    @pytest.mark.parametrize("convert_axes", [True, False])
    def test_roundtrip_simple(self, orient, convert_axes, dtype, float_frame):
        data = StringIO(float_frame.to_json(orient=orient))
        result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)

        expected = float_frame

        assert_json_roundtrip_equal(result, expected, orient)

    @pytest.mark.parametrize("dtype", [False, np.int64])
    @pytest.mark.parametrize("convert_axes", [True, False])
    def test_roundtrip_intframe(self, orient, convert_axes, dtype, int_frame):
        data = StringIO(int_frame.to_json(orient=orient))
        result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
        expected = int_frame
        assert_json_roundtrip_equal(result, expected, orient)

    @pytest.mark.parametrize("dtype", [None, np.float64, int, "U3"])
    @pytest.mark.parametrize("convert_axes", [True, False])
    def test_roundtrip_str_axes(self, orient, convert_axes, dtype):
        df = DataFrame(
            np.zeros((200, 4)),
            columns=[str(i) for i in range(4)],
            index=[str(i) for i in range(200)],
            dtype=dtype,
        )

        data = StringIO(df.to_json(orient=orient))
        result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)

        expected = df.copy()
        if not dtype:
            expected = expected.astype(np.int64)

        # index columns, and records orients cannot fully preserve the string
        # dtype for axes as the index and column labels are used as keys in
        # JSON objects. JSON keys are by definition strings, so there's no way
        # to disambiguate whether those keys actually were strings or numeric
        # beforehand and numeric wins out.
        if convert_axes and (orient in ("index", "columns")):
            expected.columns = expected.columns.astype(np.int64)
            expected.index = expected.index.astype(np.int64)
        elif orient == "records" and convert_axes:
            expected.columns = expected.columns.astype(np.int64)
        elif convert_axes and orient == "split":
            expected.columns = expected.columns.astype(np.int64)

        assert_json_roundtrip_equal(result, expected, orient)

    @pytest.mark.parametrize("convert_axes", [True, False])
    def test_roundtrip_categorical(
        self, request, orient, categorical_frame, convert_axes, using_infer_string
    ):
        # TODO: create a better frame to test with and improve coverage
        if orient in ("index", "columns"):
            request.applymarker(
                pytest.mark.xfail(
                    reason=f"Can't have duplicate index values for orient '{orient}')"
                )
            )

        data = StringIO(categorical_frame.to_json(orient=orient))
        result = read_json(data, orient=orient, convert_axes=convert_axes)

        expected = categorical_frame.copy()
        expected.index = expected.index.astype(
            str if not using_infer_string else "string[pyarrow_numpy]"
        )  # Categorical not preserved
        expected.index.name = None  # index names aren't preserved in JSON
        assert_json_roundtrip_equal(result, expected, orient)

    @pytest.mark.parametrize("convert_axes", [True, False])
    def test_roundtrip_empty(self, orient, convert_axes):
        empty_frame = DataFrame()
        data = StringIO(empty_frame.to_json(orient=orient))
        result = read_json(data, orient=orient, convert_axes=convert_axes)
        if orient == "split":
            idx = Index([], dtype=(float if convert_axes else object))
            expected = DataFrame(index=idx, columns=idx)
        elif orient in ["index", "columns"]:
            expected = DataFrame()
        else:
            expected = empty_frame.copy()

        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("convert_axes", [True, False])
    def test_roundtrip_timestamp(self, orient, convert_axes, datetime_frame):
        # TODO: improve coverage with date_format parameter
        data = StringIO(datetime_frame.to_json(orient=orient))
        result = read_json(data, orient=orient, convert_axes=convert_axes)
        expected = datetime_frame.copy()

        if not convert_axes:  # one off for ts handling
            # DTI gets converted to epoch values
            idx = expected.index.view(np.int64) // 1000000
            if orient != "split":  # TODO: handle consistently across orients
                idx = idx.astype(str)

            expected.index = idx

        assert_json_roundtrip_equal(result, expected, orient)

    @pytest.mark.parametrize("convert_axes", [True, False])
    def test_roundtrip_mixed(self, orient, convert_axes):
        index = Index(["a", "b", "c", "d", "e"])
        values = {
            "A": [0.0, 1.0, 2.0, 3.0, 4.0],
            "B": [0.0, 1.0, 0.0, 1.0, 0.0],
            "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
            "D": [True, False, True, False, True],
        }

        df = DataFrame(data=values, index=index)

        data = StringIO(df.to_json(orient=orient))
        result = read_json(data, orient=orient, convert_axes=convert_axes)

        expected = df.copy()
        expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))

        assert_json_roundtrip_equal(result, expected, orient)

    @pytest.mark.xfail(
        reason="#50456 Column multiindex is stored and loaded differently",
        raises=AssertionError,
    )
    @pytest.mark.parametrize(
        "columns",
        [
            [["2022", "2022"], ["JAN", "FEB"]],
            [["2022", "2023"], ["JAN", "JAN"]],
            [["2022", "2022"], ["JAN", "JAN"]],
        ],
    )
    def test_roundtrip_multiindex(self, columns):
        df = DataFrame(
            [[1, 2], [3, 4]],
            columns=pd.MultiIndex.from_arrays(columns),
        )
        data = StringIO(df.to_json(orient="split"))
        result = read_json(data, orient="split")
        tm.assert_frame_equal(result, df)

    @pytest.mark.parametrize(
        "data,msg,orient",
        [
            ('{"key":b:a:d}', "Expected object or value", "columns"),
            # too few indices
            (
                '{"columns":["A","B"],'
                '"index":["2","3"],'
                '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
                "|".join(
                    [
                        r"Length of values \(3\) does not match length of index \(2\)",
                    ]
                ),
                "split",
            ),
            # too many columns
            (
                '{"columns":["A","B","C"],'
                '"index":["1","2","3"],'
                '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
                "3 columns passed, passed data had 2 columns",
                "split",
            ),
            # bad key
            (
                '{"badkey":["A","B"],'
                '"index":["2","3"],'
                '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
                r"unexpected key\(s\): badkey",
                "split",
            ),
        ],
    )
    def test_frame_from_json_bad_data_raises(self, data, msg, orient):
        with pytest.raises(ValueError, match=msg):
            read_json(StringIO(data), orient=orient)

    @pytest.mark.parametrize("dtype", [True, False])
    @pytest.mark.parametrize("convert_axes", [True, False])
    def test_frame_from_json_missing_data(self, orient, convert_axes, dtype):
        num_df = DataFrame([[1, 2], [4, 5, 6]])

        result = read_json(
            StringIO(num_df.to_json(orient=orient)),
            orient=orient,
            convert_axes=convert_axes,
            dtype=dtype,
        )
        assert np.isnan(result.iloc[0, 2])

        obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
        result = read_json(
            StringIO(obj_df.to_json(orient=orient)),
            orient=orient,
            convert_axes=convert_axes,
            dtype=dtype,
        )
        assert np.isnan(result.iloc[0, 2])

    @pytest.mark.parametrize("dtype", [True, False])
    def test_frame_read_json_dtype_missing_value(self, dtype):
        # GH28501 Parse missing values using read_json with dtype=False
        # to NaN instead of None
        result = read_json(StringIO("[null]"), dtype=dtype)
        expected = DataFrame([np.nan])

        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("inf", [np.inf, -np.inf])
    @pytest.mark.parametrize("dtype", [True, False])
    def test_frame_infinity(self, inf, dtype):
        # infinities get mapped to nulls which get mapped to NaNs during
        # deserialisation
        df = DataFrame([[1, 2], [4, 5, 6]])
        df.loc[0, 2] = inf

        data = StringIO(df.to_json())
        result = read_json(data, dtype=dtype)
        assert np.isnan(result.iloc[0, 2])

    @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")
    @pytest.mark.parametrize(
        "value,precision,expected_val",
        [
            (0.95, 1, 1.0),
            (1.95, 1, 2.0),
            (-1.95, 1, -2.0),
            (0.995, 2, 1.0),
            (0.9995, 3, 1.0),
            (0.99999999999999944, 15, 1.0),
        ],
    )
    def test_frame_to_json_float_precision(self, value, precision, expected_val):
        df = DataFrame([{"a_float": value}])
        encoded = df.to_json(double_precision=precision)
        assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'

    def test_frame_to_json_except(self):
        df = DataFrame([1, 2, 3])
        msg = "Invalid value 'garbage' for option 'orient'"
        with pytest.raises(ValueError, match=msg):
            df.to_json(orient="garbage")

    def test_frame_empty(self):
        df = DataFrame(columns=["jim", "joe"])
        assert not df._is_mixed_type

        data = StringIO(df.to_json())
        result = read_json(data, dtype=dict(df.dtypes))
        tm.assert_frame_equal(result, df, check_index_type=False)

    def test_frame_empty_to_json(self):
        # GH 7445
        df = DataFrame({"test": []}, index=[])
        result = df.to_json(orient="columns")
        expected = '{"test":{}}'
        assert result == expected

    def test_frame_empty_mixedtype(self):
        # mixed type
        df = DataFrame(columns=["jim", "joe"])
        df["joe"] = df["joe"].astype("i8")
        assert df._is_mixed_type
        data = df.to_json()
        tm.assert_frame_equal(
            read_json(StringIO(data), dtype=dict(df.dtypes)),
            df,
            check_index_type=False,
        )

    def test_frame_mixedtype_orient(self):  # GH10289
        vals = [
            [10, 1, "foo", 0.1, 0.01],
            [20, 2, "bar", 0.2, 0.02],
            [30, 3, "baz", 0.3, 0.03],
            [40, 4, "qux", 0.4, 0.04],
        ]

        df = DataFrame(
            vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
        )

        assert df._is_mixed_type
        right = df.copy()

        for orient in ["split", "index", "columns"]:
            inp = StringIO(df.to_json(orient=orient))
            left = read_json(inp, orient=orient, convert_axes=False)
            tm.assert_frame_equal(left, right)

        right.index = RangeIndex(len(df))
        inp = StringIO(df.to_json(orient="records"))
        left = read_json(inp, orient="records", convert_axes=False)
        tm.assert_frame_equal(left, right)

        right.columns = RangeIndex(df.shape[1])
        inp = StringIO(df.to_json(orient="values"))
        left = read_json(inp, orient="values", convert_axes=False)
        tm.assert_frame_equal(left, right)

    def test_v12_compat(self, datapath):
        dti = date_range("2000-01-03", "2000-01-07")
        # freq doesn't roundtrip
        dti = DatetimeIndex(np.asarray(dti), freq=None)
        df = DataFrame(
            [
                [1.56808523, 0.65727391, 1.81021139, -0.17251653],
                [-0.2550111, -0.08072427, -0.03202878, -0.17581665],
                [1.51493992, 0.11805825, 1.629455, -1.31506612],
                [-0.02765498, 0.44679743, 0.33192641, -0.27885413],
                [0.05951614, -2.69652057, 1.28163262, 0.34703478],
            ],
            columns=["A", "B", "C", "D"],
            index=dti,
        )
        df["date"] = Timestamp("19920106 18:21:32.12").as_unit("ns")
        df.iloc[3, df.columns.get_loc("date")] = Timestamp("20130101")
        df["modified"] = df["date"]
        df.iloc[1, df.columns.get_loc("modified")] = pd.NaT

        dirpath = datapath("io", "json", "data")
        v12_json = os.path.join(dirpath, "tsframe_v012.json")
        df_unser = read_json(v12_json)
        tm.assert_frame_equal(df, df_unser)

        df_iso = df.drop(["modified"], axis=1)
        v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
        df_unser_iso = read_json(v12_iso_json)
        tm.assert_frame_equal(df_iso, df_unser_iso, check_column_type=False)

    def test_blocks_compat_GH9037(self, using_infer_string):
        index = date_range("20000101", periods=10, freq="h")
        # freq doesn't round-trip
        index = DatetimeIndex(list(index), freq=None)

        df_mixed = DataFrame(
            {
                "float_1": [
                    -0.92077639,
                    0.77434435,
                    1.25234727,
                    0.61485564,
                    -0.60316077,
                    0.24653374,
                    0.28668979,
                    -2.51969012,
                    0.95748401,
                    -1.02970536,
                ],
                "int_1": [
                    19680418,
                    75337055,
                    99973684,
                    65103179,
                    79373900,
                    40314334,
                    21290235,
                    4991321,
                    41903419,
                    16008365,
                ],
                "str_1": [
                    "78c608f1",
                    "64a99743",
                    "13d2ff52",
                    "ca7f4af2",
                    "97236474",
                    "bde7e214",
                    "1a6bde47",
                    "b1190be5",
                    "7a669144",
                    "8d64d068",
                ],
                "float_2": [
                    -0.0428278,
                    -1.80872357,
                    3.36042349,
                    -0.7573685,
                    -0.48217572,
                    0.86229683,
                    1.08935819,
                    0.93898739,
                    -0.03030452,
                    1.43366348,
                ],
                "str_2": [
                    "14f04af9",
                    "d085da90",
                    "4bcfac83",
                    "81504caf",
                    "2ffef4a9",
                    "08e2f5c4",
                    "07e1af03",
                    "addbd4a7",
                    "1f6a09ba",
                    "4bfc4d87",
                ],
                "int_2": [
                    86967717,
                    98098830,
                    51927505,
                    20372254,
                    12601730,
                    20884027,
                    34193846,
                    10561746,
                    24867120,
                    76131025,
                ],
            },
            index=index,
        )

        # JSON deserialisation always creates unicode strings
        df_mixed.columns = df_mixed.columns.astype(
            np.str_ if not using_infer_string else "string[pyarrow_numpy]"
        )
        data = StringIO(df_mixed.to_json(orient="split"))
        df_roundtrip = read_json(data, orient="split")
        tm.assert_frame_equal(
            df_mixed,
            df_roundtrip,
            check_index_type=True,
            check_column_type=True,
            by_blocks=True,
            check_exact=True,
        )

    def test_frame_nonprintable_bytes(self):
        # GH14256: failing column caused segfaults, if it is not the last one

        class BinaryThing:
            def __init__(self, hexed) -> None:
                self.hexed = hexed
                self.binary = bytes.fromhex(hexed)

            def __str__(self) -> str:
                return self.hexed

        hexed = "574b4454ba8c5eb4f98a8f45"
        binthing = BinaryThing(hexed)

        # verify the proper conversion of printable content
        df_printable = DataFrame({"A": [binthing.hexed]})
        assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'

        # check if non-printable content throws appropriate Exception
        df_nonprintable = DataFrame({"A": [binthing]})
        msg = "Unsupported UTF-8 sequence length when encoding string"
        with pytest.raises(OverflowError, match=msg):
            df_nonprintable.to_json()

        # the same with multiple columns threw segfaults
        df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
        with pytest.raises(OverflowError, match=msg):
            df_mixed.to_json()

        # default_handler should resolve exceptions for non-string types
        result = df_nonprintable.to_json(default_handler=str)
        expected = f'{{"A":{{"0":"{hexed}"}}}}'
        assert result == expected
        assert (
            df_mixed.to_json(default_handler=str)
            == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
        )

    def test_label_overflow(self):
        # GH14256: buffer length not checked when writing label
        result = DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
        expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
        assert result == expected

    def test_series_non_unique_index(self):
        s = Series(["a", "b"], index=[1, 1])

        msg = "Series index must be unique for orient='index'"
        with pytest.raises(ValueError, match=msg):
            s.to_json(orient="index")

        tm.assert_series_equal(
            s,
            read_json(
                StringIO(s.to_json(orient="split")), orient="split", typ="series"
            ),
        )
        unserialized = read_json(
            StringIO(s.to_json(orient="records")), orient="records", typ="series"
        )
        tm.assert_equal(s.values, unserialized.values)

    def test_series_default_orient(self, string_series):
        assert string_series.to_json() == string_series.to_json(orient="index")

    def test_series_roundtrip_simple(self, orient, string_series, using_infer_string):
        data = StringIO(string_series.to_json(orient=orient))
        result = read_json(data, typ="series", orient=orient)

        expected = string_series
        if using_infer_string and orient in ("split", "index", "columns"):
            # These schemas don't contain dtypes, so we infer string
            expected.index = expected.index.astype("string[pyarrow_numpy]")
        if orient in ("values", "records"):
            expected = expected.reset_index(drop=True)
        if orient != "split":
            expected.name = None

        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("dtype", [False, None])
    def test_series_roundtrip_object(self, orient, dtype, object_series):
        data = StringIO(object_series.to_json(orient=orient))
        result = read_json(data, typ="series", orient=orient, dtype=dtype)

        expected = object_series
        if orient in ("values", "records"):
            expected = expected.reset_index(drop=True)
        if orient != "split":
            expected.name = None

        tm.assert_series_equal(result, expected)

    def test_series_roundtrip_empty(self, orient):
        empty_series = Series([], index=[], dtype=np.float64)
        data = StringIO(empty_series.to_json(orient=orient))
        result = read_json(data, typ="series", orient=orient)

        expected = empty_series.reset_index(drop=True)
        if orient in ("split"):
            expected.index = expected.index.astype(np.float64)

        tm.assert_series_equal(result, expected)

    def test_series_roundtrip_timeseries(self, orient, datetime_series):
        data = StringIO(datetime_series.to_json(orient=orient))
        result = read_json(data, typ="series", orient=orient)

        expected = datetime_series
        if orient in ("values", "records"):
            expected = expected.reset_index(drop=True)
        if orient != "split":
            expected.name = None

        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("dtype", [np.float64, int])
    def test_series_roundtrip_numeric(self, orient, dtype):
        s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
        data = StringIO(s.to_json(orient=orient))
        result = read_json(data, typ="series", orient=orient)

        expected = s.copy()
        if orient in ("values", "records"):
            expected = expected.reset_index(drop=True)

        tm.assert_series_equal(result, expected)

    def test_series_to_json_except(self):
        s = Series([1, 2, 3])
        msg = "Invalid value 'garbage' for option 'orient'"
        with pytest.raises(ValueError, match=msg):
            s.to_json(orient="garbage")

    def test_series_from_json_precise_float(self):
        s = Series([4.56, 4.56, 4.56])
        result = read_json(StringIO(s.to_json()), typ="series", precise_float=True)
        tm.assert_series_equal(result, s, check_index_type=False)

    def test_series_with_dtype(self):
        # GH 21986
        s = Series([4.56, 4.56, 4.56])
        result = read_json(StringIO(s.to_json()), typ="series", dtype=np.int64)
        expected = Series([4] * 3)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "dtype,expected",
        [
            (True, Series(["2000-01-01"], dtype="datetime64[ns]")),
            (False, Series([946684800000])),
        ],
    )
    def test_series_with_dtype_datetime(self, dtype, expected):
        s = Series(["2000-01-01"], dtype="datetime64[ns]")
        data = StringIO(s.to_json())
        result = read_json(data, typ="series", dtype=dtype)
        tm.assert_series_equal(result, expected)

    def test_frame_from_json_precise_float(self):
        df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
        result = read_json(StringIO(df.to_json()), precise_float=True)
        tm.assert_frame_equal(result, df)

    def test_typ(self):
        s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
        result = read_json(StringIO(s.to_json()), typ=None)
        tm.assert_series_equal(result, s)

    def test_reconstruction_index(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]])
        result = read_json(StringIO(df.to_json()))
        tm.assert_frame_equal(result, df)

        df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
        result = read_json(StringIO(df.to_json()))
        tm.assert_frame_equal(result, df)

    def test_path(self, float_frame, int_frame, datetime_frame):
        with tm.ensure_clean("test.json") as path:
            for df in [float_frame, int_frame, datetime_frame]:
                df.to_json(path)
                read_json(path)

    def test_axis_dates(self, datetime_series, datetime_frame):
        # frame
        json = StringIO(datetime_frame.to_json())
        result = read_json(json)
        tm.assert_frame_equal(result, datetime_frame)

        # series
        json = StringIO(datetime_series.to_json())
        result = read_json(json, typ="series")
        tm.assert_series_equal(result, datetime_series, check_names=False)
        assert result.name is None

    def test_convert_dates(self, datetime_series, datetime_frame):
        # frame
        df = datetime_frame
        df["date"] = Timestamp("20130101").as_unit("ns")

        json = StringIO(df.to_json())
        result = read_json(json)
        tm.assert_frame_equal(result, df)

        df["foo"] = 1.0
        json = StringIO(df.to_json(date_unit="ns"))

        result = read_json(json, convert_dates=False)
        expected = df.copy()
        expected["date"] = expected["date"].values.view("i8")
        expected["foo"] = expected["foo"].astype("int64")
        tm.assert_frame_equal(result, expected)

        # series
        ts = Series(Timestamp("20130101").as_unit("ns"), index=datetime_series.index)
        json = StringIO(ts.to_json())
        result = read_json(json, typ="series")
        tm.assert_series_equal(result, ts)

    @pytest.mark.parametrize("date_format", ["epoch", "iso"])
    @pytest.mark.parametrize("as_object", [True, False])
    @pytest.mark.parametrize("date_typ", [datetime.date, datetime.datetime, Timestamp])
    def test_date_index_and_values(self, date_format, as_object, date_typ):
        data = [date_typ(year=2020, month=1, day=1), pd.NaT]
        if as_object:
            data.append("a")

        ser = Series(data, index=data)
        result = ser.to_json(date_format=date_format)

        if date_format == "epoch":
            expected = '{"1577836800000":1577836800000,"null":null}'
        else:
            expected = (
                '{"2020-01-01T00:00:00.000":"2020-01-01T00:00:00.000","null":null}'
            )

        if as_object:
            expected = expected.replace("}", ',"a":"a"}')

        assert result == expected

    @pytest.mark.parametrize(
        "infer_word",
        [
            "trade_time",
            "date",
            "datetime",
            "sold_at",
            "modified",
            "timestamp",
            "timestamps",
        ],
    )
    def test_convert_dates_infer(self, infer_word):
        # GH10747

        data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
        expected = DataFrame(
            [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
        )

        result = read_json(StringIO(ujson_dumps(data)))[["id", infer_word]]
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "date,date_unit",
        [
            ("20130101 20:43:42.123", None),
            ("20130101 20:43:42", "s"),
            ("20130101 20:43:42.123", "ms"),
            ("20130101 20:43:42.123456", "us"),
            ("20130101 20:43:42.123456789", "ns"),
        ],
    )
    def test_date_format_frame(self, date, date_unit, datetime_frame):
        df = datetime_frame

        df["date"] = Timestamp(date).as_unit("ns")
        df.iloc[1, df.columns.get_loc("date")] = pd.NaT
        df.iloc[5, df.columns.get_loc("date")] = pd.NaT
        if date_unit:
            json = df.to_json(date_format="iso", date_unit=date_unit)
        else:
            json = df.to_json(date_format="iso")

        result = read_json(StringIO(json))
        expected = df.copy()
        tm.assert_frame_equal(result, expected)

    def test_date_format_frame_raises(self, datetime_frame):
        df = datetime_frame
        msg = "Invalid value 'foo' for option 'date_unit'"
        with pytest.raises(ValueError, match=msg):
            df.to_json(date_format="iso", date_unit="foo")

    @pytest.mark.parametrize(
        "date,date_unit",
        [
            ("20130101 20:43:42.123", None),
            ("20130101 20:43:42", "s"),
            ("20130101 20:43:42.123", "ms"),
            ("20130101 20:43:42.123456", "us"),
            ("20130101 20:43:42.123456789", "ns"),
        ],
    )
    def test_date_format_series(self, date, date_unit, datetime_series):
        ts = Series(Timestamp(date).as_unit("ns"), index=datetime_series.index)
        ts.iloc[1] = pd.NaT
        ts.iloc[5] = pd.NaT
        if date_unit:
            json = ts.to_json(date_format="iso", date_unit=date_unit)
        else:
            json = ts.to_json(date_format="iso")

        result = read_json(StringIO(json), typ="series")
        expected = ts.copy()
        tm.assert_series_equal(result, expected)

    def test_date_format_series_raises(self, datetime_series):
        ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
        msg = "Invalid value 'foo' for option 'date_unit'"
        with pytest.raises(ValueError, match=msg):
            ts.to_json(date_format="iso", date_unit="foo")

    @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
    def test_date_unit(self, unit, datetime_frame):
        df = datetime_frame
        df["date"] = Timestamp("20130101 20:43:42").as_unit("ns")
        dl = df.columns.get_loc("date")
        df.iloc[1, dl] = Timestamp("19710101 20:43:42")
        df.iloc[2, dl] = Timestamp("21460101 20:43:42")
        df.iloc[4, dl] = pd.NaT

        json = df.to_json(date_format="epoch", date_unit=unit)

        # force date unit
        result = read_json(StringIO(json), date_unit=unit)
        tm.assert_frame_equal(result, df)

        # detect date unit
        result = read_json(StringIO(json), date_unit=None)
        tm.assert_frame_equal(result, df)

@pytest.mark.parametrize("unit", ["s", "ms", "us"])
|
981 |
+
def test_iso_non_nano_datetimes(self, unit):
|
982 |
+
# Test that numpy datetimes
|
983 |
+
# in an Index or a column with non-nano resolution can be serialized
|
984 |
+
# correctly
|
985 |
+
# GH53686
|
986 |
+
index = DatetimeIndex(
|
987 |
+
[np.datetime64("2023-01-01T11:22:33.123456", unit)],
|
988 |
+
dtype=f"datetime64[{unit}]",
|
989 |
+
)
|
990 |
+
df = DataFrame(
|
991 |
+
{
|
992 |
+
"date": Series(
|
993 |
+
[np.datetime64("2022-01-01T11:22:33.123456", unit)],
|
994 |
+
dtype=f"datetime64[{unit}]",
|
995 |
+
index=index,
|
996 |
+
),
|
997 |
+
"date_obj": Series(
|
998 |
+
[np.datetime64("2023-01-01T11:22:33.123456", unit)],
|
999 |
+
dtype=object,
|
1000 |
+
index=index,
|
1001 |
+
),
|
1002 |
+
},
|
1003 |
+
)
|
1004 |
+
|
1005 |
+
buf = StringIO()
|
1006 |
+
df.to_json(buf, date_format="iso", date_unit=unit)
|
1007 |
+
buf.seek(0)
|
1008 |
+
|
1009 |
+
# read_json always reads datetimes in nanosecond resolution
|
1010 |
+
# TODO: check_dtype/check_index_type should be removable
|
1011 |
+
# once read_json gets non-nano support
|
1012 |
+
tm.assert_frame_equal(
|
1013 |
+
read_json(buf, convert_dates=["date", "date_obj"]),
|
1014 |
+
df,
|
1015 |
+
check_index_type=False,
|
1016 |
+
check_dtype=False,
|
1017 |
+
)
|
1018 |
+
|
1019 |
+
def test_weird_nested_json(self):
|
1020 |
+
# this used to core dump the parser
|
1021 |
+
s = r"""{
|
1022 |
+
"status": "success",
|
1023 |
+
"data": {
|
1024 |
+
"posts": [
|
1025 |
+
{
|
1026 |
+
"id": 1,
|
1027 |
+
"title": "A blog post",
|
1028 |
+
"body": "Some useful content"
|
1029 |
+
},
|
1030 |
+
{
|
1031 |
+
"id": 2,
|
1032 |
+
"title": "Another blog post",
|
1033 |
+
"body": "More content"
|
1034 |
+
}
|
1035 |
+
]
|
1036 |
+
}
|
1037 |
+
}"""
|
1038 |
+
read_json(StringIO(s))
|
1039 |
+
|
1040 |
+
def test_doc_example(self):
|
1041 |
+
dfj2 = DataFrame(
|
1042 |
+
np.random.default_rng(2).standard_normal((5, 2)), columns=list("AB")
|
1043 |
+
)
|
1044 |
+
dfj2["date"] = Timestamp("20130101")
|
1045 |
+
dfj2["ints"] = range(5)
|
1046 |
+
dfj2["bools"] = True
|
1047 |
+
dfj2.index = date_range("20130101", periods=5)
|
1048 |
+
|
1049 |
+
json = StringIO(dfj2.to_json())
|
1050 |
+
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
|
1051 |
+
tm.assert_frame_equal(result, result)
|
1052 |
+
|
1053 |
+
def test_round_trip_exception(self, datapath):
|
1054 |
+
# GH 3867
|
1055 |
+
path = datapath("io", "json", "data", "teams.csv")
|
1056 |
+
df = pd.read_csv(path)
|
1057 |
+
s = df.to_json()
|
1058 |
+
|
1059 |
+
result = read_json(StringIO(s))
|
1060 |
+
res = result.reindex(index=df.index, columns=df.columns)
|
1061 |
+
msg = "The 'downcast' keyword in fillna is deprecated"
|
1062 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
1063 |
+
res = res.fillna(np.nan, downcast=False)
|
1064 |
+
tm.assert_frame_equal(res, df)
|
1065 |
+
|
1066 |
+
@pytest.mark.network
|
1067 |
+
@pytest.mark.single_cpu
|
1068 |
+
@pytest.mark.parametrize(
|
1069 |
+
"field,dtype",
|
1070 |
+
[
|
1071 |
+
["created_at", pd.DatetimeTZDtype(tz="UTC")],
|
1072 |
+
["closed_at", "datetime64[ns]"],
|
1073 |
+
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
|
1074 |
+
],
|
1075 |
+
)
|
1076 |
+
def test_url(self, field, dtype, httpserver):
|
1077 |
+
data = '{"created_at": ["2023-06-23T18:21:36Z"], "closed_at": ["2023-06-23T18:21:36"], "updated_at": ["2023-06-23T18:21:36Z"]}\n' # noqa: E501
|
1078 |
+
httpserver.serve_content(content=data)
|
1079 |
+
result = read_json(httpserver.url, convert_dates=True)
|
1080 |
+
assert result[field].dtype == dtype
|
1081 |
+
|
1082 |
+
def test_timedelta(self):
|
1083 |
+
converter = lambda x: pd.to_timedelta(x, unit="ms")
|
1084 |
+
|
1085 |
+
ser = Series([timedelta(23), timedelta(seconds=5)])
|
1086 |
+
assert ser.dtype == "timedelta64[ns]"
|
1087 |
+
|
1088 |
+
result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
|
1089 |
+
tm.assert_series_equal(result, ser)
|
1090 |
+
|
1091 |
+
ser = Series([timedelta(23), timedelta(seconds=5)], index=Index([0, 1]))
|
1092 |
+
assert ser.dtype == "timedelta64[ns]"
|
1093 |
+
result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
|
1094 |
+
tm.assert_series_equal(result, ser)
|
1095 |
+
|
1096 |
+
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
|
1097 |
+
assert frame[0].dtype == "timedelta64[ns]"
|
1098 |
+
tm.assert_frame_equal(
|
1099 |
+
frame, read_json(StringIO(frame.to_json())).apply(converter)
|
1100 |
+
)
|
1101 |
+
|
1102 |
+
def test_timedelta2(self):
|
1103 |
+
frame = DataFrame(
|
1104 |
+
{
|
1105 |
+
"a": [timedelta(days=23), timedelta(seconds=5)],
|
1106 |
+
"b": [1, 2],
|
1107 |
+
"c": date_range(start="20130101", periods=2),
|
1108 |
+
}
|
1109 |
+
)
|
1110 |
+
data = StringIO(frame.to_json(date_unit="ns"))
|
1111 |
+
result = read_json(data)
|
1112 |
+
result["a"] = pd.to_timedelta(result.a, unit="ns")
|
1113 |
+
result["c"] = pd.to_datetime(result.c)
|
1114 |
+
tm.assert_frame_equal(frame, result)
|
1115 |
+
|
1116 |
+
def test_mixed_timedelta_datetime(self):
|
1117 |
+
td = timedelta(23)
|
1118 |
+
ts = Timestamp("20130101")
|
1119 |
+
frame = DataFrame({"a": [td, ts]}, dtype=object)
|
1120 |
+
|
1121 |
+
expected = DataFrame(
|
1122 |
+
{"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]}
|
1123 |
+
)
|
1124 |
+
data = StringIO(frame.to_json(date_unit="ns"))
|
1125 |
+
result = read_json(data, dtype={"a": "int64"})
|
1126 |
+
tm.assert_frame_equal(result, expected, check_index_type=False)
|
1127 |
+
|
1128 |
+
@pytest.mark.parametrize("as_object", [True, False])
|
1129 |
+
@pytest.mark.parametrize("date_format", ["iso", "epoch"])
|
1130 |
+
@pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
|
1131 |
+
def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
|
1132 |
+
# GH28156: to_json not correctly formatting Timedelta
|
1133 |
+
data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
|
1134 |
+
if as_object:
|
1135 |
+
data.append("a")
|
1136 |
+
|
1137 |
+
ser = Series(data, index=data)
|
1138 |
+
if date_format == "iso":
|
1139 |
+
expected = (
|
1140 |
+
'{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
|
1141 |
+
)
|
1142 |
+
else:
|
1143 |
+
expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
|
1144 |
+
|
1145 |
+
if as_object:
|
1146 |
+
expected = expected.replace("}", ',"a":"a"}')
|
1147 |
+
|
1148 |
+
result = ser.to_json(date_format=date_format)
|
1149 |
+
assert result == expected
|
1150 |
+
|
1151 |
+
@pytest.mark.parametrize("as_object", [True, False])
|
1152 |
+
@pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
|
1153 |
+
def test_timedelta_to_json_fractional_precision(self, as_object, timedelta_typ):
|
1154 |
+
data = [timedelta_typ(milliseconds=42)]
|
1155 |
+
ser = Series(data, index=data)
|
1156 |
+
if as_object:
|
1157 |
+
ser = ser.astype(object)
|
1158 |
+
|
1159 |
+
result = ser.to_json()
|
1160 |
+
expected = '{"42":42}'
|
1161 |
+
assert result == expected
|
1162 |
+
|
1163 |
+
def test_default_handler(self):
|
1164 |
+
value = object()
|
1165 |
+
frame = DataFrame({"a": [7, value]})
|
1166 |
+
expected = DataFrame({"a": [7, str(value)]})
|
1167 |
+
result = read_json(StringIO(frame.to_json(default_handler=str)))
|
1168 |
+
tm.assert_frame_equal(expected, result, check_index_type=False)
|
1169 |
+
|
1170 |
+
def test_default_handler_indirect(self):
|
1171 |
+
def default(obj):
|
1172 |
+
if isinstance(obj, complex):
|
1173 |
+
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
|
1174 |
+
return str(obj)
|
1175 |
+
|
1176 |
+
df_list = [
|
1177 |
+
9,
|
1178 |
+
DataFrame(
|
1179 |
+
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
|
1180 |
+
columns=["a", "b"],
|
1181 |
+
),
|
1182 |
+
]
|
1183 |
+
expected = (
|
1184 |
+
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
|
1185 |
+
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
|
1186 |
+
)
|
1187 |
+
assert (
|
1188 |
+
ujson_dumps(df_list, default_handler=default, orient="values") == expected
|
1189 |
+
)
|
1190 |
+
|
1191 |
+
def test_default_handler_numpy_unsupported_dtype(self):
|
1192 |
+
# GH12554 to_json raises 'Unhandled numpy dtype 15'
|
1193 |
+
df = DataFrame(
|
1194 |
+
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
|
1195 |
+
columns=["a", "b"],
|
1196 |
+
)
|
1197 |
+
expected = (
|
1198 |
+
'[["(1+0j)","(nan+0j)"],'
|
1199 |
+
'["(2.3+0j)","(nan+0j)"],'
|
1200 |
+
'["(4-5j)","(1.2+0j)"]]'
|
1201 |
+
)
|
1202 |
+
assert df.to_json(default_handler=str, orient="values") == expected
|
1203 |
+
|
1204 |
+
def test_default_handler_raises(self):
|
1205 |
+
msg = "raisin"
|
1206 |
+
|
1207 |
+
def my_handler_raises(obj):
|
1208 |
+
raise TypeError(msg)
|
1209 |
+
|
1210 |
+
with pytest.raises(TypeError, match=msg):
|
1211 |
+
DataFrame({"a": [1, 2, object()]}).to_json(
|
1212 |
+
default_handler=my_handler_raises
|
1213 |
+
)
|
1214 |
+
with pytest.raises(TypeError, match=msg):
|
1215 |
+
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
|
1216 |
+
default_handler=my_handler_raises
|
1217 |
+
)
|
1218 |
+
|
1219 |
+
def test_categorical(self):
|
1220 |
+
# GH4377 df.to_json segfaults with non-ndarray blocks
|
1221 |
+
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
|
1222 |
+
df["B"] = df["A"]
|
1223 |
+
expected = df.to_json()
|
1224 |
+
|
1225 |
+
df["B"] = df["A"].astype("category")
|
1226 |
+
assert expected == df.to_json()
|
1227 |
+
|
1228 |
+
s = df["A"]
|
1229 |
+
sc = df["B"]
|
1230 |
+
assert s.to_json() == sc.to_json()
|
1231 |
+
|
1232 |
+
def test_datetime_tz(self):
|
1233 |
+
# GH4377 df.to_json segfaults with non-ndarray blocks
|
1234 |
+
tz_range = date_range("20130101", periods=3, tz="US/Eastern")
|
1235 |
+
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
|
1236 |
+
|
1237 |
+
df = DataFrame({"A": tz_range, "B": date_range("20130101", periods=3)})
|
1238 |
+
|
1239 |
+
df_naive = df.copy()
|
1240 |
+
df_naive["A"] = tz_naive
|
1241 |
+
expected = df_naive.to_json()
|
1242 |
+
assert expected == df.to_json()
|
1243 |
+
|
1244 |
+
stz = Series(tz_range)
|
1245 |
+
s_naive = Series(tz_naive)
|
1246 |
+
assert stz.to_json() == s_naive.to_json()
|
1247 |
+
|
1248 |
+
def test_sparse(self):
|
1249 |
+
# GH4377 df.to_json segfaults with non-ndarray blocks
|
1250 |
+
df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
|
1251 |
+
df.loc[:8] = np.nan
|
1252 |
+
|
1253 |
+
sdf = df.astype("Sparse")
|
1254 |
+
expected = df.to_json()
|
1255 |
+
assert expected == sdf.to_json()
|
1256 |
+
|
1257 |
+
s = Series(np.random.default_rng(2).standard_normal(10))
|
1258 |
+
s.loc[:8] = np.nan
|
1259 |
+
ss = s.astype("Sparse")
|
1260 |
+
|
1261 |
+
expected = s.to_json()
|
1262 |
+
assert expected == ss.to_json()
|
1263 |
+
|
1264 |
+
@pytest.mark.parametrize(
|
1265 |
+
"ts",
|
1266 |
+
[
|
1267 |
+
Timestamp("2013-01-10 05:00:00Z"),
|
1268 |
+
Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
|
1269 |
+
Timestamp("2013-01-10 00:00:00-0500"),
|
1270 |
+
],
|
1271 |
+
)
|
1272 |
+
def test_tz_is_utc(self, ts):
|
1273 |
+
exp = '"2013-01-10T05:00:00.000Z"'
|
1274 |
+
|
1275 |
+
assert ujson_dumps(ts, iso_dates=True) == exp
|
1276 |
+
dt = ts.to_pydatetime()
|
1277 |
+
assert ujson_dumps(dt, iso_dates=True) == exp
|
1278 |
+
|
1279 |
+
def test_tz_is_naive(self):
|
1280 |
+
ts = Timestamp("2013-01-10 05:00:00")
|
1281 |
+
exp = '"2013-01-10T05:00:00.000"'
|
1282 |
+
|
1283 |
+
assert ujson_dumps(ts, iso_dates=True) == exp
|
1284 |
+
dt = ts.to_pydatetime()
|
1285 |
+
assert ujson_dumps(dt, iso_dates=True) == exp
|
1286 |
+
|
1287 |
+
@pytest.mark.parametrize(
|
1288 |
+
"tz_range",
|
1289 |
+
[
|
1290 |
+
date_range("2013-01-01 05:00:00Z", periods=2),
|
1291 |
+
date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
|
1292 |
+
date_range("2013-01-01 00:00:00-0500", periods=2),
|
1293 |
+
],
|
1294 |
+
)
|
1295 |
+
def test_tz_range_is_utc(self, tz_range):
|
1296 |
+
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
|
1297 |
+
dfexp = (
|
1298 |
+
'{"DT":{'
|
1299 |
+
'"0":"2013-01-01T05:00:00.000Z",'
|
1300 |
+
'"1":"2013-01-02T05:00:00.000Z"}}'
|
1301 |
+
)
|
1302 |
+
|
1303 |
+
assert ujson_dumps(tz_range, iso_dates=True) == exp
|
1304 |
+
dti = DatetimeIndex(tz_range)
|
1305 |
+
# Ensure datetimes in object array are serialized correctly
|
1306 |
+
# in addition to the normal DTI case
|
1307 |
+
assert ujson_dumps(dti, iso_dates=True) == exp
|
1308 |
+
assert ujson_dumps(dti.astype(object), iso_dates=True) == exp
|
1309 |
+
df = DataFrame({"DT": dti})
|
1310 |
+
result = ujson_dumps(df, iso_dates=True)
|
1311 |
+
assert result == dfexp
|
1312 |
+
assert ujson_dumps(df.astype({"DT": object}), iso_dates=True)
|
1313 |
+
|
1314 |
+
def test_tz_range_is_naive(self):
|
1315 |
+
dti = date_range("2013-01-01 05:00:00", periods=2)
|
1316 |
+
|
1317 |
+
exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]'
|
1318 |
+
dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}'
|
1319 |
+
|
1320 |
+
# Ensure datetimes in object array are serialized correctly
|
1321 |
+
# in addition to the normal DTI case
|
1322 |
+
assert ujson_dumps(dti, iso_dates=True) == exp
|
1323 |
+
assert ujson_dumps(dti.astype(object), iso_dates=True) == exp
|
1324 |
+
df = DataFrame({"DT": dti})
|
1325 |
+
result = ujson_dumps(df, iso_dates=True)
|
1326 |
+
assert result == dfexp
|
1327 |
+
assert ujson_dumps(df.astype({"DT": object}), iso_dates=True)
|
1328 |
+
|
1329 |
+
def test_read_inline_jsonl(self):
|
1330 |
+
# GH9180
|
1331 |
+
|
1332 |
+
result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)
|
1333 |
+
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
|
1334 |
+
tm.assert_frame_equal(result, expected)
|
1335 |
+
|
1336 |
+
@pytest.mark.single_cpu
|
1337 |
+
@td.skip_if_not_us_locale
|
1338 |
+
def test_read_s3_jsonl(self, s3_public_bucket_with_data, s3so):
|
1339 |
+
# GH17200
|
1340 |
+
|
1341 |
+
result = read_json(
|
1342 |
+
f"s3n://{s3_public_bucket_with_data.name}/items.jsonl",
|
1343 |
+
lines=True,
|
1344 |
+
storage_options=s3so,
|
1345 |
+
)
|
1346 |
+
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
|
1347 |
+
tm.assert_frame_equal(result, expected)
|
1348 |
+
|
1349 |
+
def test_read_local_jsonl(self):
|
1350 |
+
# GH17200
|
1351 |
+
with tm.ensure_clean("tmp_items.json") as path:
|
1352 |
+
with open(path, "w", encoding="utf-8") as infile:
|
1353 |
+
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
|
1354 |
+
result = read_json(path, lines=True)
|
1355 |
+
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
|
1356 |
+
tm.assert_frame_equal(result, expected)
|
1357 |
+
|
1358 |
+
def test_read_jsonl_unicode_chars(self):
|
1359 |
+
# GH15132: non-ascii unicode characters
|
1360 |
+
# \u201d == RIGHT DOUBLE QUOTATION MARK
|
1361 |
+
|
1362 |
+
# simulate file handle
|
1363 |
+
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
|
1364 |
+
json = StringIO(json)
|
1365 |
+
result = read_json(json, lines=True)
|
1366 |
+
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
|
1367 |
+
tm.assert_frame_equal(result, expected)
|
1368 |
+
|
1369 |
+
# simulate string
|
1370 |
+
json = StringIO('{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n')
|
1371 |
+
result = read_json(json, lines=True)
|
1372 |
+
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
|
1373 |
+
tm.assert_frame_equal(result, expected)
|
1374 |
+
|
1375 |
+
@pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)])
|
1376 |
+
def test_to_json_large_numbers(self, bigNum):
|
1377 |
+
# GH34473
|
1378 |
+
series = Series(bigNum, dtype=object, index=["articleId"])
|
1379 |
+
json = series.to_json()
|
1380 |
+
expected = '{"articleId":' + str(bigNum) + "}"
|
1381 |
+
assert json == expected
|
1382 |
+
|
1383 |
+
df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0])
|
1384 |
+
json = df.to_json()
|
1385 |
+
expected = '{"0":{"articleId":' + str(bigNum) + "}}"
|
1386 |
+
assert json == expected
|
1387 |
+
|
1388 |
+
@pytest.mark.parametrize("bigNum", [-(2**63) - 1, 2**64])
|
1389 |
+
def test_read_json_large_numbers(self, bigNum):
|
1390 |
+
# GH20599, 26068
|
1391 |
+
json = StringIO('{"articleId":' + str(bigNum) + "}")
|
1392 |
+
msg = r"Value is too small|Value is too big"
|
1393 |
+
with pytest.raises(ValueError, match=msg):
|
1394 |
+
read_json(json)
|
1395 |
+
|
1396 |
+
json = StringIO('{"0":{"articleId":' + str(bigNum) + "}}")
|
1397 |
+
with pytest.raises(ValueError, match=msg):
|
1398 |
+
read_json(json)
|
1399 |
+
|
1400 |
+
def test_read_json_large_numbers2(self):
|
1401 |
+
# GH18842
|
1402 |
+
json = '{"articleId": "1404366058080022500245"}'
|
1403 |
+
json = StringIO(json)
|
1404 |
+
result = read_json(json, typ="series")
|
1405 |
+
expected = Series(1.404366e21, index=["articleId"])
|
1406 |
+
tm.assert_series_equal(result, expected)
|
1407 |
+
|
1408 |
+
json = '{"0": {"articleId": "1404366058080022500245"}}'
|
1409 |
+
json = StringIO(json)
|
1410 |
+
result = read_json(json)
|
1411 |
+
expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
|
1412 |
+
tm.assert_frame_equal(result, expected)
|
1413 |
+
|
1414 |
+
def test_to_jsonl(self):
|
1415 |
+
# GH9180
|
1416 |
+
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
|
1417 |
+
result = df.to_json(orient="records", lines=True)
|
1418 |
+
expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'
|
1419 |
+
assert result == expected
|
1420 |
+
|
1421 |
+
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
|
1422 |
+
result = df.to_json(orient="records", lines=True)
|
1423 |
+
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
|
1424 |
+
assert result == expected
|
1425 |
+
tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
|
1426 |
+
|
1427 |
+
# GH15096: escaped characters in columns and data
|
1428 |
+
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
|
1429 |
+
result = df.to_json(orient="records", lines=True)
|
1430 |
+
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
|
1431 |
+
assert result == expected
|
1432 |
+
|
1433 |
+
tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
|
1434 |
+
|
1435 |
+
# TODO: there is a near-identical test for pytables; can we share?
|
1436 |
+
@pytest.mark.xfail(reason="GH#13774 encoding kwarg not supported", raises=TypeError)
|
1437 |
+
@pytest.mark.parametrize(
|
1438 |
+
"val",
|
1439 |
+
[
|
1440 |
+
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
|
1441 |
+
[b"E\xc9, 17", b"a", b"b", b"c"],
|
1442 |
+
[b"EE, 17", b"", b"a", b"b", b"c"],
|
1443 |
+
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
|
1444 |
+
[b"", b"a", b"b", b"c"],
|
1445 |
+
[b"\xf8\xfc", b"a", b"b", b"c"],
|
1446 |
+
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
|
1447 |
+
[np.nan, b"", b"b", b"c"],
|
1448 |
+
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
|
1449 |
+
],
|
1450 |
+
)
|
1451 |
+
@pytest.mark.parametrize("dtype", ["category", object])
|
1452 |
+
def test_latin_encoding(self, dtype, val):
|
1453 |
+
# GH 13774
|
1454 |
+
ser = Series(
|
1455 |
+
[x.decode("latin-1") if isinstance(x, bytes) else x for x in val],
|
1456 |
+
dtype=dtype,
|
1457 |
+
)
|
1458 |
+
encoding = "latin-1"
|
1459 |
+
with tm.ensure_clean("test.json") as path:
|
1460 |
+
ser.to_json(path, encoding=encoding)
|
1461 |
+
retr = read_json(StringIO(path), encoding=encoding)
|
1462 |
+
tm.assert_series_equal(ser, retr, check_categorical=False)
|
1463 |
+
|
1464 |
+
def test_data_frame_size_after_to_json(self):
|
1465 |
+
# GH15344
|
1466 |
+
df = DataFrame({"a": [str(1)]})
|
1467 |
+
|
1468 |
+
size_before = df.memory_usage(index=True, deep=True).sum()
|
1469 |
+
df.to_json()
|
1470 |
+
size_after = df.memory_usage(index=True, deep=True).sum()
|
1471 |
+
|
1472 |
+
assert size_before == size_after
|
1473 |
+
|
1474 |
+
@pytest.mark.parametrize(
|
1475 |
+
"index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]
|
1476 |
+
)
|
1477 |
+
@pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])
|
1478 |
+
def test_from_json_to_json_table_index_and_columns(self, index, columns):
|
1479 |
+
# GH25433 GH25435
|
1480 |
+
expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
|
1481 |
+
dfjson = expected.to_json(orient="table")
|
1482 |
+
|
1483 |
+
result = read_json(StringIO(dfjson), orient="table")
|
1484 |
+
tm.assert_frame_equal(result, expected)
|
1485 |
+
|
1486 |
+
def test_from_json_to_json_table_dtypes(self):
|
1487 |
+
# GH21345
|
1488 |
+
expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
|
1489 |
+
dfjson = expected.to_json(orient="table")
|
1490 |
+
result = read_json(StringIO(dfjson), orient="table")
|
1491 |
+
tm.assert_frame_equal(result, expected)
|
1492 |
+
|
1493 |
+
# TODO: We are casting to string which coerces None to NaN before casting back
|
1494 |
+
# to object, ending up with incorrect na values
|
1495 |
+
@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="incorrect na conversion")
|
1496 |
+
@pytest.mark.parametrize("orient", ["split", "records", "index", "columns"])
|
1497 |
+
def test_to_json_from_json_columns_dtypes(self, orient):
|
1498 |
+
# GH21892 GH33205
|
1499 |
+
expected = DataFrame.from_dict(
|
1500 |
+
{
|
1501 |
+
"Integer": Series([1, 2, 3], dtype="int64"),
|
1502 |
+
"Float": Series([None, 2.0, 3.0], dtype="float64"),
|
1503 |
+
"Object": Series([None, "", "c"], dtype="object"),
|
1504 |
+
"Bool": Series([True, False, True], dtype="bool"),
|
1505 |
+
"Category": Series(["a", "b", None], dtype="category"),
|
1506 |
+
"Datetime": Series(
|
1507 |
+
["2020-01-01", None, "2020-01-03"], dtype="datetime64[ns]"
|
1508 |
+
),
|
1509 |
+
}
|
1510 |
+
)
|
1511 |
+
dfjson = expected.to_json(orient=orient)
|
1512 |
+
|
1513 |
+
result = read_json(
|
1514 |
+
StringIO(dfjson),
|
1515 |
+
orient=orient,
|
1516 |
+
dtype={
|
1517 |
+
"Integer": "int64",
|
1518 |
+
"Float": "float64",
|
1519 |
+
"Object": "object",
|
1520 |
+
"Bool": "bool",
|
1521 |
+
"Category": "category",
|
1522 |
+
"Datetime": "datetime64[ns]",
|
1523 |
+
},
|
1524 |
+
)
|
1525 |
+
tm.assert_frame_equal(result, expected)
|
1526 |
+
|
1527 |
+
@pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])
|
1528 |
+
def test_read_json_table_dtype_raises(self, dtype):
|
1529 |
+
# GH21345
|
1530 |
+
df = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
|
1531 |
+
dfjson = df.to_json(orient="table")
|
1532 |
+
msg = "cannot pass both dtype and orient='table'"
|
1533 |
+
with pytest.raises(ValueError, match=msg):
|
1534 |
+
read_json(dfjson, orient="table", dtype=dtype)
|
1535 |
+
|
1536 |
+
@pytest.mark.parametrize("orient", ["index", "columns", "records", "values"])
|
1537 |
+
def test_read_json_table_empty_axes_dtype(self, orient):
|
1538 |
+
# GH28558
|
1539 |
+
|
1540 |
+
expected = DataFrame()
|
1541 |
+
result = read_json(StringIO("{}"), orient=orient, convert_axes=True)
|
1542 |
+
tm.assert_index_equal(result.index, expected.index)
|
1543 |
+
tm.assert_index_equal(result.columns, expected.columns)
|
1544 |
+
|
1545 |
+
def test_read_json_table_convert_axes_raises(self):
|
1546 |
+
# GH25433 GH25435
|
1547 |
+
df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."])
|
1548 |
+
dfjson = df.to_json(orient="table")
|
1549 |
+
msg = "cannot pass both convert_axes and orient='table'"
|
1550 |
+
with pytest.raises(ValueError, match=msg):
|
1551 |
+
read_json(dfjson, orient="table", convert_axes=True)
|
1552 |
+
|
1553 |
+
@pytest.mark.parametrize(
|
1554 |
+
"data, expected",
|
1555 |
+
[
|
1556 |
+
(
|
1557 |
+
DataFrame([[1, 2], [4, 5]], columns=["a", "b"]),
|
1558 |
+
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
|
1559 |
+
),
|
1560 |
+
(
|
1561 |
+
DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"),
|
1562 |
+
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
|
1563 |
+
),
|
1564 |
+
(
|
1565 |
+
DataFrame(
|
1566 |
+
[[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
|
1567 |
+
),
|
1568 |
+
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
|
1569 |
+
),
|
1570 |
+
(Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}),
|
1571 |
+
(
|
1572 |
+
Series([1, 2, 3], name="A").rename_axis("foo"),
|
1573 |
+
{"name": "A", "data": [1, 2, 3]},
|
1574 |
+
),
|
1575 |
+
(
|
1576 |
+
Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]),
|
1577 |
+
{"name": "A", "data": [1, 2]},
|
1578 |
+
),
|
1579 |
+
],
|
1580 |
+
)
|
1581 |
+
def test_index_false_to_json_split(self, data, expected):
|
1582 |
+
# GH 17394
|
1583 |
+
# Testing index=False in to_json with orient='split'
|
1584 |
+
|
1585 |
+
result = data.to_json(orient="split", index=False)
|
1586 |
+
result = json.loads(result)
|
1587 |
+
|
1588 |
+
assert result == expected
|
1589 |
+
|
1590 |
+
@pytest.mark.parametrize(
|
1591 |
+
"data",
|
1592 |
+
[
|
1593 |
+
(DataFrame([[1, 2], [4, 5]], columns=["a", "b"])),
|
1594 |
+
(DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),
|
1595 |
+
(
|
1596 |
+
DataFrame(
|
1597 |
+
[[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
|
1598 |
+
)
|
1599 |
+
),
|
1600 |
+
(Series([1, 2, 3], name="A")),
|
1601 |
+
(Series([1, 2, 3], name="A").rename_axis("foo")),
|
1602 |
+
(Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),
|
1603 |
+
],
|
1604 |
+
)
|
1605 |
+
def test_index_false_to_json_table(self, data):
|
1606 |
+
# GH 17394
|
1607 |
+
# Testing index=False in to_json with orient='table'
|
1608 |
+
|
1609 |
+
result = data.to_json(orient="table", index=False)
|
1610 |
+
result = json.loads(result)
|
1611 |
+
|
1612 |
+
expected = {
|
1613 |
+
"schema": pd.io.json.build_table_schema(data, index=False),
|
1614 |
+
"data": DataFrame(data).to_dict(orient="records"),
|
1615 |
+
}
|
1616 |
+
|
1617 |
+
assert result == expected
|
1618 |
+
|
1619 |
+
@pytest.mark.parametrize("orient", ["index", "columns"])
|
1620 |
+
def test_index_false_error_to_json(self, orient):
|
1621 |
+
# GH 17394, 25513
|
1622 |
+
# Testing error message from to_json with index=False
|
1623 |
+
|
1624 |
+
df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
|
1625 |
+
|
1626 |
+
msg = (
|
1627 |
+
"'index=False' is only valid when 'orient' is 'split', "
|
1628 |
+
"'table', 'records', or 'values'"
|
1629 |
+
)
|
1630 |
+
with pytest.raises(ValueError, match=msg):
|
1631 |
+
df.to_json(orient=orient, index=False)
|
1632 |
+
|
1633 |
+
@pytest.mark.parametrize("orient", ["records", "values"])
|
1634 |
+
def test_index_true_error_to_json(self, orient):
|
1635 |
+
# GH 25513
|
1636 |
+
# Testing error message from to_json with index=True
|
1637 |
+
|
1638 |
+
df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
|
1639 |
+
|
1640 |
+
msg = (
|
1641 |
+
"'index=True' is only valid when 'orient' is 'split', "
|
1642 |
+
"'table', 'index', or 'columns'"
|
1643 |
+
)
|
1644 |
+
with pytest.raises(ValueError, match=msg):
|
1645 |
+
df.to_json(orient=orient, index=True)
|
1646 |
+
|
1647 |
+
@pytest.mark.parametrize("orient", ["split", "table"])
|
1648 |
+
@pytest.mark.parametrize("index", [True, False])
|
1649 |
+
def test_index_false_from_json_to_json(self, orient, index):
|
1650 |
+
# GH25170
|
1651 |
+
# Test index=False in from_json to_json
|
1652 |
+
expected = DataFrame({"a": [1, 2], "b": [3, 4]})
|
1653 |
+
dfjson = expected.to_json(orient=orient, index=index)
|
1654 |
+
result = read_json(StringIO(dfjson), orient=orient)
|
1655 |
+
tm.assert_frame_equal(result, expected)
|
1656 |
+
|
1657 |
+
def test_read_timezone_information(self):
|
1658 |
+
# GH 25546
|
1659 |
+
result = read_json(
|
1660 |
+
StringIO('{"2019-01-01T11:00:00.000Z":88}'), typ="series", orient="index"
|
1661 |
+
)
|
1662 |
+
exp_dti = DatetimeIndex(["2019-01-01 11:00:00"], dtype="M8[ns, UTC]")
|
1663 |
+
expected = Series([88], index=exp_dti)
|
1664 |
+
tm.assert_series_equal(result, expected)
|
1665 |
+
|
1666 |
+
@pytest.mark.parametrize(
|
1667 |
+
"url",
|
1668 |
+
[
|
1669 |
+
"s3://example-fsspec/",
|
1670 |
+
"gcs://another-fsspec/file.json",
|
1671 |
+
"https://example-site.com/data",
|
1672 |
+
"some-protocol://data.txt",
|
1673 |
+
],
|
1674 |
+
)
|
1675 |
+
def test_read_json_with_url_value(self, url):
|
1676 |
+
# GH 36271
|
1677 |
+
result = read_json(StringIO(f'{{"url":{{"0":"{url}"}}}}'))
|
1678 |
+
expected = DataFrame({"url": [url]})
|
1679 |
+
tm.assert_frame_equal(result, expected)
|
1680 |
+
|
1681 |
+
@pytest.mark.parametrize(
|
1682 |
+
"compression",
|
1683 |
+
["", ".gz", ".bz2", ".tar"],
|
1684 |
+
)
|
1685 |
+
def test_read_json_with_very_long_file_path(self, compression):
|
1686 |
+
# GH 46718
|
1687 |
+
long_json_path = f'{"a" * 1000}.json{compression}'
|
1688 |
+
with pytest.raises(
|
1689 |
+
FileNotFoundError, match=f"File {long_json_path} does not exist"
|
1690 |
+
):
|
1691 |
+
# path too long for Windows is handled in file_exists() but raises in
|
1692 |
+
# _get_data_from_filepath()
|
1693 |
+
read_json(long_json_path)
|
1694 |
+
|
1695 |
+
@pytest.mark.parametrize(
|
1696 |
+
"date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")]
|
1697 |
+
)
|
1698 |
+
def test_timedelta_as_label(self, date_format, key):
|
1699 |
+
df = DataFrame([[1]], columns=[pd.Timedelta("1D")])
|
1700 |
+
expected = f'{{"{key}":{{"0":1}}}}'
|
1701 |
+
result = df.to_json(date_format=date_format)
|
1702 |
+
|
1703 |
+
assert result == expected
|
1704 |
+
|
1705 |
+
@pytest.mark.parametrize(
|
1706 |
+
"orient,expected",
|
1707 |
+
[
|
1708 |
+
("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),
|
1709 |
+
("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),
|
1710 |
+
# TODO: the below have separate encoding procedures
|
1711 |
+
pytest.param(
|
1712 |
+
"split",
|
1713 |
+
"",
|
1714 |
+
marks=pytest.mark.xfail(
|
1715 |
+
reason="Produces JSON but not in a consistent manner"
|
1716 |
+
),
|
1717 |
+
),
|
1718 |
+
pytest.param(
|
1719 |
+
"table",
|
1720 |
+
"",
|
1721 |
+
marks=pytest.mark.xfail(
|
1722 |
+
reason="Produces JSON but not in a consistent manner"
|
1723 |
+
),
|
1724 |
+
),
|
1725 |
+
],
|
1726 |
+
)
|
1727 |
+
def test_tuple_labels(self, orient, expected):
|
1728 |
+
# GH 20500
|
1729 |
+
df = DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
|
1730 |
+
result = df.to_json(orient=orient)
|
1731 |
+
assert result == expected
|
1732 |
+
|
1733 |
+
@pytest.mark.parametrize("indent", [1, 2, 4])
|
1734 |
+
def test_to_json_indent(self, indent):
|
1735 |
+
# GH 12004
|
1736 |
+
df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
|
1737 |
+
|
1738 |
+
result = df.to_json(indent=indent)
|
1739 |
+
spaces = " " * indent
|
1740 |
+
expected = f"""{{
|
1741 |
+
{spaces}"a":{{
|
1742 |
+
{spaces}{spaces}"0":"foo",
|
1743 |
+
{spaces}{spaces}"1":"baz"
|
1744 |
+
{spaces}}},
|
1745 |
+
{spaces}"b":{{
|
1746 |
+
{spaces}{spaces}"0":"bar",
|
1747 |
+
{spaces}{spaces}"1":"qux"
|
1748 |
+
{spaces}}}
|
1749 |
+
}}"""
|
1750 |
+
|
1751 |
+
assert result == expected
|
1752 |
+
|
1753 |
+
@pytest.mark.skipif(
|
1754 |
+
using_pyarrow_string_dtype(),
|
1755 |
+
reason="Adjust expected when infer_string is default, no bug here, "
|
1756 |
+
"just a complicated parametrization",
|
1757 |
+
)
|
1758 |
+
@pytest.mark.parametrize(
|
1759 |
+
"orient,expected",
|
1760 |
+
[
|
1761 |
+
(
|
1762 |
+
"split",
|
1763 |
+
"""{
|
1764 |
+
"columns":[
|
1765 |
+
"a",
|
1766 |
+
"b"
|
1767 |
+
],
|
1768 |
+
"index":[
|
1769 |
+
0,
|
1770 |
+
1
|
1771 |
+
],
|
1772 |
+
"data":[
|
1773 |
+
[
|
1774 |
+
"foo",
|
1775 |
+
"bar"
|
1776 |
+
],
|
1777 |
+
[
|
1778 |
+
"baz",
|
1779 |
+
"qux"
|
1780 |
+
]
|
1781 |
+
]
|
1782 |
+
}""",
|
1783 |
+
),
|
1784 |
+
(
|
1785 |
+
"records",
|
1786 |
+
"""[
|
1787 |
+
{
|
1788 |
+
"a":"foo",
|
1789 |
+
"b":"bar"
|
1790 |
+
},
|
1791 |
+
{
|
1792 |
+
"a":"baz",
|
1793 |
+
"b":"qux"
|
1794 |
+
}
|
1795 |
+
]""",
|
1796 |
+
),
|
1797 |
+
(
|
1798 |
+
"index",
|
1799 |
+
"""{
|
1800 |
+
"0":{
|
1801 |
+
"a":"foo",
|
1802 |
+
"b":"bar"
|
1803 |
+
},
|
1804 |
+
"1":{
|
1805 |
+
"a":"baz",
|
1806 |
+
"b":"qux"
|
1807 |
+
}
|
1808 |
+
}""",
|
1809 |
+
),
|
1810 |
+
(
|
1811 |
+
"columns",
|
1812 |
+
"""{
|
1813 |
+
"a":{
|
1814 |
+
"0":"foo",
|
1815 |
+
"1":"baz"
|
1816 |
+
},
|
1817 |
+
"b":{
|
1818 |
+
"0":"bar",
|
1819 |
+
"1":"qux"
|
1820 |
+
}
|
1821 |
+
}""",
|
1822 |
+
),
|
1823 |
+
(
|
1824 |
+
"values",
|
1825 |
+
"""[
|
1826 |
+
[
|
1827 |
+
"foo",
|
1828 |
+
"bar"
|
1829 |
+
],
|
1830 |
+
[
|
1831 |
+
"baz",
|
1832 |
+
"qux"
|
1833 |
+
]
|
1834 |
+
]""",
|
1835 |
+
),
|
1836 |
+
(
|
1837 |
+
"table",
|
1838 |
+
"""{
|
1839 |
+
"schema":{
|
1840 |
+
"fields":[
|
1841 |
+
{
|
1842 |
+
"name":"index",
|
1843 |
+
"type":"integer"
|
1844 |
+
},
|
1845 |
+
{
|
1846 |
+
"name":"a",
|
1847 |
+
"type":"string"
|
1848 |
+
},
|
1849 |
+
{
|
1850 |
+
"name":"b",
|
1851 |
+
"type":"string"
|
1852 |
+
}
|
1853 |
+
],
|
1854 |
+
"primaryKey":[
|
1855 |
+
"index"
|
1856 |
+
],
|
1857 |
+
"pandas_version":"1.4.0"
|
1858 |
+
},
|
1859 |
+
"data":[
|
1860 |
+
{
|
1861 |
+
"index":0,
|
1862 |
+
"a":"foo",
|
1863 |
+
"b":"bar"
|
1864 |
+
},
|
1865 |
+
{
|
1866 |
+
"index":1,
|
1867 |
+
"a":"baz",
|
1868 |
+
"b":"qux"
|
1869 |
+
}
|
1870 |
+
]
|
1871 |
+
}""",
|
1872 |
+
),
|
1873 |
+
],
|
1874 |
+
)
|
1875 |
+
def test_json_indent_all_orients(self, orient, expected):
|
1876 |
+
# GH 12004
|
1877 |
+
df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
|
1878 |
+
result = df.to_json(orient=orient, indent=4)
|
1879 |
+
assert result == expected
|
1880 |
+
|
1881 |
+
def test_json_negative_indent_raises(self):
|
1882 |
+
with pytest.raises(ValueError, match="must be a nonnegative integer"):
|
1883 |
+
DataFrame().to_json(indent=-1)
|
1884 |
+
|
1885 |
+
def test_emca_262_nan_inf_support(self):
|
1886 |
+
# GH 12213
|
1887 |
+
data = StringIO(
|
1888 |
+
'["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'
|
1889 |
+
)
|
1890 |
+
result = read_json(data)
|
1891 |
+
expected = DataFrame(
|
1892 |
+
["a", None, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"]
|
1893 |
+
)
|
1894 |
+
tm.assert_frame_equal(result, expected)
|
1895 |
+
|
1896 |
+
def test_frame_int_overflow(self):
|
1897 |
+
# GH 30320
|
1898 |
+
encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}])
|
1899 |
+
expected = DataFrame({"col": ["31900441201190696999", "Text"]})
|
1900 |
+
result = read_json(StringIO(encoded_json))
|
1901 |
+
tm.assert_frame_equal(result, expected)
|
1902 |
+
|
1903 |
+
@pytest.mark.parametrize(
|
1904 |
+
"dataframe,expected",
|
1905 |
+
[
|
1906 |
+
(
|
1907 |
+
DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}),
|
1908 |
+
'{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,'
|
1909 |
+
'"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}',
|
1910 |
+
)
|
1911 |
+
],
|
1912 |
+
)
|
1913 |
+
def test_json_multiindex(self, dataframe, expected):
|
1914 |
+
series = dataframe.stack(future_stack=True)
|
1915 |
+
result = series.to_json(orient="index")
|
1916 |
+
assert result == expected
|
1917 |
+
|
1918 |
+
@pytest.mark.single_cpu
|
1919 |
+
def test_to_s3(self, s3_public_bucket, s3so):
|
1920 |
+
# GH 28375
|
1921 |
+
mock_bucket_name, target_file = s3_public_bucket.name, "test.json"
|
1922 |
+
df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
|
1923 |
+
df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so)
|
1924 |
+
timeout = 5
|
1925 |
+
while True:
|
1926 |
+
if target_file in (obj.key for obj in s3_public_bucket.objects.all()):
|
1927 |
+
break
|
1928 |
+
time.sleep(0.1)
|
1929 |
+
timeout -= 0.1
|
1930 |
+
assert timeout > 0, "Timed out waiting for file to appear on moto"
|
1931 |
+
|
1932 |
+
def test_json_pandas_nulls(self, nulls_fixture, request):
|
1933 |
+
# GH 31615
|
1934 |
+
if isinstance(nulls_fixture, Decimal):
|
1935 |
+
mark = pytest.mark.xfail(reason="not implemented")
|
1936 |
+
request.applymarker(mark)
|
1937 |
+
|
1938 |
+
result = DataFrame([[nulls_fixture]]).to_json()
|
1939 |
+
assert result == '{"0":{"0":null}}'
|
1940 |
+
|
1941 |
+
def test_readjson_bool_series(self):
|
1942 |
+
# GH31464
|
1943 |
+
result = read_json(StringIO("[true, true, false]"), typ="series")
|
1944 |
+
expected = Series([True, True, False])
|
1945 |
+
tm.assert_series_equal(result, expected)
|
1946 |
+
|
1947 |
+
def test_to_json_multiindex_escape(self):
|
1948 |
+
# GH 15273
|
1949 |
+
df = DataFrame(
|
1950 |
+
True,
|
1951 |
+
index=date_range("2017-01-20", "2017-01-23"),
|
1952 |
+
columns=["foo", "bar"],
|
1953 |
+
).stack(future_stack=True)
|
1954 |
+
result = df.to_json()
|
1955 |
+
expected = (
|
1956 |
+
"{\"(Timestamp('2017-01-20 00:00:00'), 'foo')\":true,"
|
1957 |
+
"\"(Timestamp('2017-01-20 00:00:00'), 'bar')\":true,"
|
1958 |
+
"\"(Timestamp('2017-01-21 00:00:00'), 'foo')\":true,"
|
1959 |
+
"\"(Timestamp('2017-01-21 00:00:00'), 'bar')\":true,"
|
1960 |
+
"\"(Timestamp('2017-01-22 00:00:00'), 'foo')\":true,"
|
1961 |
+
"\"(Timestamp('2017-01-22 00:00:00'), 'bar')\":true,"
|
1962 |
+
"\"(Timestamp('2017-01-23 00:00:00'), 'foo')\":true,"
|
1963 |
+
"\"(Timestamp('2017-01-23 00:00:00'), 'bar')\":true}"
|
1964 |
+
)
|
1965 |
+
assert result == expected
|
1966 |
+
|
1967 |
+
def test_to_json_series_of_objects(self):
|
1968 |
+
class _TestObject:
|
1969 |
+
def __init__(self, a, b, _c, d) -> None:
|
1970 |
+
self.a = a
|
1971 |
+
self.b = b
|
1972 |
+
self._c = _c
|
1973 |
+
self.d = d
|
1974 |
+
|
1975 |
+
def e(self):
|
1976 |
+
return 5
|
1977 |
+
|
1978 |
+
# JSON keys should be all non-callable non-underscore attributes, see GH-42768
|
1979 |
+
series = Series([_TestObject(a=1, b=2, _c=3, d=4)])
|
1980 |
+
assert json.loads(series.to_json()) == {"0": {"a": 1, "b": 2, "d": 4}}
|
1981 |
+
|
1982 |
+
@pytest.mark.parametrize(
|
1983 |
+
"data,expected",
|
1984 |
+
[
|
1985 |
+
(
|
1986 |
+
Series({0: -6 + 8j, 1: 0 + 1j, 2: 9 - 5j}),
|
1987 |
+
'{"0":{"imag":8.0,"real":-6.0},'
|
1988 |
+
'"1":{"imag":1.0,"real":0.0},'
|
1989 |
+
'"2":{"imag":-5.0,"real":9.0}}',
|
1990 |
+
),
|
1991 |
+
(
|
1992 |
+
Series({0: -9.39 + 0.66j, 1: 3.95 + 9.32j, 2: 4.03 - 0.17j}),
|
1993 |
+
'{"0":{"imag":0.66,"real":-9.39},'
|
1994 |
+
'"1":{"imag":9.32,"real":3.95},'
|
1995 |
+
'"2":{"imag":-0.17,"real":4.03}}',
|
1996 |
+
),
|
1997 |
+
(
|
1998 |
+
DataFrame([[-2 + 3j, -1 - 0j], [4 - 3j, -0 - 10j]]),
|
1999 |
+
'{"0":{"0":{"imag":3.0,"real":-2.0},'
|
2000 |
+
'"1":{"imag":-3.0,"real":4.0}},'
|
2001 |
+
'"1":{"0":{"imag":0.0,"real":-1.0},'
|
2002 |
+
'"1":{"imag":-10.0,"real":0.0}}}',
|
2003 |
+
),
|
2004 |
+
(
|
2005 |
+
DataFrame(
|
2006 |
+
[[-0.28 + 0.34j, -1.08 - 0.39j], [0.41 - 0.34j, -0.78 - 1.35j]]
|
2007 |
+
),
|
2008 |
+
'{"0":{"0":{"imag":0.34,"real":-0.28},'
|
2009 |
+
'"1":{"imag":-0.34,"real":0.41}},'
|
2010 |
+
'"1":{"0":{"imag":-0.39,"real":-1.08},'
|
2011 |
+
'"1":{"imag":-1.35,"real":-0.78}}}',
|
2012 |
+
),
|
2013 |
+
],
|
2014 |
+
)
|
2015 |
+
def test_complex_data_tojson(self, data, expected):
|
2016 |
+
# GH41174
|
2017 |
+
result = data.to_json()
|
2018 |
+
assert result == expected
|
2019 |
+
|
2020 |
+
def test_json_uint64(self):
|
2021 |
+
# GH21073
|
2022 |
+
expected = (
|
2023 |
+
'{"columns":["col1"],"index":[0,1],'
|
2024 |
+
'"data":[[13342205958987758245],[12388075603347835679]]}'
|
2025 |
+
)
|
2026 |
+
df = DataFrame(data={"col1": [13342205958987758245, 12388075603347835679]})
|
2027 |
+
result = df.to_json(orient="split")
|
2028 |
+
assert result == expected
|
2029 |
+
|
2030 |
+
@pytest.mark.parametrize(
|
2031 |
+
"orient", ["split", "records", "values", "index", "columns"]
|
2032 |
+
)
|
2033 |
+
def test_read_json_dtype_backend(
|
2034 |
+
self, string_storage, dtype_backend, orient, using_infer_string
|
2035 |
+
):
|
2036 |
+
# GH#50750
|
2037 |
+
pa = pytest.importorskip("pyarrow")
|
2038 |
+
df = DataFrame(
|
2039 |
+
{
|
2040 |
+
"a": Series([1, np.nan, 3], dtype="Int64"),
|
2041 |
+
"b": Series([1, 2, 3], dtype="Int64"),
|
2042 |
+
"c": Series([1.5, np.nan, 2.5], dtype="Float64"),
|
2043 |
+
"d": Series([1.5, 2.0, 2.5], dtype="Float64"),
|
2044 |
+
"e": [True, False, None],
|
2045 |
+
"f": [True, False, True],
|
2046 |
+
"g": ["a", "b", "c"],
|
2047 |
+
"h": ["a", "b", None],
|
2048 |
+
}
|
2049 |
+
)
|
2050 |
+
|
2051 |
+
if using_infer_string:
|
2052 |
+
string_array = ArrowStringArrayNumpySemantics(pa.array(["a", "b", "c"]))
|
2053 |
+
string_array_na = ArrowStringArrayNumpySemantics(pa.array(["a", "b", None]))
|
2054 |
+
elif string_storage == "python":
|
2055 |
+
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
|
2056 |
+
string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
|
2057 |
+
|
2058 |
+
elif dtype_backend == "pyarrow":
|
2059 |
+
pa = pytest.importorskip("pyarrow")
|
2060 |
+
from pandas.arrays import ArrowExtensionArray
|
2061 |
+
|
2062 |
+
string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
|
2063 |
+
string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
|
2064 |
+
|
2065 |
+
else:
|
2066 |
+
string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
|
2067 |
+
string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
|
2068 |
+
|
2069 |
+
out = df.to_json(orient=orient)
|
2070 |
+
with pd.option_context("mode.string_storage", string_storage):
|
2071 |
+
result = read_json(
|
2072 |
+
StringIO(out), dtype_backend=dtype_backend, orient=orient
|
2073 |
+
)
|
2074 |
+
|
2075 |
+
expected = DataFrame(
|
2076 |
+
{
|
2077 |
+
"a": Series([1, np.nan, 3], dtype="Int64"),
|
2078 |
+
"b": Series([1, 2, 3], dtype="Int64"),
|
2079 |
+
"c": Series([1.5, np.nan, 2.5], dtype="Float64"),
|
2080 |
+
"d": Series([1.5, 2.0, 2.5], dtype="Float64"),
|
2081 |
+
"e": Series([True, False, NA], dtype="boolean"),
|
2082 |
+
"f": Series([True, False, True], dtype="boolean"),
|
2083 |
+
"g": string_array,
|
2084 |
+
"h": string_array_na,
|
2085 |
+
}
|
2086 |
+
)
|
2087 |
+
|
2088 |
+
if dtype_backend == "pyarrow":
|
2089 |
+
from pandas.arrays import ArrowExtensionArray
|
2090 |
+
|
2091 |
+
expected = DataFrame(
|
2092 |
+
{
|
2093 |
+
col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
|
2094 |
+
for col in expected.columns
|
2095 |
+
}
|
2096 |
+
)
|
2097 |
+
|
2098 |
+
if orient == "values":
|
2099 |
+
expected.columns = list(range(8))
|
2100 |
+
|
2101 |
+
tm.assert_frame_equal(result, expected)
|
2102 |
+
|
2103 |
+
@pytest.mark.parametrize("orient", ["split", "records", "index"])
|
2104 |
+
def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
|
2105 |
+
# GH#50750
|
2106 |
+
pa = pytest.importorskip("pyarrow")
|
2107 |
+
ser = Series([1, np.nan, 3], dtype="Int64")
|
2108 |
+
|
2109 |
+
out = ser.to_json(orient=orient)
|
2110 |
+
with pd.option_context("mode.string_storage", string_storage):
|
2111 |
+
result = read_json(
|
2112 |
+
StringIO(out), dtype_backend=dtype_backend, orient=orient, typ="series"
|
2113 |
+
)
|
2114 |
+
|
2115 |
+
expected = Series([1, np.nan, 3], dtype="Int64")
|
2116 |
+
|
2117 |
+
if dtype_backend == "pyarrow":
|
2118 |
+
from pandas.arrays import ArrowExtensionArray
|
2119 |
+
|
2120 |
+
expected = Series(ArrowExtensionArray(pa.array(expected, from_pandas=True)))
|
2121 |
+
|
2122 |
+
tm.assert_series_equal(result, expected)
|
2123 |
+
|
2124 |
+
def test_invalid_dtype_backend(self):
|
2125 |
+
msg = (
|
2126 |
+
"dtype_backend numpy is invalid, only 'numpy_nullable' and "
|
2127 |
+
"'pyarrow' are allowed."
|
2128 |
+
)
|
2129 |
+
with pytest.raises(ValueError, match=msg):
|
2130 |
+
read_json("test", dtype_backend="numpy")
|
2131 |
+
|
2132 |
+
|
2133 |
+
def test_invalid_engine():
|
2134 |
+
# GH 48893
|
2135 |
+
ser = Series(range(1))
|
2136 |
+
out = ser.to_json()
|
2137 |
+
with pytest.raises(ValueError, match="The engine type foo"):
|
2138 |
+
read_json(out, engine="foo")
|
2139 |
+
|
2140 |
+
|
2141 |
+
def test_pyarrow_engine_lines_false():
|
2142 |
+
# GH 48893
|
2143 |
+
ser = Series(range(1))
|
2144 |
+
out = ser.to_json()
|
2145 |
+
with pytest.raises(ValueError, match="currently pyarrow engine only supports"):
|
2146 |
+
read_json(out, engine="pyarrow", lines=False)
|
2147 |
+
|
2148 |
+
|
2149 |
+
def test_json_roundtrip_string_inference(orient):
|
2150 |
+
pytest.importorskip("pyarrow")
|
2151 |
+
df = DataFrame(
|
2152 |
+
[["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"]
|
2153 |
+
)
|
2154 |
+
out = df.to_json()
|
2155 |
+
with pd.option_context("future.infer_string", True):
|
2156 |
+
result = read_json(StringIO(out))
|
2157 |
+
expected = DataFrame(
|
2158 |
+
[["a", "b"], ["c", "d"]],
|
2159 |
+
dtype="string[pyarrow_numpy]",
|
2160 |
+
index=Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"),
|
2161 |
+
columns=Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"),
|
2162 |
+
)
|
2163 |
+
tm.assert_frame_equal(result, expected)
|
2164 |
+
|
2165 |
+
|
2166 |
+
def test_json_pos_args_deprecation():
|
2167 |
+
# GH-54229
|
2168 |
+
df = DataFrame({"a": [1, 2, 3]})
|
2169 |
+
msg = (
|
2170 |
+
r"Starting with pandas version 3.0 all arguments of to_json except for the "
|
2171 |
+
r"argument 'path_or_buf' will be keyword-only."
|
2172 |
+
)
|
2173 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
2174 |
+
buf = BytesIO()
|
2175 |
+
df.to_json(buf, "split")
|
2176 |
+
|
2177 |
+
|
2178 |
+
@td.skip_if_no("pyarrow")
|
2179 |
+
def test_to_json_ea_null():
|
2180 |
+
# GH#57224
|
2181 |
+
df = DataFrame(
|
2182 |
+
{
|
2183 |
+
"a": Series([1, NA], dtype="int64[pyarrow]"),
|
2184 |
+
"b": Series([2, NA], dtype="Int64"),
|
2185 |
+
}
|
2186 |
+
)
|
2187 |
+
result = df.to_json(orient="records", lines=True)
|
2188 |
+
expected = """{"a":1,"b":2}
|
2189 |
+
{"a":null,"b":null}
|
2190 |
+
"""
|
2191 |
+
assert result == expected
|
2192 |
+
|
2193 |
+
|
2194 |
+
def test_read_json_lines_rangeindex():
|
2195 |
+
# GH 57429
|
2196 |
+
data = """
|
2197 |
+
{"a": 1, "b": 2}
|
2198 |
+
{"a": 3, "b": 4}
|
2199 |
+
"""
|
2200 |
+
result = read_json(StringIO(data), lines=True).index
|
2201 |
+
expected = RangeIndex(2)
|
2202 |
+
tm.assert_index_equal(result, expected, exact=True)
|
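The tests above all revolve around one contract: a frame or series serialized with to_json must come back intact through read_json, typically via a file-like buffer. As a minimal sketch of that pattern outside the test harness (the frame contents here are invented; to_json, read_json, convert_dates, and StringIO are the same public pandas APIs the tests exercise):

from io import StringIO

import pandas as pd

# Serialize with ISO-formatted dates, then parse the payload back.
# Wrapping the JSON string in StringIO mirrors how the tests above
# feed literal payloads to read_json.
df = pd.DataFrame({"id": [1, 2], "when": pd.to_datetime(["2002-11-08", "2002-11-09"])})
payload = df.to_json(date_format="iso")
roundtripped = pd.read_json(StringIO(payload), convert_dates=["when"])

assert roundtripped["id"].tolist() == [1, 2]
assert str(roundtripped["when"].dtype).startswith("datetime64")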
venv/lib/python3.10/site-packages/pandas/tests/io/parser/__init__.py
ADDED
File without changes

venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc
ADDED
Binary file (4.49 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc
ADDED
Binary file (9.54 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc
ADDED
Binary file (8.64 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc
ADDED
Binary file (10.7 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py
ADDED
@@ -0,0 +1,319 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import os
|
4 |
+
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from pandas.compat._optional import VERSIONS
|
8 |
+
|
9 |
+
from pandas import (
|
10 |
+
read_csv,
|
11 |
+
read_table,
|
12 |
+
)
|
13 |
+
import pandas._testing as tm
|
14 |
+
|
15 |
+
|
16 |
+
class BaseParser:
|
17 |
+
engine: str | None = None
|
18 |
+
low_memory = True
|
19 |
+
float_precision_choices: list[str | None] = []
|
20 |
+
|
21 |
+
def update_kwargs(self, kwargs):
|
22 |
+
kwargs = kwargs.copy()
|
23 |
+
kwargs.update({"engine": self.engine, "low_memory": self.low_memory})
|
24 |
+
|
25 |
+
return kwargs
|
26 |
+
|
27 |
+
def read_csv(self, *args, **kwargs):
|
28 |
+
kwargs = self.update_kwargs(kwargs)
|
29 |
+
return read_csv(*args, **kwargs)
|
30 |
+
|
31 |
+
def read_csv_check_warnings(
|
32 |
+
self,
|
33 |
+
warn_type: type[Warning],
|
34 |
+
warn_msg: str,
|
35 |
+
*args,
|
36 |
+
raise_on_extra_warnings=True,
|
37 |
+
check_stacklevel: bool = True,
|
38 |
+
**kwargs,
|
39 |
+
):
|
40 |
+
# We need to check the stacklevel here instead of in the tests
|
41 |
+
# since this is where read_csv is called and where the warning
|
42 |
+
# should point to.
|
43 |
+
kwargs = self.update_kwargs(kwargs)
|
44 |
+
with tm.assert_produces_warning(
|
45 |
+
warn_type,
|
46 |
+
match=warn_msg,
|
47 |
+
raise_on_extra_warnings=raise_on_extra_warnings,
|
48 |
+
check_stacklevel=check_stacklevel,
|
49 |
+
):
|
50 |
+
return read_csv(*args, **kwargs)
|
51 |
+
|
52 |
+
def read_table(self, *args, **kwargs):
|
53 |
+
kwargs = self.update_kwargs(kwargs)
|
54 |
+
return read_table(*args, **kwargs)
|
55 |
+
|
56 |
+
def read_table_check_warnings(
|
57 |
+
self,
|
58 |
+
warn_type: type[Warning],
|
59 |
+
warn_msg: str,
|
60 |
+
*args,
|
61 |
+
raise_on_extra_warnings=True,
|
62 |
+
**kwargs,
|
63 |
+
):
|
64 |
+
# We need to check the stacklevel here instead of in the tests
|
65 |
+
# since this is where read_table is called and where the warning
|
66 |
+
# should point to.
|
67 |
+
kwargs = self.update_kwargs(kwargs)
|
68 |
+
with tm.assert_produces_warning(
|
69 |
+
warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings
|
70 |
+
):
|
71 |
+
return read_table(*args, **kwargs)
|
72 |
+
|
73 |
+
|
74 |
+
class CParser(BaseParser):
|
75 |
+
engine = "c"
|
76 |
+
float_precision_choices = [None, "high", "round_trip"]
|
77 |
+
|
78 |
+
|
79 |
+
class CParserHighMemory(CParser):
|
80 |
+
low_memory = False
|
81 |
+
|
82 |
+
|
83 |
+
class CParserLowMemory(CParser):
|
84 |
+
low_memory = True
|
85 |
+
|
86 |
+
|
87 |
+
class PythonParser(BaseParser):
|
88 |
+
engine = "python"
|
89 |
+
float_precision_choices = [None]
|
90 |
+
|
91 |
+
|
92 |
+
class PyArrowParser(BaseParser):
|
93 |
+
engine = "pyarrow"
|
94 |
+
float_precision_choices = [None]
|
95 |
+
|
96 |
+
|
97 |
+
@pytest.fixture
|
98 |
+
def csv_dir_path(datapath):
|
99 |
+
"""
|
100 |
+
The directory path to the data files needed for parser tests.
|
101 |
+
"""
|
102 |
+
return datapath("io", "parser", "data")
|
103 |
+
|
104 |
+
|
105 |
+
@pytest.fixture
|
106 |
+
def csv1(datapath):
|
107 |
+
"""
|
108 |
+
The path to the data file "test1.csv" needed for parser tests.
|
109 |
+
"""
|
110 |
+
return os.path.join(datapath("io", "data", "csv"), "test1.csv")
|
111 |
+
|
112 |
+
|
113 |
+
_cParserHighMemory = CParserHighMemory
|
114 |
+
_cParserLowMemory = CParserLowMemory
|
115 |
+
_pythonParser = PythonParser
|
116 |
+
_pyarrowParser = PyArrowParser
|
117 |
+
|
118 |
+
_py_parsers_only = [_pythonParser]
|
119 |
+
_c_parsers_only = [_cParserHighMemory, _cParserLowMemory]
|
120 |
+
_pyarrow_parsers_only = [pytest.param(_pyarrowParser, marks=pytest.mark.single_cpu)]
|
121 |
+
|
122 |
+
_all_parsers = [*_c_parsers_only, *_py_parsers_only, *_pyarrow_parsers_only]
|
123 |
+
|
124 |
+
_py_parser_ids = ["python"]
|
125 |
+
_c_parser_ids = ["c_high", "c_low"]
|
126 |
+
_pyarrow_parsers_ids = ["pyarrow"]
|
127 |
+
|
128 |
+
_all_parser_ids = [*_c_parser_ids, *_py_parser_ids, *_pyarrow_parsers_ids]
|
129 |
+
|
130 |
+
|
131 |
+
@pytest.fixture(params=_all_parsers, ids=_all_parser_ids)
|
132 |
+
def all_parsers(request):
|
133 |
+
"""
|
134 |
+
Fixture all of the CSV parsers.
|
135 |
+
"""
|
136 |
+
parser = request.param()
|
137 |
+
if parser.engine == "pyarrow":
|
138 |
+
pytest.importorskip("pyarrow", VERSIONS["pyarrow"])
|
139 |
+
# Try finding a way to disable threads all together
|
140 |
+
# for more stable CI runs
|
141 |
+
import pyarrow
|
142 |
+
|
143 |
+
pyarrow.set_cpu_count(1)
|
144 |
+
return parser
|
145 |
+
|
146 |
+
|
147 |
+
@pytest.fixture(params=_c_parsers_only, ids=_c_parser_ids)
|
148 |
+
def c_parser_only(request):
|
149 |
+
"""
|
150 |
+
Fixture all of the CSV parsers using the C engine.
|
151 |
+
"""
|
152 |
+
return request.param()
|
153 |
+
|
154 |
+
|
155 |
+
@pytest.fixture(params=_py_parsers_only, ids=_py_parser_ids)
|
156 |
+
def python_parser_only(request):
    """
    Fixture for all of the CSV parsers using the Python engine.
    """
    return request.param()


@pytest.fixture(params=_pyarrow_parsers_only, ids=_pyarrow_parsers_ids)
def pyarrow_parser_only(request):
    """
    Fixture for all of the CSV parsers using the pyarrow engine.
    """
    return request.param()


def _get_all_parser_float_precision_combinations():
    """
    Return all allowable parser and float precision
    combinations and corresponding ids.
    """
    params = []
    ids = []
    for parser, parser_id in zip(_all_parsers, _all_parser_ids):
        if hasattr(parser, "values"):
            # Wrapped in pytest.param, get the actual parser back
            parser = parser.values[0]
        for precision in parser.float_precision_choices:
            # Re-wrap in pytest.param for pyarrow
            mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else ()
            param = pytest.param((parser(), precision), marks=mark)
            params.append(param)
            ids.append(f"{parser_id}-{precision}")

    return {"params": params, "ids": ids}


@pytest.fixture(
    params=_get_all_parser_float_precision_combinations()["params"],
    ids=_get_all_parser_float_precision_combinations()["ids"],
)
def all_parsers_all_precisions(request):
    """
    Fixture for all allowable combinations of parser
    and float precision.
    """
    return request.param


_utf_values = [8, 16, 32]

_encoding_seps = ["", "-", "_"]
_encoding_prefixes = ["utf", "UTF"]

_encoding_fmts = [
    f"{prefix}{sep}{{0}}" for sep in _encoding_seps for prefix in _encoding_prefixes
]


@pytest.fixture(params=_utf_values)
def utf_value(request):
    """
    Fixture for all possible integer values for a UTF encoding.
    """
    return request.param


@pytest.fixture(params=_encoding_fmts)
def encoding_fmt(request):
    """
    Fixture for all possible string formats of a UTF encoding.
    """
    return request.param


@pytest.fixture(
    params=[
        ("-1,0", -1.0),
        ("-1,2e0", -1.2),
        ("-1e0", -1.0),
        ("+1e0", 1.0),
        ("+1e+0", 1.0),
        ("+1e-1", 0.1),
        ("+,1e1", 1.0),
        ("+1,e0", 1.0),
        ("-,1e1", -1.0),
        ("-1,e0", -1.0),
        ("0,1", 0.1),
        ("1,", 1.0),
        (",1", 0.1),
        ("-,1", -0.1),
        ("1_,", 1.0),
        ("1_234,56", 1234.56),
        ("1_234,56e0", 1234.56),
        # negative cases; must not parse as float
        ("_", "_"),
        ("-_", "-_"),
        ("-_1", "-_1"),
        ("-_1e0", "-_1e0"),
        ("_1", "_1"),
        ("_1,", "_1,"),
        ("_1,_", "_1,_"),
        ("_1e0", "_1e0"),
        ("1,2e_1", "1,2e_1"),
        ("1,2e1_0", "1,2e1_0"),
        ("1,_2", "1,_2"),
        (",1__2", ",1__2"),
        (",1e", ",1e"),
        ("-,1e", "-,1e"),
        ("1_000,000_000", "1_000,000_000"),
        ("1,e1_2", "1,e1_2"),
        ("e11,2", "e11,2"),
        ("1e11,2", "1e11,2"),
        ("1,2,2", "1,2,2"),
        ("1,2_1", "1,2_1"),
        ("1,2e-10e1", "1,2e-10e1"),
        ("--1,2", "--1,2"),
        ("1a_2,1", "1a_2,1"),
        ("1,2E-1", 0.12),
        ("1,2E1", 12.0),
    ]
)
def numeric_decimal(request):
    """
    Fixture for all numeric formats which should get recognized. The first entry
    represents the value to read while the second represents the expected result.
    """
    return request.param


@pytest.fixture
def pyarrow_xfail(request):
    """
    Fixture that xfails a test if the engine is pyarrow.

    Use if failure is due to unsupported keywords or inconsistent results.
    """
    if "all_parsers" in request.fixturenames:
        parser = request.getfixturevalue("all_parsers")
    elif "all_parsers_all_precisions" in request.fixturenames:
        # Return value is tuple of (engine, precision)
        parser = request.getfixturevalue("all_parsers_all_precisions")[0]
    else:
        return
    if parser.engine == "pyarrow":
        mark = pytest.mark.xfail(reason="pyarrow doesn't support this.")
        request.applymarker(mark)


@pytest.fixture
def pyarrow_skip(request):
    """
    Fixture that skips a test if the engine is pyarrow.

    Use if failure is due to a parsing failure from pyarrow.csv.read_csv.
    """
    if "all_parsers" in request.fixturenames:
        parser = request.getfixturevalue("all_parsers")
    elif "all_parsers_all_precisions" in request.fixturenames:
        # Return value is tuple of (engine, precision)
        parser = request.getfixturevalue("all_parsers_all_precisions")[0]
    else:
        return
    if parser.engine == "pyarrow":
        pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
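For orientation, a minimal sketch (not part of the uploaded files) of how these fixtures are typically consumed by a test; `all_parsers` is assumed to be defined earlier in this conftest.py, as in the pandas test suite:

from io import BytesIO


def test_utf_encoding_roundtrip(all_parsers, utf_value, encoding_fmt):
    # e.g. utf_value=16 and encoding_fmt="UTF-{0}" combine to "UTF-16";
    # the fixtures parametrize over every prefix/separator/width combination.
    encoding = encoding_fmt.format(utf_value)
    data = "a\n1".encode(encoding)
    result = all_parsers.read_csv(BytesIO(data), encoding=encoding)
    assert list(result.columns) == ["a"]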
venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__init__.py
ADDED
File without changes

venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (197 Bytes)

venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc
ADDED
Binary file (8.28 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc
ADDED
Binary file (15.2 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc
ADDED
Binary file (4.15 kB)

venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_categorical.py
ADDED
@@ -0,0 +1,334 @@
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os

import numpy as np
import pytest

from pandas._libs import parsers as libparsers

from pandas.core.dtypes.dtypes import CategoricalDtype

import pandas as pd
from pandas import (
    Categorical,
    DataFrame,
    Timestamp,
)
import pandas._testing as tm

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)

xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")


@xfail_pyarrow  # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
@pytest.mark.parametrize(
    "dtype",
    [
        "category",
        CategoricalDtype(),
        {"a": "category", "b": "category", "c": CategoricalDtype()},
    ],
)
def test_categorical_dtype(all_parsers, dtype):
    # see gh-10153
    parser = all_parsers
    data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
    expected = DataFrame(
        {
            "a": Categorical(["1", "1", "2"]),
            "b": Categorical(["a", "a", "b"]),
            "c": Categorical(["3.4", "3.4", "4.5"]),
        }
    )
    actual = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(actual, expected)


@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
def test_categorical_dtype_single(all_parsers, dtype, request):
    # see gh-10153
    parser = all_parsers
    data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
    expected = DataFrame(
        {"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
    )
    if parser.engine == "pyarrow":
        mark = pytest.mark.xfail(
            strict=False,
            reason="Flaky test sometimes gives object dtype instead of Categorical",
        )
        request.applymarker(mark)

    actual = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(actual, expected)


@xfail_pyarrow  # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
def test_categorical_dtype_unsorted(all_parsers):
    # see gh-10153
    parser = all_parsers
    data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
    expected = DataFrame(
        {
            "a": Categorical(["1", "1", "2"]),
            "b": Categorical(["b", "b", "a"]),
            "c": Categorical(["3.4", "3.4", "4.5"]),
        }
    )
    actual = parser.read_csv(StringIO(data), dtype="category")
    tm.assert_frame_equal(actual, expected)


@xfail_pyarrow  # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
def test_categorical_dtype_missing(all_parsers):
    # see gh-10153
    parser = all_parsers
    data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
    expected = DataFrame(
        {
            "a": Categorical(["1", "1", "2"]),
            "b": Categorical(["b", np.nan, "a"]),
            "c": Categorical(["3.4", "3.4", "4.5"]),
        }
    )
    actual = parser.read_csv(StringIO(data), dtype="category")
    tm.assert_frame_equal(actual, expected)


@xfail_pyarrow  # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(all_parsers, monkeypatch):
    # see gh-18186
    # was an issue with C parser, due to DEFAULT_BUFFER_HEURISTIC
    parser = all_parsers
    heuristic = 2**5
    data = np.sort([str(i) for i in range(heuristic + 1)])
    expected = DataFrame({"a": Categorical(data, ordered=True)})
    with monkeypatch.context() as m:
        m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
        actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category")
    actual["a"] = actual["a"].cat.reorder_categories(
        np.sort(actual.a.cat.categories), ordered=True
    )
    tm.assert_frame_equal(actual, expected)


def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
    # see gh-10153
    pth = os.path.join(csv_dir_path, "utf16_ex.txt")
    parser = all_parsers
    encoding = "utf-16"
    sep = "\t"

    expected = parser.read_csv(pth, sep=sep, encoding=encoding)
    expected = expected.apply(Categorical)

    actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category")
    tm.assert_frame_equal(actual, expected)


def test_categorical_dtype_chunksize_infer_categories(all_parsers):
    # see gh-10153
    parser = all_parsers
    data = """a,b
1,a
1,b
1,b
2,c"""
    expecteds = [
        DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
        DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
    ]

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
        return

    with parser.read_csv(
        StringIO(data), dtype={"b": "category"}, chunksize=2
    ) as actuals:
        for actual, expected in zip(actuals, expecteds):
            tm.assert_frame_equal(actual, expected)


def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
    # see gh-10153
    parser = all_parsers
    data = """a,b
1,a
1,b
1,b
2,c"""
    cats = ["a", "b", "c"]
    expecteds = [
        DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),
        DataFrame(
            {"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)},
            index=[2, 3],
        ),
    ]
    dtype = CategoricalDtype(cats)

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
        return

    with parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2) as actuals:
        for actual, expected in zip(actuals, expecteds):
            tm.assert_frame_equal(actual, expected)


def test_categorical_dtype_latin1(all_parsers, csv_dir_path):
    # see gh-10153
    pth = os.path.join(csv_dir_path, "unicode_series.csv")
    parser = all_parsers
    encoding = "latin-1"

    expected = parser.read_csv(pth, header=None, encoding=encoding)
    expected[1] = Categorical(expected[1])

    actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})
    tm.assert_frame_equal(actual, expected)


@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
    "categories",
    [["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],
)
def test_categorical_category_dtype(all_parsers, categories, ordered):
    parser = all_parsers
    data = """a,b
1,a
1,b
1,b
2,c"""
    expected = DataFrame(
        {
            "a": [1, 1, 1, 2],
            "b": Categorical(
                ["a", "b", "b", "c"], categories=categories, ordered=ordered
            ),
        }
    )

    dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)}
    result = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(result, expected)


def test_categorical_category_dtype_unsorted(all_parsers):
    parser = all_parsers
    data = """a,b
1,a
1,b
1,b
2,c"""
    dtype = CategoricalDtype(["c", "b", "a"])
    expected = DataFrame(
        {
            "a": [1, 1, 1, 2],
            "b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),
        }
    )

    result = parser.read_csv(StringIO(data), dtype={"b": dtype})
    tm.assert_frame_equal(result, expected)


def test_categorical_coerces_numeric(all_parsers):
    parser = all_parsers
    dtype = {"b": CategoricalDtype([1, 2, 3])}

    data = "b\n1\n1\n2\n3"
    expected = DataFrame({"b": Categorical([1, 1, 2, 3])})

    result = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(result, expected)


def test_categorical_coerces_datetime(all_parsers):
    parser = all_parsers
    dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)
    dtype = {"b": CategoricalDtype(dti)}

    data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
    expected = DataFrame({"b": Categorical(dtype["b"].categories)})

    result = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(result, expected)


def test_categorical_coerces_timestamp(all_parsers):
    parser = all_parsers
    dtype = {"b": CategoricalDtype([Timestamp("2014")])}

    data = "b\n2014-01-01\n2014-01-01"
    expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)})

    result = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(result, expected)


def test_categorical_coerces_timedelta(all_parsers):
    parser = all_parsers
    dtype = {"b": CategoricalDtype(pd.to_timedelta(["1h", "2h", "3h"]))}

    data = "b\n1h\n2h\n3h"
    expected = DataFrame({"b": Categorical(dtype["b"].categories)})

    result = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "data",
    [
        "b\nTrue\nFalse\nNA\nFalse",
        "b\ntrue\nfalse\nNA\nfalse",
        "b\nTRUE\nFALSE\nNA\nFALSE",
        "b\nTrue\nFalse\nNA\nFALSE",
    ],
)
def test_categorical_dtype_coerces_boolean(all_parsers, data):
    # see gh-20498
    parser = all_parsers
    dtype = {"b": CategoricalDtype([False, True])}
    expected = DataFrame({"b": Categorical([True, False, None, False])})

    result = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(result, expected)


def test_categorical_unexpected_categories(all_parsers):
    parser = all_parsers
    dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}

    data = "b\nd\na\nc\nd"  # Unexpected c
    expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})

    result = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(result, expected)
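A standalone sketch (not one of the uploaded files) of the coercion behavior the tests above assert: values are parsed to the categories' own type before the categorical is built, so timedelta text becomes timedelta categories rather than strings.

from io import StringIO

import pandas as pd

dtype = pd.CategoricalDtype(pd.to_timedelta(["1h", "2h"]))
df = pd.read_csv(StringIO("b\n1h\n2h"), dtype={"b": dtype})
print(df["b"].cat.categories.dtype)  # timedelta64[ns]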
venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
ADDED
@@ -0,0 +1,643 @@
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from collections import defaultdict
from io import StringIO

import numpy as np
import pytest

from pandas.errors import ParserWarning

import pandas as pd
from pandas import (
    DataFrame,
    Timestamp,
)
import pandas._testing as tm
from pandas.core.arrays import (
    ArrowStringArray,
    IntegerArray,
    StringArray,
)

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)


@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_all_columns(all_parsers, dtype, check_orig):
    # see gh-3795, gh-6607
    parser = all_parsers

    df = DataFrame(
        np.random.default_rng(2).random((5, 2)).round(4),
        columns=list("AB"),
        index=["1A", "1B", "1C", "1D", "1E"],
    )

    with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
        df.to_csv(path)

        result = parser.read_csv(path, dtype=dtype, index_col=0)

        if check_orig:
            expected = df.copy()
            result = result.astype(float)
        else:
            expected = df.astype(str)

        tm.assert_frame_equal(result, expected)


@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_per_column(all_parsers):
    parser = all_parsers
    data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
    expected = DataFrame(
        [[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
    )
    expected["one"] = expected["one"].astype(np.float64)
    expected["two"] = expected["two"].astype(object)

    result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
    tm.assert_frame_equal(result, expected)


def test_invalid_dtype_per_column(all_parsers):
    parser = all_parsers
    data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""

    with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
        parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})


def test_raise_on_passed_int_dtype_with_nas(all_parsers):
    # see gh-2631
    parser = all_parsers
    data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""

    if parser.engine == "c":
        msg = "Integer column has NA values"
    elif parser.engine == "pyarrow":
        msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
    else:
        msg = "Unable to convert column DOY"

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)


def test_dtype_with_converters(all_parsers):
    parser = all_parsers
    data = """a,b
1.1,2.2
1.2,2.3"""

    if parser.engine == "pyarrow":
        msg = "The 'converters' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(
                StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}
            )
        return

    # Dtype spec ignored if converters specified.
    result = parser.read_csv_check_warnings(
        ParserWarning,
        "Both a converter and dtype were specified for column a "
        "- only the converter will be used.",
        StringIO(data),
        dtype={"a": "i8"},
        converters={"a": lambda x: str(x)},
    )
    expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "dtype", list(np.typecodes["AllInteger"] + np.typecodes["Float"])
)
def test_numeric_dtype(all_parsers, dtype):
    data = "0\n1"
    parser = all_parsers
    expected = DataFrame([0, 1], dtype=dtype)

    result = parser.read_csv(StringIO(data), header=None, dtype=dtype)
    tm.assert_frame_equal(expected, result)


@pytest.mark.usefixtures("pyarrow_xfail")
def test_boolean_dtype(all_parsers):
    parser = all_parsers
    data = "\n".join(
        [
            "a",
            "True",
            "TRUE",
            "true",
            "1",
            "1.0",
            "False",
            "FALSE",
            "false",
            "0",
            "0.0",
            "NaN",
            "nan",
            "NA",
            "null",
            "NULL",
        ]
    )

    result = parser.read_csv(StringIO(data), dtype="boolean")
    expected = DataFrame(
        {
            "a": pd.array(
                [
                    True,
                    True,
                    True,
                    True,
                    True,
                    False,
                    False,
                    False,
                    False,
                    False,
                    None,
                    None,
                    None,
                    None,
                    None,
                ],
                dtype="boolean",
            )
        }
    )

    tm.assert_frame_equal(result, expected)


@pytest.mark.usefixtures("pyarrow_xfail")
def test_delimiter_with_usecols_and_parse_dates(all_parsers):
    # GH#35873
    result = all_parsers.read_csv(
        StringIO('"dump","-9,1","-9,1",20101010'),
        engine="python",
        names=["col", "col1", "col2", "col3"],
        usecols=["col1", "col2", "col3"],
        parse_dates=["col3"],
        decimal=",",
    )
    expected = DataFrame(
        {"col1": [-9.1], "col2": [-9.1], "col3": [Timestamp("2010-10-10")]}
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("thousands", ["_", None])
def test_decimal_and_exponential(
    request, python_parser_only, numeric_decimal, thousands
):
    # GH#31920
    decimal_number_check(request, python_parser_only, numeric_decimal, thousands, None)


@pytest.mark.parametrize("thousands", ["_", None])
@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
def test_1000_sep_decimal_float_precision(
    request, c_parser_only, numeric_decimal, float_precision, thousands
):
    # test decimal and thousand sep handling across 'float_precision'
    # parsers
    decimal_number_check(
        request, c_parser_only, numeric_decimal, thousands, float_precision
    )
    text, value = numeric_decimal
    text = " " + text + " "
    if isinstance(value, str):  # the negative cases (parse as text)
        value = " " + value + " "
    decimal_number_check(
        request, c_parser_only, (text, value), thousands, float_precision
    )


def decimal_number_check(request, parser, numeric_decimal, thousands, float_precision):
    # GH#31920
    value = numeric_decimal[0]
    if thousands is None and value in ("1_,", "1_234,56", "1_234,56e0"):
        request.applymarker(
            pytest.mark.xfail(reason=f"thousands={thousands} and sep is in {value}")
        )
    df = parser.read_csv(
        StringIO(value),
        float_precision=float_precision,
        sep="|",
        thousands=thousands,
        decimal=",",
        header=None,
    )
    val = df.iloc[0, 0]
    assert val == numeric_decimal[1]


@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
def test_skip_whitespace(c_parser_only, float_precision):
    DATA = """id\tnum\t
1\t1.2 \t
1\t 2.1\t
2\t 1\t
2\t 1.2 \t
"""
    df = c_parser_only.read_csv(
        StringIO(DATA),
        float_precision=float_precision,
        sep="\t",
        header=0,
        dtype={1: np.float64},
    )
    tm.assert_series_equal(df.iloc[:, 1], pd.Series([1.2, 2.1, 1.0, 1.2], name="num"))


@pytest.mark.usefixtures("pyarrow_xfail")
def test_true_values_cast_to_bool(all_parsers):
    # GH#34655
    text = """a,b
yes,xxx
no,yyy
1,zzz
0,aaa
"""
    parser = all_parsers
    result = parser.read_csv(
        StringIO(text),
        true_values=["yes"],
        false_values=["no"],
        dtype={"a": "boolean"},
    )
    expected = DataFrame(
        {"a": [True, False, True, False], "b": ["xxx", "yyy", "zzz", "aaa"]}
    )
    expected["a"] = expected["a"].astype("boolean")
    tm.assert_frame_equal(result, expected)


@pytest.mark.usefixtures("pyarrow_xfail")
@pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)])
def test_dtype_mangle_dup_cols(all_parsers, dtypes, exp_value):
    # GH#35211
    parser = all_parsers
    data = """a,a\n1,1"""
    dtype_dict = {"a": str, **dtypes}
    # GH#42462
    dtype_dict_copy = dtype_dict.copy()
    result = parser.read_csv(StringIO(data), dtype=dtype_dict)
    expected = DataFrame({"a": ["1"], "a.1": [exp_value]})
    assert dtype_dict == dtype_dict_copy, "dtype dict changed"
    tm.assert_frame_equal(result, expected)


@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_mangle_dup_cols_single_dtype(all_parsers):
    # GH#42022
    parser = all_parsers
    data = """a,a\n1,1"""
    result = parser.read_csv(StringIO(data), dtype=str)
    expected = DataFrame({"a": ["1"], "a.1": ["1"]})
    tm.assert_frame_equal(result, expected)


@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtype_multi_index(all_parsers):
    # GH 42446
    parser = all_parsers
    data = "A,B,B\nX,Y,Z\n1,2,3"

    result = parser.read_csv(
        StringIO(data),
        header=list(range(2)),
        dtype={
            ("A", "X"): np.int32,
            ("B", "Y"): np.int32,
            ("B", "Z"): np.float32,
        },
    )

    expected = DataFrame(
        {
            ("A", "X"): np.int32([1]),
            ("B", "Y"): np.int32([2]),
            ("B", "Z"): np.float32([3]),
        }
    )

    tm.assert_frame_equal(result, expected)


def test_nullable_int_dtype(all_parsers, any_int_ea_dtype):
    # GH 25472
    parser = all_parsers
    dtype = any_int_ea_dtype

    data = """a,b,c
,3,5
1,,6
2,4,"""
    expected = DataFrame(
        {
            "a": pd.array([pd.NA, 1, 2], dtype=dtype),
            "b": pd.array([3, pd.NA, 4], dtype=dtype),
            "c": pd.array([5, 6, pd.NA], dtype=dtype),
        }
    )
    actual = parser.read_csv(StringIO(data), dtype=dtype)
    tm.assert_frame_equal(actual, expected)


@pytest.mark.usefixtures("pyarrow_xfail")
@pytest.mark.parametrize("default", ["float", "float64"])
def test_dtypes_defaultdict(all_parsers, default):
    # GH#41574
    data = """a,b
1,2
"""
    dtype = defaultdict(lambda: default, a="int64")
    parser = all_parsers
    result = parser.read_csv(StringIO(data), dtype=dtype)
    expected = DataFrame({"a": [1], "b": 2.0})
    tm.assert_frame_equal(result, expected)


@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtypes_defaultdict_mangle_dup_cols(all_parsers):
    # GH#41574
    data = """a,b,a,b,b.1
1,2,3,4,5
"""
    dtype = defaultdict(lambda: "float64", a="int64")
    dtype["b.1"] = "int64"
    parser = all_parsers
    result = parser.read_csv(StringIO(data), dtype=dtype)
    expected = DataFrame({"a": [1], "b": [2.0], "a.1": [3], "b.2": [4.0], "b.1": [5]})
    tm.assert_frame_equal(result, expected)


@pytest.mark.usefixtures("pyarrow_xfail")
def test_dtypes_defaultdict_invalid(all_parsers):
    # GH#41574
    data = """a,b
1,2
"""
    dtype = defaultdict(lambda: "invalid_dtype", a="int64")
    parser = all_parsers
    with pytest.raises(TypeError, match="not understood"):
        parser.read_csv(StringIO(data), dtype=dtype)


def test_dtype_backend(all_parsers):
    # GH#36712

    parser = all_parsers

    data = """a,b,c,d,e,f,g,h,i,j
1,2.5,True,a,,,,,12-31-2019,
3,4.5,False,b,6,7.5,True,a,12-31-2019,
"""
    result = parser.read_csv(
        StringIO(data), dtype_backend="numpy_nullable", parse_dates=["i"]
    )
    expected = DataFrame(
        {
            "a": pd.Series([1, 3], dtype="Int64"),
            "b": pd.Series([2.5, 4.5], dtype="Float64"),
            "c": pd.Series([True, False], dtype="boolean"),
            "d": pd.Series(["a", "b"], dtype="string"),
            "e": pd.Series([pd.NA, 6], dtype="Int64"),
            "f": pd.Series([pd.NA, 7.5], dtype="Float64"),
            "g": pd.Series([pd.NA, True], dtype="boolean"),
            "h": pd.Series([pd.NA, "a"], dtype="string"),
            "i": pd.Series([Timestamp("2019-12-31")] * 2),
            "j": pd.Series([pd.NA, pd.NA], dtype="Int64"),
        }
    )
    tm.assert_frame_equal(result, expected)


def test_dtype_backend_and_dtype(all_parsers):
    # GH#36712

    parser = all_parsers

    data = """a,b
1,2.5
,
"""
    result = parser.read_csv(
        StringIO(data), dtype_backend="numpy_nullable", dtype="float64"
    )
    expected = DataFrame({"a": [1.0, np.nan], "b": [2.5, np.nan]})
    tm.assert_frame_equal(result, expected)


def test_dtype_backend_string(all_parsers, string_storage):
    # GH#36712
    pa = pytest.importorskip("pyarrow")

    with pd.option_context("mode.string_storage", string_storage):
        parser = all_parsers

        data = """a,b
a,x
b,
"""
        result = parser.read_csv(StringIO(data), dtype_backend="numpy_nullable")

        if string_storage == "python":
            expected = DataFrame(
                {
                    "a": StringArray(np.array(["a", "b"], dtype=np.object_)),
                    "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)),
                }
            )
        else:
            expected = DataFrame(
                {
                    "a": ArrowStringArray(pa.array(["a", "b"])),
                    "b": ArrowStringArray(pa.array(["x", None])),
                }
            )
        tm.assert_frame_equal(result, expected)


def test_dtype_backend_ea_dtype_specified(all_parsers):
    # GH#491496
    data = """a,b
1,2
"""
    parser = all_parsers
    result = parser.read_csv(
        StringIO(data), dtype="Int64", dtype_backend="numpy_nullable"
    )
    expected = DataFrame({"a": [1], "b": 2}, dtype="Int64")
    tm.assert_frame_equal(result, expected)


def test_dtype_backend_pyarrow(all_parsers, request):
    # GH#36712
    pa = pytest.importorskip("pyarrow")
    parser = all_parsers

    data = """a,b,c,d,e,f,g,h,i,j
1,2.5,True,a,,,,,12-31-2019,
3,4.5,False,b,6,7.5,True,a,12-31-2019,
"""
    result = parser.read_csv(StringIO(data), dtype_backend="pyarrow", parse_dates=["i"])
    expected = DataFrame(
        {
            "a": pd.Series([1, 3], dtype="int64[pyarrow]"),
            "b": pd.Series([2.5, 4.5], dtype="float64[pyarrow]"),
            "c": pd.Series([True, False], dtype="bool[pyarrow]"),
            "d": pd.Series(["a", "b"], dtype=pd.ArrowDtype(pa.string())),
            "e": pd.Series([pd.NA, 6], dtype="int64[pyarrow]"),
            "f": pd.Series([pd.NA, 7.5], dtype="float64[pyarrow]"),
            "g": pd.Series([pd.NA, True], dtype="bool[pyarrow]"),
            "h": pd.Series(
                [pd.NA, "a"],
                dtype=pd.ArrowDtype(pa.string()),
            ),
            "i": pd.Series([Timestamp("2019-12-31")] * 2),
            "j": pd.Series([pd.NA, pd.NA], dtype="null[pyarrow]"),
        }
    )
    tm.assert_frame_equal(result, expected)


# pyarrow engine failing:
# https://github.com/pandas-dev/pandas/issues/56136
@pytest.mark.usefixtures("pyarrow_xfail")
def test_ea_int_avoid_overflow(all_parsers):
    # GH#32134
    parser = all_parsers
    data = """a,b
1,1
,1
1582218195625938945,1
"""
    result = parser.read_csv(StringIO(data), dtype={"a": "Int64"})
    expected = DataFrame(
        {
            "a": IntegerArray(
                np.array([1, 1, 1582218195625938945]), np.array([False, True, False])
            ),
            "b": 1,
        }
    )
    tm.assert_frame_equal(result, expected)


def test_string_inference(all_parsers):
    # GH#54430
    pytest.importorskip("pyarrow")
    dtype = "string[pyarrow_numpy]"

    data = """a,b
x,1
y,2
,3"""
    parser = all_parsers
    with pd.option_context("future.infer_string", True):
        result = parser.read_csv(StringIO(data))

    expected = DataFrame(
        {"a": pd.Series(["x", "y", None], dtype=dtype), "b": [1, 2, 3]},
        columns=pd.Index(["a", "b"], dtype=dtype),
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("dtype", ["O", object, "object", np.object_, str, np.str_])
def test_string_inference_object_dtype(all_parsers, dtype):
    # GH#56047
    pytest.importorskip("pyarrow")

    data = """a,b
x,a
y,a
z,a"""
    parser = all_parsers
    with pd.option_context("future.infer_string", True):
        result = parser.read_csv(StringIO(data), dtype=dtype)

    expected = DataFrame(
        {
            "a": pd.Series(["x", "y", "z"], dtype=object),
            "b": pd.Series(["a", "a", "a"], dtype=object),
        },
        columns=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"),
    )
    tm.assert_frame_equal(result, expected)

    with pd.option_context("future.infer_string", True):
        result = parser.read_csv(StringIO(data), dtype={"a": dtype})

    expected = DataFrame(
        {
            "a": pd.Series(["x", "y", "z"], dtype=object),
            "b": pd.Series(["a", "a", "a"], dtype="string[pyarrow_numpy]"),
        },
        columns=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"),
    )
    tm.assert_frame_equal(result, expected)


def test_accurate_parsing_of_large_integers(all_parsers):
    # GH#52505
    data = """SYMBOL,MOMENT,ID,ID_DEAL
AAPL,20230301181139587,1925036343869802844,
AAPL,20230301181139587,2023552585717889863,2023552585717263358
NVDA,20230301181139587,2023552585717889863,2023552585717263359
AMC,20230301181139587,2023552585717889863,2023552585717263360
AMZN,20230301181139587,2023552585717889759,2023552585717263360
MSFT,20230301181139587,2023552585717889863,2023552585717263361
NVDA,20230301181139587,2023552585717889827,2023552585717263361"""
    orders = pd.read_csv(StringIO(data), dtype={"ID_DEAL": pd.Int64Dtype()})
    assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263358, "ID_DEAL"]) == 1
    assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263359, "ID_DEAL"]) == 1
    assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263360, "ID_DEAL"]) == 2
    assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263361, "ID_DEAL"]) == 2


def test_dtypes_with_usecols(all_parsers):
    # GH#54868

    parser = all_parsers
    data = """a,b,c
1,2,3
4,5,6"""

    result = parser.read_csv(StringIO(data), usecols=["a", "c"], dtype={"a": object})
    if parser.engine == "pyarrow":
        values = [1, 4]
    else:
        values = ["1", "4"]
    expected = DataFrame({"a": pd.Series(values, dtype=object), "c": [3, 6]})
    tm.assert_frame_equal(result, expected)
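A standalone sketch (not one of the uploaded files) of the defaultdict dtype behavior exercised by test_dtypes_defaultdict above: columns not named in the mapping fall back to the defaultdict's factory.

from collections import defaultdict
from io import StringIO

import pandas as pd

dtype = defaultdict(lambda: "float64", a="int64")
df = pd.read_csv(StringIO("a,b,c\n1,2,3"), dtype=dtype)
print(df.dtypes.tolist())  # [dtype('int64'), dtype('float64'), dtype('float64')]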
venv/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_empty.py
ADDED
@@ -0,0 +1,181 @@
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO

import numpy as np
import pytest

from pandas import (
    Categorical,
    DataFrame,
    Index,
    MultiIndex,
    Series,
    concat,
)
import pandas._testing as tm

skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")


@skip_pyarrow  # CSV parse error: Empty CSV file or block
def test_dtype_all_columns_empty(all_parsers):
    # see gh-12048
    parser = all_parsers
    result = parser.read_csv(StringIO("A,B"), dtype=str)

    expected = DataFrame({"A": [], "B": []}, dtype=str)
    tm.assert_frame_equal(result, expected)


@skip_pyarrow  # CSV parse error: Empty CSV file or block
def test_empty_pass_dtype(all_parsers):
    parser = all_parsers

    data = "one,two"
    result = parser.read_csv(StringIO(data), dtype={"one": "u1"})

    expected = DataFrame(
        {"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
    )
    tm.assert_frame_equal(result, expected)


@skip_pyarrow  # CSV parse error: Empty CSV file or block
def test_empty_with_index_pass_dtype(all_parsers):
    parser = all_parsers

    data = "one,two"
    result = parser.read_csv(
        StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
    )

    expected = DataFrame(
        {"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
    )
    tm.assert_frame_equal(result, expected)


@skip_pyarrow  # CSV parse error: Empty CSV file or block
def test_empty_with_multi_index_pass_dtype(all_parsers):
    parser = all_parsers

    data = "one,two,three"
    result = parser.read_csv(
        StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
    )

    exp_idx = MultiIndex.from_arrays(
        [np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)],
        names=["one", "two"],
    )
    expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
    tm.assert_frame_equal(result, expected)


@skip_pyarrow  # CSV parse error: Empty CSV file or block
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
    parser = all_parsers

    data = "one,one"
    result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})

    expected = DataFrame(
        {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
    )
    tm.assert_frame_equal(result, expected)


@skip_pyarrow  # CSV parse error: Empty CSV file or block
def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
    parser = all_parsers

    data = "one,one"
    result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})

    expected = DataFrame(
        {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
    )
    tm.assert_frame_equal(result, expected)


@skip_pyarrow  # CSV parse error: Empty CSV file or block
def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
    # see gh-9424
    parser = all_parsers
    expected = concat(
        [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
        axis=1,
    )

    data = "one,one"
    result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
    tm.assert_frame_equal(result, expected)


def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
    # see gh-9424
    parser = all_parsers
    expected = concat(
        [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
        axis=1,
    )
    expected.index = expected.index.astype(object)

    with pytest.raises(ValueError, match="Duplicate names"):
        data = ""
        parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})


@pytest.mark.parametrize(
    "dtype,expected",
    [
        (np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
        (
            "category",
            DataFrame({"a": Categorical([]), "b": Categorical([])}),
        ),
        (
            {"a": "category", "b": "category"},
            DataFrame({"a": Categorical([]), "b": Categorical([])}),
        ),
        ("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")),
        (
            "timedelta64[ns]",
            DataFrame(
                {
                    "a": Series([], dtype="timedelta64[ns]"),
                    "b": Series([], dtype="timedelta64[ns]"),
                },
            ),
        ),
        (
            {"a": np.int64, "b": np.int32},
            DataFrame(
                {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
            ),
        ),
        (
            {0: np.int64, 1: np.int32},
            DataFrame(
                {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
            ),
        ),
        (
            {"a": np.int64, 1: np.int32},
            DataFrame(
                {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
            ),
        ),
    ],
)
@skip_pyarrow  # CSV parse error: Empty CSV file or block
def test_empty_dtype(all_parsers, dtype, expected):
    # see gh-14712
    parser = all_parsers
    data = "a,b"

    result = parser.read_csv(StringIO(data), header=0, dtype=dtype)
    tm.assert_frame_equal(result, expected)
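A standalone sketch (not one of the uploaded files) of the header-only behavior these tests cover: a CSV with only a header row yields a zero-row frame, but per-column dtypes still apply.

from io import StringIO

import pandas as pd

df = pd.read_csv(StringIO("one,two"), dtype={"one": "u1"})
print(len(df), df.dtypes["one"], df.dtypes["two"])  # 0 uint8 object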
venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py
ADDED
@@ -0,0 +1,643 @@
1 |
+
"""
|
2 |
+
Tests that apply specifically to the CParser. Unless specifically stated
|
3 |
+
as a CParser-specific issue, the goal is to eventually move as many of
|
4 |
+
these tests out of this module as soon as the Python parser can accept
|
5 |
+
further arguments when parsing.
|
6 |
+
"""
|
7 |
+
from decimal import Decimal
|
8 |
+
from io import (
|
9 |
+
BytesIO,
|
10 |
+
StringIO,
|
11 |
+
TextIOWrapper,
|
12 |
+
)
|
13 |
+
import mmap
|
14 |
+
import os
|
15 |
+
import tarfile
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
import pytest
|
19 |
+
|
20 |
+
from pandas.compat.numpy import np_version_gte1p24
|
21 |
+
from pandas.errors import (
|
22 |
+
ParserError,
|
23 |
+
ParserWarning,
|
24 |
+
)
|
25 |
+
import pandas.util._test_decorators as td
|
26 |
+
|
27 |
+
from pandas import (
|
28 |
+
DataFrame,
|
29 |
+
concat,
|
30 |
+
)
|
31 |
+
import pandas._testing as tm
|
32 |
+
|
33 |
+
|
34 |
+
@pytest.mark.parametrize(
|
35 |
+
"malformed",
|
36 |
+
["1\r1\r1\r 1\r 1\r", "1\r1\r1\r 1\r 1\r11\r", "1\r1\r1\r 1\r 1\r11\r1\r"],
|
37 |
+
ids=["words pointer", "stream pointer", "lines pointer"],
|
38 |
+
)
|
39 |
+
def test_buffer_overflow(c_parser_only, malformed):
|
40 |
+
# see gh-9205: test certain malformed input files that cause
|
41 |
+
# buffer overflows in tokenizer.c
|
42 |
+
msg = "Buffer overflow caught - possible malformed input file."
|
43 |
+
parser = c_parser_only
|
44 |
+
|
45 |
+
with pytest.raises(ParserError, match=msg):
|
46 |
+
parser.read_csv(StringIO(malformed))
|
47 |
+
|
48 |
+
|
49 |
+
def test_delim_whitespace_custom_terminator(c_parser_only):
|
50 |
+
# See gh-12912
|
51 |
+
data = "a b c~1 2 3~4 5 6~7 8 9"
|
52 |
+
parser = c_parser_only
|
53 |
+
|
54 |
+
depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
|
55 |
+
with tm.assert_produces_warning(
|
56 |
+
FutureWarning, match=depr_msg, check_stacklevel=False
|
57 |
+
):
|
58 |
+
df = parser.read_csv(StringIO(data), lineterminator="~", delim_whitespace=True)
|
59 |
+
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
|
60 |
+
tm.assert_frame_equal(df, expected)
|
61 |
+
|
62 |
+
|
63 |
+
def test_dtype_and_names_error(c_parser_only):
|
64 |
+
# see gh-8833: passing both dtype and names
|
65 |
+
# resulting in an error reporting issue
|
66 |
+
parser = c_parser_only
|
67 |
+
data = """
|
68 |
+
1.0 1
|
69 |
+
2.0 2
|
70 |
+
3.0 3
|
71 |
+
"""
|
72 |
+
# base cases
|
73 |
+
result = parser.read_csv(StringIO(data), sep=r"\s+", header=None)
|
74 |
+
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
|
75 |
+
tm.assert_frame_equal(result, expected)
|
76 |
+
|
77 |
+
result = parser.read_csv(StringIO(data), sep=r"\s+", header=None, names=["a", "b"])
|
78 |
+
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["a", "b"])
|
79 |
+
tm.assert_frame_equal(result, expected)
|
80 |
+
|
81 |
+
# fallback casting
|
82 |
+
result = parser.read_csv(
|
83 |
+
StringIO(data), sep=r"\s+", header=None, names=["a", "b"], dtype={"a": np.int32}
|
84 |
+
)
|
85 |
+
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=["a", "b"])
|
86 |
+
expected["a"] = expected["a"].astype(np.int32)
|
87 |
+
tm.assert_frame_equal(result, expected)
|
88 |
+
|
89 |
+
data = """
|
90 |
+
1.0 1
|
91 |
+
nan 2
|
92 |
+
3.0 3
|
93 |
+
"""
|
94 |
+
# fallback casting, but not castable
|
95 |
+
warning = RuntimeWarning if np_version_gte1p24 else None
|
96 |
+
with pytest.raises(ValueError, match="cannot safely convert"):
|
97 |
+
with tm.assert_produces_warning(warning, check_stacklevel=False):
|
98 |
+
parser.read_csv(
|
99 |
+
StringIO(data),
|
100 |
+
sep=r"\s+",
|
101 |
+
header=None,
|
102 |
+
names=["a", "b"],
|
103 |
+
dtype={"a": np.int32},
|
104 |
+
)
|
105 |
+
|
106 |
+
|
107 |
+
@pytest.mark.parametrize(
|
108 |
+
"match,kwargs",
|
109 |
+
[
|
110 |
+
# For each of these cases, all of the dtypes are valid, just unsupported.
|
111 |
+
(
|
112 |
+
(
|
113 |
+
"the dtype datetime64 is not supported for parsing, "
|
114 |
+
"pass this column using parse_dates instead"
|
115 |
+
),
|
116 |
+
{"dtype": {"A": "datetime64", "B": "float64"}},
|
117 |
+
),
|
118 |
+
(
|
119 |
+
(
|
120 |
+
"the dtype datetime64 is not supported for parsing, "
|
121 |
+
"pass this column using parse_dates instead"
|
122 |
+
),
|
123 |
+
{"dtype": {"A": "datetime64", "B": "float64"}, "parse_dates": ["B"]},
|
124 |
+
),
|
125 |
+
(
|
126 |
+
"the dtype timedelta64 is not supported for parsing",
|
127 |
+
{"dtype": {"A": "timedelta64", "B": "float64"}},
|
128 |
+
),
|
129 |
+
(
|
130 |
+
f"the dtype {tm.ENDIAN}U8 is not supported for parsing",
|
131 |
+
{"dtype": {"A": "U8"}},
|
132 |
+
),
|
133 |
+
],
|
134 |
+
ids=["dt64-0", "dt64-1", "td64", f"{tm.ENDIAN}U8"],
|
135 |
+
)
|
136 |
+
def test_unsupported_dtype(c_parser_only, match, kwargs):
|
137 |
+
parser = c_parser_only
|
138 |
+
df = DataFrame(
|
139 |
+
np.random.default_rng(2).random((5, 2)),
|
140 |
+
columns=list("AB"),
|
141 |
+
index=["1A", "1B", "1C", "1D", "1E"],
|
142 |
+
)
|
143 |
+
|
144 |
+
with tm.ensure_clean("__unsupported_dtype__.csv") as path:
|
145 |
+
df.to_csv(path)
|
146 |
+
|
147 |
+
with pytest.raises(TypeError, match=match):
|
148 |
+
parser.read_csv(path, index_col=0, **kwargs)
|
149 |
+
|
150 |
+
|
151 |
+
@td.skip_if_32bit
|
152 |
+
@pytest.mark.slow
|
153 |
+
# test numbers between 1 and 2
|
154 |
+
@pytest.mark.parametrize("num", np.linspace(1.0, 2.0, num=21))
|
155 |
+
def test_precise_conversion(c_parser_only, num):
|
156 |
+
parser = c_parser_only
|
157 |
+
|
158 |
+
normal_errors = []
|
159 |
+
precise_errors = []
|
160 |
+
|
161 |
+
def error(val: float, actual_val: Decimal) -> Decimal:
|
162 |
+
return abs(Decimal(f"{val:.100}") - actual_val)
|
163 |
+
|
164 |
+
# 25 decimal digits of precision
|
165 |
+
text = f"a\n{num:.25}"
|
166 |
+
|
167 |
+
normal_val = float(
|
168 |
+
parser.read_csv(StringIO(text), float_precision="legacy")["a"][0]
|
169 |
+
)
|
170 |
+
precise_val = float(parser.read_csv(StringIO(text), float_precision="high")["a"][0])
|
171 |
+
roundtrip_val = float(
|
172 |
+
parser.read_csv(StringIO(text), float_precision="round_trip")["a"][0]
|
173 |
+
)
|
174 |
+
actual_val = Decimal(text[2:])
|
175 |
+
|
176 |
+
normal_errors.append(error(normal_val, actual_val))
|
177 |
+
precise_errors.append(error(precise_val, actual_val))
|
178 |
+
|
179 |
+
# round-trip should match float()
|
180 |
+
assert roundtrip_val == float(text[2:])
|
181 |
+
|
182 |
+
assert sum(precise_errors) <= sum(normal_errors)
|
183 |
+
assert max(precise_errors) <= max(normal_errors)
|
+
+
+def test_usecols_dtypes(c_parser_only):
+    parser = c_parser_only
+    data = """\
+1,2,3
+4,5,6
+7,8,9
+10,11,12"""
+
+    result = parser.read_csv(
+        StringIO(data),
+        usecols=(0, 1, 2),
+        names=("a", "b", "c"),
+        header=None,
+        converters={"a": str},
+        dtype={"b": int, "c": float},
+    )
+    result2 = parser.read_csv(
+        StringIO(data),
+        usecols=(0, 2),
+        names=("a", "b", "c"),
+        header=None,
+        converters={"a": str},
+        dtype={"b": int, "c": float},
+    )
+
+    assert (result.dtypes == [object, int, float]).all()
+    assert (result2.dtypes == [object, float]).all()
+
+
+def test_disable_bool_parsing(c_parser_only):
+    # see gh-2090
+
+    parser = c_parser_only
+    data = """A,B,C
+Yes,No,Yes
+No,Yes,Yes
+Yes,,Yes
+No,No,No"""
+
+    result = parser.read_csv(StringIO(data), dtype=object)
+    assert (result.dtypes == object).all()
+
+    result = parser.read_csv(StringIO(data), dtype=object, na_filter=False)
+    assert result["B"][2] == ""
+
+
+def test_custom_lineterminator(c_parser_only):
+    parser = c_parser_only
+    data = "a,b,c~1,2,3~4,5,6"
+
+    result = parser.read_csv(StringIO(data), lineterminator="~")
+    expected = parser.read_csv(StringIO(data.replace("~", "\n")))
+
+    tm.assert_frame_equal(result, expected)
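Editor's note: the same option in ordinary use, as a quick sketch:

import pandas as pd
from io import StringIO

data = "a,b,c~1,2,3~4,5,6"
# A single custom character stands in for the newline (C engine only;
# the python engine rejects custom line terminators).
df = pd.read_csv(StringIO(data), lineterminator="~")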
+
+
+def test_parse_ragged_csv(c_parser_only):
+    parser = c_parser_only
+    data = """1,2,3
+1,2,3,4
+1,2,3,4,5
+1,2
+1,2,3,4"""
+
+    nice_data = """1,2,3,,
+1,2,3,4,
+1,2,3,4,5
+1,2,,,
+1,2,3,4,"""
+    result = parser.read_csv(
+        StringIO(data), header=None, names=["a", "b", "c", "d", "e"]
+    )
+
+    expected = parser.read_csv(
+        StringIO(nice_data), header=None, names=["a", "b", "c", "d", "e"]
+    )
+
+    tm.assert_frame_equal(result, expected)
+
+    # too many columns, cause segfault if not careful
+    data = "1,2\n3,4,5"
+
+    result = parser.read_csv(StringIO(data), header=None, names=range(50))
+    expected = parser.read_csv(StringIO(data), header=None, names=range(3)).reindex(
+        columns=range(50)
+    )
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_tokenize_CR_with_quoting(c_parser_only):
+    # see gh-3453
+    parser = c_parser_only
+    data = ' a,b,c\r"a,b","e,d","f,f"'
+
+    result = parser.read_csv(StringIO(data), header=None)
+    expected = parser.read_csv(StringIO(data.replace("\r", "\n")), header=None)
+    tm.assert_frame_equal(result, expected)
+
+    result = parser.read_csv(StringIO(data))
+    expected = parser.read_csv(StringIO(data.replace("\r", "\n")))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("count", [3 * 2**n for n in range(6)])
+def test_grow_boundary_at_cap(c_parser_only, count):
+    # See gh-12494
+    #
+    # Cause of error was that the C parser
+    # was not increasing the buffer size when
+    # the desired space would fill the buffer
+    # to capacity, which would later cause a
+    # buffer overflow error when checking the
+    # EOF terminator of the CSV stream.
+    # 3 * 2^n commas was observed to break the parser
+    parser = c_parser_only
+
+    with StringIO("," * count) as s:
+        expected = DataFrame(columns=[f"Unnamed: {i}" for i in range(count + 1)])
+        df = parser.read_csv(s)
+    tm.assert_frame_equal(df, expected)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("encoding", [None, "utf-8"])
+def test_parse_trim_buffers(c_parser_only, encoding):
+    # This test is part of a bugfix for gh-13703. It attempts to
+    # stress the system memory allocator, to cause it to move the
+    # stream buffer and either let the OS reclaim the region, or let
+    # other memory requests of the parser otherwise modify the contents
+    # of the memory space where it was formerly located.
+    # This test is designed to cause a `segfault` with unpatched
+    # `tokenizer.c`. Sometimes the test fails on `segfault`, other
+    # times it fails due to memory corruption, which causes the
+    # loaded DataFrame to differ from the expected one.
+
+    # Also force 'utf-8' encoding, so that `_string_convert` would take
+    # a different execution branch.
+
+    parser = c_parser_only
+
+    # Generate a large mixed-type CSV file on-the-fly (one record is
+    # approx 1.5KiB).
+    record_ = (
+        """9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z"""
+        """ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,"""
+        """ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9"""
+        """99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,"""
+        """9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9."""
+        """99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999."""
+        """99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ"""
+        """ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ"""
+        """ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z"""
+        """ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,"""
+        """9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,"""
+        """999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,"""
+        """,,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999"""
+        """,9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9."""
+        """999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,"""
+        """,9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z"""
+        """ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ"""
+        """,999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99"""
+        """,,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-"""
+        """9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9"""
+        """.99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,"""
+        """,,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9."""
+        """99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ"""
+        """ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ"""
+        """-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ"""
+        """ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ"""
+        """,9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99"""
+        """,99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9"""
+        """.99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,"""
+    )
+
+    # Set the number of lines so that a call to `parser_trim_buffers`
+    # is triggered: after a couple of full chunks are consumed a
+    # relatively small 'residual' chunk would cause reallocation
+    # within the parser.
+    chunksize, n_lines = 128, 2 * 128 + 15
+    csv_data = "\n".join([record_] * n_lines) + "\n"
+
+    # We will use StringIO to load the CSV from this text buffer.
+    # pd.read_csv() will iterate over the file in chunks and will
+    # finally read a residual chunk of really small size.
+
+    # Generate the expected output: manually create the dataframe
+    # by splitting by comma and repeating it `n_lines` times.
+    row = tuple(val_ if val_ else np.nan for val_ in record_.split(","))
+    expected = DataFrame(
+        [row for _ in range(n_lines)], dtype=object, columns=None, index=None
+    )
+
+    # Iterate over the CSV file in chunks of `chunksize` lines
+    with parser.read_csv(
+        StringIO(csv_data),
+        header=None,
+        dtype=object,
+        chunksize=chunksize,
+        encoding=encoding,
+    ) as chunks_:
+        result = concat(chunks_, axis=0, ignore_index=True)
+
+    # Check for data corruption if there was no segfault
+    tm.assert_frame_equal(result, expected)
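Editor's note: stripped of the stress-test machinery, the chunked-read pattern exercised above is the plain chunksize API; a minimal sketch with invented data sizes:

import pandas as pd
from io import StringIO

csv_data = "1,2\n" * (2 * 128 + 15)  # leaves a small residual chunk at the end
with pd.read_csv(StringIO(csv_data), header=None, chunksize=128) as reader:
    # The undersized final chunk is exactly the case that used to trigger
    # the buffer-trimming bug checked by the test above.
    result = pd.concat(reader, ignore_index=True)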
+
+
+def test_internal_null_byte(c_parser_only):
+    # see gh-14012
+    #
+    # The null byte ('\x00') should not be used as a
+    # true line terminator, escape character, or comment
+    # character, only as a placeholder to indicate that
+    # none was specified.
+    #
+    # This test should be moved to test_common.py ONLY when
+    # Python's csv class supports parsing '\x00'.
+    parser = c_parser_only
+
+    names = ["a", "b", "c"]
+    data = "1,2,3\n4,\x00,6\n7,8,9"
+    expected = DataFrame([[1, 2.0, 3], [4, np.nan, 6], [7, 8, 9]], columns=names)
+
+    result = parser.read_csv(StringIO(data), names=names)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_read_nrows_large(c_parser_only):
+    # gh-7626 - Read only nrows of data for large inputs (>262144 bytes)
+    parser = c_parser_only
+    header_narrow = "\t".join(["COL_HEADER_" + str(i) for i in range(10)]) + "\n"
+    data_narrow = "\t".join(["somedatasomedatasomedata1" for _ in range(10)]) + "\n"
+    header_wide = "\t".join(["COL_HEADER_" + str(i) for i in range(15)]) + "\n"
+    data_wide = "\t".join(["somedatasomedatasomedata2" for _ in range(15)]) + "\n"
+    test_input = header_narrow + data_narrow * 1050 + header_wide + data_wide * 2
+
+    df = parser.read_csv(StringIO(test_input), sep="\t", nrows=1010)
+
+    assert df.size == 1010 * 10
+
+
+def test_float_precision_round_trip_with_text(c_parser_only):
+    # see gh-15140
+    parser = c_parser_only
+    df = parser.read_csv(StringIO("a"), header=None, float_precision="round_trip")
+    tm.assert_frame_equal(df, DataFrame({0: ["a"]}))
+
+
+def test_large_difference_in_columns(c_parser_only):
+    # see gh-14125
+    parser = c_parser_only
+
+    count = 10000
+    large_row = ("X," * count)[:-1] + "\n"
+    normal_row = "XXXXXX XXXXXX,111111111111111\n"
+    test_input = (large_row + normal_row * 6)[:-1]
+
+    result = parser.read_csv(StringIO(test_input), header=None, usecols=[0])
+    rows = test_input.split("\n")
+
+    expected = DataFrame([row.split(",")[0] for row in rows])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_data_after_quote(c_parser_only):
+    # see gh-15910
+    parser = c_parser_only
+
+    data = 'a\n1\n"b"a'
+    result = parser.read_csv(StringIO(data))
+
+    expected = DataFrame({"a": ["1", "ba"]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_comment_whitespace_delimited(c_parser_only):
+    parser = c_parser_only
+    test_input = """\
+1 2
+2 2 3
+3 2 3 # 3 fields
+4 2 3# 3 fields
+5 2 # 2 fields
+6 2# 2 fields
+7 # 1 field, NaN
+8# 1 field, NaN
+9 2 3 # skipped line
+# comment"""
+    with tm.assert_produces_warning(
+        ParserWarning, match="Skipping line", check_stacklevel=False
+    ):
+        df = parser.read_csv(
+            StringIO(test_input),
+            comment="#",
+            header=None,
+            delimiter="\\s+",
+            skiprows=0,
+            on_bad_lines="warn",
+        )
+    expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]])
+    tm.assert_frame_equal(df, expected)
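Editor's note: the interplay tested above — comments stripped before fields are counted, bad lines warned about and skipped — looks like this in ordinary use (a sketch with invented data; the exact warning text varies by version):

import pandas as pd
from io import StringIO

data = "1 2\n3 2 3 # now three fields\n5 2\n"
df = pd.read_csv(
    StringIO(data),
    sep=r"\s+",
    header=None,
    comment="#",
    on_bad_lines="warn",  # the three-field row is reported and dropped
)
# df keeps only the two-field rows: [1, 2] and [5, 2]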
+
+
+def test_file_like_no_next(c_parser_only):
+    # gh-16530: the file-like need not have a "next" or "__next__"
+    # attribute despite having an "__iter__" attribute.
+    #
+    # NOTE: This is only true for the C engine, not Python engine.
+    class NoNextBuffer(StringIO):
+        def __next__(self):
+            raise AttributeError("No next method")
+
+        next = __next__
+
+    parser = c_parser_only
+    data = "a\n1"
+
+    expected = DataFrame({"a": [1]})
+    result = parser.read_csv(NoNextBuffer(data))
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_buffer_rd_bytes_bad_unicode(c_parser_only):
+    # see gh-22748
+    t = BytesIO(b"\xB0")
+    t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape")
+    msg = "'utf-8' codec can't encode character"
+    with pytest.raises(UnicodeError, match=msg):
+        c_parser_only.read_csv(t, encoding="UTF-8")
+
+
+@pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"])
+def test_read_tarfile(c_parser_only, csv_dir_path, tar_suffix):
+    # see gh-16530
+    #
+    # Unfortunately, Python's CSV library can't handle
+    # tarfile objects (expects string, not bytes when
+    # iterating through a file-like).
+    parser = c_parser_only
+    tar_path = os.path.join(csv_dir_path, "tar_csv" + tar_suffix)
+
+    with tarfile.open(tar_path, "r") as tar:
+        data_file = tar.extractfile("tar_data.csv")
+
+        out = parser.read_csv(data_file)
+        expected = DataFrame({"a": [1]})
+        tm.assert_frame_equal(out, expected)
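Editor's note: outside the test fixtures, the same pattern reads a CSV member straight out of a tar archive; the archive and member names below are placeholders:

import tarfile
import pandas as pd

with tarfile.open("archive.tar.gz", "r") as tar:
    member = tar.extractfile("data.csv")  # a binary file-like object
    df = pd.read_csv(member)  # the C engine consumes the bytes stream directly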
+
+
+def test_chunk_whitespace_on_boundary(c_parser_only):
+    # see gh-9735: this issue is C parser-specific (bug when
+    # parsing whitespace and characters at chunk boundary)
+    #
+    # This test case has a field too large for the Python parser / CSV library.
+    parser = c_parser_only
+
+    chunk1 = "a" * (1024 * 256 - 2) + "\na"
+    chunk2 = "\n a"
+    result = parser.read_csv(StringIO(chunk1 + chunk2), header=None)
+
+    expected = DataFrame(["a" * (1024 * 256 - 2), "a", " a"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_file_handles_mmap(c_parser_only, csv1):
+    # gh-14418
+    #
+    # Don't close user provided file handles.
+    parser = c_parser_only
+
+    with open(csv1, encoding="utf-8") as f:
+        with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
+            parser.read_csv(m)
+            assert not m.closed
+
+
+def test_file_binary_mode(c_parser_only):
+    # see gh-23779
+    parser = c_parser_only
+    expected = DataFrame([[1, 2, 3], [4, 5, 6]])
+
+    with tm.ensure_clean() as path:
+        with open(path, "w", encoding="utf-8") as f:
+            f.write("1,2,3\n4,5,6")
+
+        with open(path, "rb") as f:
+            result = parser.read_csv(f, header=None)
+            tm.assert_frame_equal(result, expected)
+
+
+def test_unix_style_breaks(c_parser_only):
+    # GH 11020
+    parser = c_parser_only
+    with tm.ensure_clean() as path:
+        with open(path, "w", newline="\n", encoding="utf-8") as f:
+            f.write("blah\n\ncol_1,col_2,col_3\n\n")
+        result = parser.read_csv(path, skiprows=2, encoding="utf-8", engine="c")
+    expected = DataFrame(columns=["col_1", "col_2", "col_3"])
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
+@pytest.mark.parametrize(
+    "data,thousands,decimal",
+    [
+        (
+            """A|B|C
+1|2,334.01|5
+10|13|10.
+""",
+            ",",
+            ".",
+        ),
+        (
+            """A|B|C
+1|2.334,01|5
+10|13|10,
+""",
+            ".",
+            ",",
+        ),
+    ],
+)
+def test_1000_sep_with_decimal(
+    c_parser_only, data, thousands, decimal, float_precision
+):
+    parser = c_parser_only
+    expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
+
+    result = parser.read_csv(
+        StringIO(data),
+        sep="|",
+        thousands=thousands,
+        decimal=decimal,
+        float_precision=float_precision,
+    )
+    tm.assert_frame_equal(result, expected)
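Editor's note: the second parametrization above is the common European convention (dot for thousands, comma for decimals); standalone it reads:

import pandas as pd
from io import StringIO

data = "A|B|C\n1|2.334,01|5\n10|13|10,\n"
df = pd.read_csv(StringIO(data), sep="|", thousands=".", decimal=",")
# B parses as float64: [2334.01, 13.0]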
+
+
+def test_float_precision_options(c_parser_only):
+    # GH 17154, 36228
+    parser = c_parser_only
+    s = "foo\n243.164\n"
+    df = parser.read_csv(StringIO(s))
+    df2 = parser.read_csv(StringIO(s), float_precision="high")
+
+    tm.assert_frame_equal(df, df2)
+
+    df3 = parser.read_csv(StringIO(s), float_precision="legacy")
+
+    assert not df.iloc[0, 0] == df3.iloc[0, 0]
+
+    msg = "Unrecognized float_precision option: junk"
+
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(StringIO(s), float_precision="junk")
venv/lib/python3.10/site-packages/pandas/tests/io/parser/test_comment.py
ADDED
@@ -0,0 +1,227 @@
+"""
+Tests that comments are properly handled during parsing
+for all of the parsers defined in parsers.py
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize("na_values", [None, ["NaN"]])
+def test_comment(all_parsers, na_values):
+    parser = all_parsers
+    data = """A,B,C
+1,2.,4.#hello world
+5.,NaN,10.0
+"""
+    expected = DataFrame(
+        [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+    )
+    if parser.engine == "pyarrow":
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), comment="#", na_values=na_values)
+        return
+    result = parser.read_csv(StringIO(data), comment="#", na_values=na_values)
+    tm.assert_frame_equal(result, expected)
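Editor's note: the behavior under test — everything from the comment character to the end of its line is dropped before parsing, and lines left empty are skipped. A minimal sketch with invented data:

import pandas as pd
from io import StringIO

data = "A,B\n1,2#trailing comment\n# whole-line comment\n3,4"
df = pd.read_csv(StringIO(data), comment="#")
# Two rows survive: (1, 2) and (3, 4)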
+
+
+@pytest.mark.parametrize(
+    "read_kwargs", [{}, {"lineterminator": "*"}, {"delim_whitespace": True}]
+)
+def test_line_comment(all_parsers, read_kwargs, request):
+    parser = all_parsers
+    data = """# empty
+A,B,C
+1,2.,4.#hello world
+#ignore this line
+5.,NaN,10.0
+"""
+    warn = None
+    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+
+    if read_kwargs.get("delim_whitespace"):
+        data = data.replace(",", " ")
+        warn = FutureWarning
+    elif read_kwargs.get("lineterminator"):
+        data = data.replace("\n", read_kwargs.get("lineterminator"))
+
+    read_kwargs["comment"] = "#"
+    if parser.engine == "pyarrow":
+        if "lineterminator" in read_kwargs:
+            msg = (
+                "The 'lineterminator' option is not supported with the 'pyarrow' engine"
+            )
+        else:
+            msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(
+                warn, match=depr_msg, check_stacklevel=False
+            ):
+                parser.read_csv(StringIO(data), **read_kwargs)
+        return
+    elif parser.engine == "python" and read_kwargs.get("lineterminator"):
+        msg = r"Custom line terminators not supported in python parser \(yet\)"
+        with pytest.raises(ValueError, match=msg):
+            with tm.assert_produces_warning(
+                warn, match=depr_msg, check_stacklevel=False
+            ):
+                parser.read_csv(StringIO(data), **read_kwargs)
+        return
+
+    with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False):
+        result = parser.read_csv(StringIO(data), **read_kwargs)
+
+    expected = DataFrame(
+        [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_comment_skiprows(all_parsers):
+    parser = all_parsers
+    data = """# empty
+random line
+# second empty line
+1,2,3
+A,B,C
+1,2.,4.
+5.,NaN,10.0
+"""
+    # This should ignore the first four lines (including comments).
+    expected = DataFrame(
+        [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+    )
+    if parser.engine == "pyarrow":
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), comment="#", skiprows=4)
+        return
+
+    result = parser.read_csv(StringIO(data), comment="#", skiprows=4)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_comment_header(all_parsers):
+    parser = all_parsers
+    data = """# empty
+# second empty line
+1,2,3
+A,B,C
+1,2.,4.
+5.,NaN,10.0
+"""
+    # Header should begin at the second non-comment line.
+    expected = DataFrame(
+        [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+    )
+    if parser.engine == "pyarrow":
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), comment="#", header=1)
+        return
+    result = parser.read_csv(StringIO(data), comment="#", header=1)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_comment_skiprows_header(all_parsers):
+    parser = all_parsers
+    data = """# empty
+# second empty line
+# third empty line
+X,Y,Z
+1,2,3
+A,B,C
+1,2.,4.
+5.,NaN,10.0
+"""
+    # Skiprows should skip the first 4 lines (including comments),
+    # while header should start from the second non-commented line,
+    # starting with line 5.
+    expected = DataFrame(
+        [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+    )
+    if parser.engine == "pyarrow":
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1)
+        return
+
+    result = parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1)
+    tm.assert_frame_equal(result, expected)
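Editor's note: the counting rule this test pins down — skiprows applies to physical lines first (comment lines included), and header then indexes what survives once the remaining comment lines are removed. The same arithmetic in isolation, with invented data:

import pandas as pd
from io import StringIO

data = "# c1\n# c2\n# c3\nX,Y,Z\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n"
# skiprows=4 drops the three comments plus "X,Y,Z"; among the survivors
# "1,2,3" is row 0, so header=1 selects "A,B,C" as the column names.
df = pd.read_csv(StringIO(data), comment="#", skiprows=4, header=1)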
+
+
+@pytest.mark.parametrize("comment_char", ["#", "~", "&", "^", "*", "@"])
+def test_custom_comment_char(all_parsers, comment_char):
+    parser = all_parsers
+    data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
+
+    if parser.engine == "pyarrow":
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data.replace("#", comment_char)), comment=comment_char
+            )
+        return
+    result = parser.read_csv(
+        StringIO(data.replace("#", comment_char)), comment=comment_char
+    )
+
+    expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"])
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("header", ["infer", None])
+def test_comment_first_line(all_parsers, header):
+    # see gh-4623
+    parser = all_parsers
+    data = "# notes\na,b,c\n# more notes\n1,2,3"
+
+    if header is None:
+        expected = DataFrame({0: ["a", "1"], 1: ["b", "2"], 2: ["c", "3"]})
+    else:
+        expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
+
+    if parser.engine == "pyarrow":
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), comment="#", header=header)
+        return
+    result = parser.read_csv(StringIO(data), comment="#", header=header)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_comment_char_in_default_value(all_parsers, request):
+    # GH#34002
+    if all_parsers.engine == "c":
+        reason = "see gh-34002: works on the python engine but not the c engine"
+        # NA value containing comment char is interpreted as comment
+        request.applymarker(pytest.mark.xfail(reason=reason, raises=AssertionError))
+    parser = all_parsers
+
+    data = (
+        "# this is a comment\n"
+        "col1,col2,col3,col4\n"
+        "1,2,3,4#inline comment\n"
+        "4,5#,6,10\n"
+        "7,8,#N/A,11\n"
+    )
+    if parser.engine == "pyarrow":
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), comment="#", na_values="#N/A")
+        return
+    result = parser.read_csv(StringIO(data), comment="#", na_values="#N/A")
+    expected = DataFrame(
+        {
+            "col1": [1, 4, 7],
+            "col2": [2, 5, 8],
+            "col3": [3.0, np.nan, np.nan],
+            "col4": [4.0, np.nan, 11.0],
+        }
+    )
+    tm.assert_frame_equal(result, expected)