diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..904573f6824e527ea0a091f93c25d6a624552bf5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a0e5eb98871b3a1c9357aecce6e8cc12d244c26
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..078eb35fe8b969fb65b0b1cbf7c8cc1826ddab4e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49b1c15c6cf78a2f179a848d13cf0ed82372cc27
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c0e568e6df1975b329fa7730e91d3bc06cec2df
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d4edd565fac421e5f8911094157c2ad891c7f82
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84b3fc442309fb1340c8ec53e5b7e23ff5f8229e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bba68ec7a01792ab07f26602fdc1852cfe11422
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..459e12bcde9b7591d61539f354a6bde6a60e1cc4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..89615172688d7b56fbb070dbcd4750365d7d612d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py
@@ -0,0 +1,298 @@
+import contextlib
+import time
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_windows
+import pandas.util._test_decorators as td
+
+from pandas import (
+ DataFrame,
+ read_excel,
+)
+import pandas._testing as tm
+
+from pandas.io.excel import ExcelWriter
+from pandas.io.formats.excel import ExcelFormatter
+
+pytest.importorskip("jinja2")
+# jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel
+# could compute styles and render to excel without jinja2, since there is no
+# 'template' file, but this needs the import error to delayed until render time.
+
+if is_platform_windows():
+ pytestmark = pytest.mark.single_cpu
+
+
+def assert_equal_cell_styles(cell1, cell2):
+ # TODO: should find a better way to check equality
+ assert cell1.alignment.__dict__ == cell2.alignment.__dict__
+ assert cell1.border.__dict__ == cell2.border.__dict__
+ assert cell1.fill.__dict__ == cell2.fill.__dict__
+ assert cell1.font.__dict__ == cell2.font.__dict__
+ assert cell1.number_format == cell2.number_format
+ assert cell1.protection.__dict__ == cell2.protection.__dict__
+
+
+@pytest.mark.parametrize(
+ "engine",
+ ["xlsxwriter", "openpyxl"],
+)
+def test_styler_to_excel_unstyled(engine):
+ # compare DataFrame.to_excel and Styler.to_excel when no styles applied
+ pytest.importorskip(engine)
+ df = DataFrame(np.random.default_rng(2).standard_normal((2, 2)))
+ with tm.ensure_clean(".xlsx") as path:
+ with ExcelWriter(path, engine=engine) as writer:
+ df.to_excel(writer, sheet_name="dataframe")
+ df.style.to_excel(writer, sheet_name="unstyled")
+
+ openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns):
+ assert len(col1) == len(col2)
+ for cell1, cell2 in zip(col1, col2):
+ assert cell1.value == cell2.value
+ assert_equal_cell_styles(cell1, cell2)
+
+
+shared_style_params = [
+ (
+ "background-color: #111222",
+ ["fill", "fgColor", "rgb"],
+ {"xlsxwriter": "FF111222", "openpyxl": "00111222"},
+ ),
+ (
+ "color: #111222",
+ ["font", "color", "value"],
+ {"xlsxwriter": "FF111222", "openpyxl": "00111222"},
+ ),
+ ("font-family: Arial;", ["font", "name"], "arial"),
+ ("font-weight: bold;", ["font", "b"], True),
+ ("font-style: italic;", ["font", "i"], True),
+ ("text-decoration: underline;", ["font", "u"], "single"),
+ ("number-format: $??,???.00;", ["number_format"], "$??,???.00"),
+ ("text-align: left;", ["alignment", "horizontal"], "left"),
+ (
+ "vertical-align: bottom;",
+ ["alignment", "vertical"],
+ {"xlsxwriter": None, "openpyxl": "bottom"}, # xlsxwriter Fails
+ ),
+ ("vertical-align: middle;", ["alignment", "vertical"], "center"),
+ # Border widths
+ ("border-left: 2pt solid red", ["border", "left", "style"], "medium"),
+ ("border-left: 1pt dotted red", ["border", "left", "style"], "dotted"),
+ ("border-left: 2pt dotted red", ["border", "left", "style"], "mediumDashDotDot"),
+ ("border-left: 1pt dashed red", ["border", "left", "style"], "dashed"),
+ ("border-left: 2pt dashed red", ["border", "left", "style"], "mediumDashed"),
+ ("border-left: 1pt solid red", ["border", "left", "style"], "thin"),
+ ("border-left: 3pt solid red", ["border", "left", "style"], "thick"),
+ # Border expansion
+ (
+ "border-left: 2pt solid #111222",
+ ["border", "left", "color", "rgb"],
+ {"xlsxwriter": "FF111222", "openpyxl": "00111222"},
+ ),
+ ("border: 1pt solid red", ["border", "top", "style"], "thin"),
+ (
+ "border: 1pt solid #111222",
+ ["border", "top", "color", "rgb"],
+ {"xlsxwriter": "FF111222", "openpyxl": "00111222"},
+ ),
+ ("border: 1pt solid red", ["border", "right", "style"], "thin"),
+ (
+ "border: 1pt solid #111222",
+ ["border", "right", "color", "rgb"],
+ {"xlsxwriter": "FF111222", "openpyxl": "00111222"},
+ ),
+ ("border: 1pt solid red", ["border", "bottom", "style"], "thin"),
+ (
+ "border: 1pt solid #111222",
+ ["border", "bottom", "color", "rgb"],
+ {"xlsxwriter": "FF111222", "openpyxl": "00111222"},
+ ),
+ ("border: 1pt solid red", ["border", "left", "style"], "thin"),
+ (
+ "border: 1pt solid #111222",
+ ["border", "left", "color", "rgb"],
+ {"xlsxwriter": "FF111222", "openpyxl": "00111222"},
+ ),
+ # Border styles
+ (
+ "border-left-style: hair; border-left-color: black",
+ ["border", "left", "style"],
+ "hair",
+ ),
+]
+
+
+@pytest.mark.parametrize(
+ "engine",
+ ["xlsxwriter", "openpyxl"],
+)
+@pytest.mark.parametrize("css, attrs, expected", shared_style_params)
+def test_styler_to_excel_basic(engine, css, attrs, expected):
+ pytest.importorskip(engine)
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+ styler = df.style.map(lambda x: css)
+
+ with tm.ensure_clean(".xlsx") as path:
+ with ExcelWriter(path, engine=engine) as writer:
+ df.to_excel(writer, sheet_name="dataframe")
+ styler.to_excel(writer, sheet_name="styled")
+
+ openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ # test unstyled data cell does not have expected styles
+ # test styled cell has expected styles
+ u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2)
+ for attr in attrs:
+ u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr)
+
+ if isinstance(expected, dict):
+ assert u_cell is None or u_cell != expected[engine]
+ assert s_cell == expected[engine]
+ else:
+ assert u_cell is None or u_cell != expected
+ assert s_cell == expected
+
+
+@pytest.mark.parametrize(
+ "engine",
+ ["xlsxwriter", "openpyxl"],
+)
+@pytest.mark.parametrize("css, attrs, expected", shared_style_params)
+def test_styler_to_excel_basic_indexes(engine, css, attrs, expected):
+ pytest.importorskip(engine)
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+
+ styler = df.style
+ styler.map_index(lambda x: css, axis=0)
+ styler.map_index(lambda x: css, axis=1)
+
+ null_styler = df.style
+ null_styler.map(lambda x: "null: css;")
+ null_styler.map_index(lambda x: "null: css;", axis=0)
+ null_styler.map_index(lambda x: "null: css;", axis=1)
+
+ with tm.ensure_clean(".xlsx") as path:
+ with ExcelWriter(path, engine=engine) as writer:
+ null_styler.to_excel(writer, sheet_name="null_styled")
+ styler.to_excel(writer, sheet_name="styled")
+
+ openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ # test null styled index cells does not have expected styles
+ # test styled cell has expected styles
+ ui_cell, si_cell = wb["null_styled"].cell(2, 1), wb["styled"].cell(2, 1)
+ uc_cell, sc_cell = wb["null_styled"].cell(1, 2), wb["styled"].cell(1, 2)
+ for attr in attrs:
+ ui_cell, si_cell = getattr(ui_cell, attr, None), getattr(si_cell, attr)
+ uc_cell, sc_cell = getattr(uc_cell, attr, None), getattr(sc_cell, attr)
+
+ if isinstance(expected, dict):
+ assert ui_cell is None or ui_cell != expected[engine]
+ assert si_cell == expected[engine]
+ assert uc_cell is None or uc_cell != expected[engine]
+ assert sc_cell == expected[engine]
+ else:
+ assert ui_cell is None or ui_cell != expected
+ assert si_cell == expected
+ assert uc_cell is None or uc_cell != expected
+ assert sc_cell == expected
+
+
+# From https://openpyxl.readthedocs.io/en/stable/api/openpyxl.styles.borders.html
+# Note: Leaving behavior of "width"-type styles undefined; user should use border-width
+# instead
+excel_border_styles = [
+ # "thin",
+ "dashed",
+ "mediumDashDot",
+ "dashDotDot",
+ "hair",
+ "dotted",
+ "mediumDashDotDot",
+ # "medium",
+ "double",
+ "dashDot",
+ "slantDashDot",
+ # "thick",
+ "mediumDashed",
+]
+
+
+@pytest.mark.parametrize(
+ "engine",
+ ["xlsxwriter", "openpyxl"],
+)
+@pytest.mark.parametrize("border_style", excel_border_styles)
+def test_styler_to_excel_border_style(engine, border_style):
+ css = f"border-left: {border_style} black thin"
+ attrs = ["border", "left", "style"]
+ expected = border_style
+
+ pytest.importorskip(engine)
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+ styler = df.style.map(lambda x: css)
+
+ with tm.ensure_clean(".xlsx") as path:
+ with ExcelWriter(path, engine=engine) as writer:
+ df.to_excel(writer, sheet_name="dataframe")
+ styler.to_excel(writer, sheet_name="styled")
+
+ openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ # test unstyled data cell does not have expected styles
+ # test styled cell has expected styles
+ u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2)
+ for attr in attrs:
+ u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr)
+
+ if isinstance(expected, dict):
+ assert u_cell is None or u_cell != expected[engine]
+ assert s_cell == expected[engine]
+ else:
+ assert u_cell is None or u_cell != expected
+ assert s_cell == expected
+
+
+def test_styler_custom_converter():
+ openpyxl = pytest.importorskip("openpyxl")
+
+ def custom_converter(css):
+ return {"font": {"color": {"rgb": "111222"}}}
+
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 1)))
+ styler = df.style.map(lambda x: "color: #888999")
+ with tm.ensure_clean(".xlsx") as path:
+ with ExcelWriter(path, engine="openpyxl") as writer:
+ ExcelFormatter(styler, style_converter=custom_converter).write(
+ writer, sheet_name="custom"
+ )
+
+ with contextlib.closing(openpyxl.load_workbook(path)) as wb:
+ assert wb["custom"].cell(2, 2).font.color.value == "00111222"
+
+
+@pytest.mark.single_cpu
+@td.skip_if_not_us_locale
+def test_styler_to_s3(s3_public_bucket, s3so):
+ # GH#46381
+
+ mock_bucket_name, target_file = s3_public_bucket.name, "test.xlsx"
+ df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
+ styler = df.style.set_sticky(axis="index")
+ styler.to_excel(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so)
+ timeout = 5
+ while True:
+ if target_file in (obj.key for obj in s3_public_bucket.objects.all()):
+ break
+ time.sleep(0.1)
+ timeout -= 0.1
+ assert timeout > 0, "Timed out waiting for file to appear on moto"
+ result = read_excel(
+ f"s3://{mock_bucket_name}/{target_file}", index_col=0, storage_options=s3so
+ )
+ tm.assert_frame_equal(result, df)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8f83fd7f3481ef90ce0fd92e7a816ba27151de9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8fef55bf1eabbdf4718d4c98f95efebdec668c5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdf91975269b4fc2cd47683fc0a002d026371227
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e015beab633ca94e518d109b81b60cdad7a8b9b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b4e524de6f8d186fa9d860d9527061b88bd5bac
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..15f0072bffd02573b8257bc4ecdc471cec8ac24f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..112a3572d4a1fdd1642edcaa506863d616a1601c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02370ce5a8cf0503f7b0f33c9924327f86dd7661
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..290b633d0cb0cc6fedc3f90060359e44738866ea
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ede944a68f88c1f9121250f99b4e3463200fcf5c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8dfacc2ceb5e83cf965f480a308f61c71765ffe
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..042cba41307068d0fbc1490fe53713257da2f0a6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b69e6c86356adb28ee0449f479b46814a8e8803
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dab2986153be9fdd840c6f7a3f9bb0b9fcaeacf4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b331952e1e03c58bfd84c600f4843746225a3fc
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e5ed417477ed38109fa584dbaad9d4a8b5c979f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..285e551b4e107a0ddfc45d62d45c7849af7e0a31
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0fefccb08cea353bb25d4d95f97a9dce1e7bbd11
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07357a884b5d6701322dc5281fa370f6cac62b34
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9cfed5d3fadad67c99b3aae6b1db14e04fc41d9e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5194cbe3569acdab2c292b5440c801dfe0454ad2
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6c08273a203c8fa5d7255310b236159a69f67de
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5054a0e6a60c469da925c74a5a7e75cdf916e8f7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e4712e8bb3d15959bddc0bd8697981b16bd8ef
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py
@@ -0,0 +1,358 @@
+import io
+
+import numpy as np
+import pytest
+
+from pandas import (
+ NA,
+ DataFrame,
+ read_csv,
+)
+
+pytest.importorskip("jinja2")
+
+
+def bar_grad(a=None, b=None, c=None, d=None):
+ """Used in multiple tests to simplify formatting of expected result"""
+ ret = [("width", "10em")]
+ if all(x is None for x in [a, b, c, d]):
+ return ret
+ return ret + [
+ (
+ "background",
+ f"linear-gradient(90deg,{','.join([x for x in [a, b, c, d] if x])})",
+ )
+ ]
+
+
+def no_bar():
+ return bar_grad()
+
+
+def bar_to(x, color="#d65f5f"):
+ return bar_grad(f" {color} {x:.1f}%", f" transparent {x:.1f}%")
+
+
+def bar_from_to(x, y, color="#d65f5f"):
+ return bar_grad(
+ f" transparent {x:.1f}%",
+ f" {color} {x:.1f}%",
+ f" {color} {y:.1f}%",
+ f" transparent {y:.1f}%",
+ )
+
+
+@pytest.fixture
+def df_pos():
+ return DataFrame([[1], [2], [3]])
+
+
+@pytest.fixture
+def df_neg():
+ return DataFrame([[-1], [-2], [-3]])
+
+
+@pytest.fixture
+def df_mix():
+ return DataFrame([[-3], [1], [2]])
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(50), bar_to(100)]),
+ ("right", [bar_to(100), bar_from_to(50, 100), no_bar()]),
+ ("mid", [bar_to(33.33), bar_to(66.66), bar_to(100)]),
+ ("zero", [bar_from_to(50, 66.7), bar_from_to(50, 83.3), bar_from_to(50, 100)]),
+ ("mean", [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ (2.0, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ (np.median, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ ],
+)
+def test_align_positive_cases(df_pos, align, exp):
+ # test different align cases for all positive values
+ result = df_pos.style.bar(align=align)._compute().ctx
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [bar_to(100), bar_to(50), no_bar()]),
+ ("right", [no_bar(), bar_from_to(50, 100), bar_to(100)]),
+ ("mid", [bar_from_to(66.66, 100), bar_from_to(33.33, 100), bar_to(100)]),
+ ("zero", [bar_from_to(33.33, 50), bar_from_to(16.66, 50), bar_to(50)]),
+ ("mean", [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ (-2.0, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ (np.median, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ ],
+)
+def test_align_negative_cases(df_neg, align, exp):
+ # test different align cases for all negative values
+ result = df_neg.style.bar(align=align)._compute().ctx
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(80), bar_to(100)]),
+ ("right", [bar_to(100), bar_from_to(80, 100), no_bar()]),
+ ("mid", [bar_to(60), bar_from_to(60, 80), bar_from_to(60, 100)]),
+ ("zero", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ ("mean", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ (-0.0, [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ (np.nanmedian, [bar_to(50), no_bar(), bar_from_to(50, 62.5)]),
+ ],
+)
+@pytest.mark.parametrize("nans", [True, False])
+def test_align_mixed_cases(df_mix, align, exp, nans):
+ # test different align cases for mixed positive and negative values
+ # also test no impact of NaNs and no_bar
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ if nans:
+ df_mix.loc[3, :] = np.nan
+ expected.update({(3, 0): no_bar()})
+ result = df_mix.style.bar(align=align)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ (
+ "left",
+ {
+ "index": [[no_bar(), no_bar()], [bar_to(100), bar_to(100)]],
+ "columns": [[no_bar(), bar_to(100)], [no_bar(), bar_to(100)]],
+ "none": [[no_bar(), bar_to(33.33)], [bar_to(66.66), bar_to(100)]],
+ },
+ ),
+ (
+ "mid",
+ {
+ "index": [[bar_to(33.33), bar_to(50)], [bar_to(100), bar_to(100)]],
+ "columns": [[bar_to(50), bar_to(100)], [bar_to(75), bar_to(100)]],
+ "none": [[bar_to(25), bar_to(50)], [bar_to(75), bar_to(100)]],
+ },
+ ),
+ (
+ "zero",
+ {
+ "index": [
+ [bar_from_to(50, 66.66), bar_from_to(50, 75)],
+ [bar_from_to(50, 100), bar_from_to(50, 100)],
+ ],
+ "columns": [
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ [bar_from_to(50, 87.5), bar_from_to(50, 100)],
+ ],
+ "none": [
+ [bar_from_to(50, 62.5), bar_from_to(50, 75)],
+ [bar_from_to(50, 87.5), bar_from_to(50, 100)],
+ ],
+ },
+ ),
+ (
+ 2,
+ {
+ "index": [
+ [bar_to(50), no_bar()],
+ [bar_from_to(50, 100), bar_from_to(50, 100)],
+ ],
+ "columns": [
+ [bar_to(50), no_bar()],
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ ],
+ "none": [
+ [bar_from_to(25, 50), no_bar()],
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ ],
+ },
+ ),
+ ],
+)
+@pytest.mark.parametrize("axis", ["index", "columns", "none"])
+def test_align_axis(align, exp, axis):
+ # test all axis combinations with positive values and different aligns
+ data = DataFrame([[1, 2], [3, 4]])
+ result = (
+ data.style.bar(align=align, axis=None if axis == "none" else axis)
+ ._compute()
+ .ctx
+ )
+ expected = {
+ (0, 0): exp[axis][0][0],
+ (0, 1): exp[axis][0][1],
+ (1, 0): exp[axis][1][0],
+ (1, 1): exp[axis][1][1],
+ }
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "values, vmin, vmax",
+ [
+ ("positive", 1.5, 2.5),
+ ("negative", -2.5, -1.5),
+ ("mixed", -2.5, 1.5),
+ ],
+)
+@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately
+@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
+def test_vmin_vmax_clipping(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
+ # test that clipping occurs if any vmin > data_values or vmax < data_values
+ if align == "mid": # mid acts as left or right in each case
+ if values == "positive":
+ align = "left"
+ elif values == "negative":
+ align = "right"
+ df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
+ vmin = None if nullify == "vmin" else vmin
+ vmax = None if nullify == "vmax" else vmax
+
+ clip_df = df.where(df <= (vmax if vmax else 999), other=vmax)
+ clip_df = clip_df.where(clip_df >= (vmin if vmin else -999), other=vmin)
+
+ result = (
+ df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
+ ._compute()
+ .ctx
+ )
+ expected = clip_df.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "values, vmin, vmax",
+ [
+ ("positive", 0.5, 4.5),
+ ("negative", -4.5, -0.5),
+ ("mixed", -4.5, 4.5),
+ ],
+)
+@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately
+@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
+def test_vmin_vmax_widening(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
+ # test that widening occurs if any vmax > data_values or vmin < data_values
+ if align == "mid": # mid acts as left or right in each case
+ if values == "positive":
+ align = "left"
+ elif values == "negative":
+ align = "right"
+ df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
+ vmin = None if nullify == "vmin" else vmin
+ vmax = None if nullify == "vmax" else vmax
+
+ expand_df = df.copy()
+ expand_df.loc[3, :], expand_df.loc[4, :] = vmin, vmax
+
+ result = (
+ df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
+ ._compute()
+ .ctx
+ )
+ expected = expand_df.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result.items() <= expected.items()
+
+
+def test_numerics():
+ # test data is pre-selected for numeric values
+ data = DataFrame([[1, "a"], [2, "b"]])
+ result = data.style.bar()._compute().ctx
+ assert (0, 1) not in result
+ assert (1, 1) not in result
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(100, "green")]),
+ ("right", [bar_to(100, "red"), no_bar()]),
+ ("mid", [bar_to(25, "red"), bar_from_to(25, 100, "green")]),
+ ("zero", [bar_from_to(33.33, 50, "red"), bar_from_to(50, 100, "green")]),
+ ],
+)
+def test_colors_mixed(align, exp):
+ data = DataFrame([[-1], [3]])
+ result = data.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result == {(0, 0): exp[0], (1, 0): exp[1]}
+
+
+def test_bar_align_height():
+ # test when keyword height is used 'no-repeat center' and 'background-size' present
+ data = DataFrame([[1], [2]])
+ result = data.style.bar(align="left", height=50)._compute().ctx
+ bg_s = "linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%) no-repeat center"
+ expected = {
+ (0, 0): [("width", "10em")],
+ (1, 0): [
+ ("width", "10em"),
+ ("background", bg_s),
+ ("background-size", "100% 50.0%"),
+ ],
+ }
+ assert result == expected
+
+
+def test_bar_value_error_raises():
+ df = DataFrame({"A": [-100, -60, -30, -20]})
+
+ msg = "`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(align="poorly", color=["#d65f5f", "#5fba7d"]).to_html()
+
+ msg = r"`width` must be a value in \[0, 100\]"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(width=200).to_html()
+
+ msg = r"`height` must be a value in \[0, 100\]"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(height=200).to_html()
+
+
+def test_bar_color_and_cmap_error_raises():
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ msg = "`color` and `cmap` cannot both be given"
+ # Test that providing both color and cmap raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color="#d65f5f", cmap="viridis").to_html()
+
+
+def test_bar_invalid_color_type_error_raises():
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ msg = (
+ r"`color` must be string or list or tuple of 2 strings,"
+ r"\(eg: color=\['#d65f5f', '#5fba7d'\]\)"
+ )
+ # Test that providing an invalid color type raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color=123).to_html()
+
+ # Test that providing a color list with more than two elements raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color=["#d65f5f", "#5fba7d", "#abcdef"]).to_html()
+
+
+def test_styler_bar_with_NA_values():
+ df1 = DataFrame({"A": [1, 2, NA, 4]})
+ df2 = DataFrame([[NA, NA], [NA, NA]])
+ expected_substring = "style type="
+ html_output1 = df1.style.bar(subset="A").to_html()
+ html_output2 = df2.style.bar(align="left", axis=None).to_html()
+ assert expected_substring in html_output1
+ assert expected_substring in html_output2
+
+
+def test_style_bar_with_pyarrow_NA_values():
+ data = """name,age,test1,test2,teacher
+ Adam,15,95.0,80,Ashby
+ Bob,16,81.0,82,Ashby
+ Dave,16,89.0,84,Jones
+ Fred,15,,88,Jones"""
+ df = read_csv(io.StringIO(data), dtype_backend="pyarrow")
+ expected_substring = "style type="
+ html_output = df.style.bar(subset="test1").to_html()
+ assert expected_substring in html_output
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..d52e3a37e7693dadce34f73fc03a0790c7a0b4d3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py
@@ -0,0 +1,44 @@
+import pytest
+
+jinja2 = pytest.importorskip("jinja2")
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+)
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ data=[[0, -0.609], [1, -1.228]],
+ columns=["A", "B"],
+ index=["x", "y"],
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+def test_concat_bad_columns(styler):
+ msg = "`other.data` must have same columns as `Styler.data"
+ with pytest.raises(ValueError, match=msg):
+ styler.concat(DataFrame([[1, 2]]).style)
+
+
+def test_concat_bad_type(styler):
+ msg = "`other` must be of type `Styler`"
+ with pytest.raises(TypeError, match=msg):
+ styler.concat(DataFrame([[1, 2]]))
+
+
+def test_concat_bad_index_levels(styler, df):
+ df = df.copy()
+ df.index = MultiIndex.from_tuples([(0, 0), (1, 1)])
+ msg = "number of index levels must be same in `other`"
+ with pytest.raises(ValueError, match=msg):
+ styler.concat(df.style)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c84816ead140b95f14df8dbeccc83b317ac239a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py
@@ -0,0 +1,562 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ NA,
+ DataFrame,
+ IndexSlice,
+ MultiIndex,
+ NaT,
+ Timestamp,
+ option_context,
+)
+
+pytest.importorskip("jinja2")
+from pandas.io.formats.style import Styler
+from pandas.io.formats.style_render import _str_escape
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ data=[[0, -0.609], [1, -1.228]],
+ columns=["A", "B"],
+ index=["x", "y"],
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+@pytest.fixture
+def df_multi():
+ return DataFrame(
+ data=np.arange(16).reshape(4, 4),
+ columns=MultiIndex.from_product([["A", "B"], ["a", "b"]]),
+ index=MultiIndex.from_product([["X", "Y"], ["x", "y"]]),
+ )
+
+
+@pytest.fixture
+def styler_multi(df_multi):
+ return Styler(df_multi, uuid_len=0)
+
+
+def test_display_format(styler):
+ ctx = styler.format("{:0.1f}")._translate(True, True)
+ assert all(["display_value" in c for c in row] for row in ctx["body"])
+ assert all([len(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"])
+ assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3
+
+
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize("columns", [True, False])
+def test_display_format_index(styler, index, columns):
+ exp_index = ["x", "y"]
+ if index:
+ styler.format_index(lambda v: v.upper(), axis=0) # test callable
+ exp_index = ["X", "Y"]
+
+ exp_columns = ["A", "B"]
+ if columns:
+ styler.format_index("*{}*", axis=1) # test string
+ exp_columns = ["*A*", "*B*"]
+
+ ctx = styler._translate(True, True)
+
+ for r, row in enumerate(ctx["body"]):
+ assert row[0]["display_value"] == exp_index[r]
+
+ for c, col in enumerate(ctx["head"][1:]):
+ assert col["display_value"] == exp_columns[c]
+
+
+def test_format_dict(styler):
+ ctx = styler.format({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "0.0"
+ assert ctx["body"][0][2]["display_value"] == "-60.90%"
+
+
+def test_format_index_dict(styler):
+ ctx = styler.format_index({0: lambda v: v.upper()})._translate(True, True)
+ for i, val in enumerate(["X", "Y"]):
+ assert ctx["body"][i][0]["display_value"] == val
+
+
+def test_format_string(styler):
+ ctx = styler.format("{:.2f}")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "0.00"
+ assert ctx["body"][0][2]["display_value"] == "-0.61"
+ assert ctx["body"][1][1]["display_value"] == "1.00"
+ assert ctx["body"][1][2]["display_value"] == "-1.23"
+
+
+def test_format_callable(styler):
+ ctx = styler.format(lambda v: "neg" if v < 0 else "pos")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "pos"
+ assert ctx["body"][0][2]["display_value"] == "neg"
+ assert ctx["body"][1][1]["display_value"] == "pos"
+ assert ctx["body"][1][2]["display_value"] == "neg"
+
+
+def test_format_with_na_rep():
+ # GH 21527 28358
+ df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
+
+ ctx = df.style.format(None, na_rep="-")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "-"
+ assert ctx["body"][0][2]["display_value"] == "-"
+
+ ctx = df.style.format("{:.2%}", na_rep="-")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "-"
+ assert ctx["body"][0][2]["display_value"] == "-"
+ assert ctx["body"][1][1]["display_value"] == "110.00%"
+ assert ctx["body"][1][2]["display_value"] == "120.00%"
+
+ ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate(True, True)
+ assert ctx["body"][0][2]["display_value"] == "-"
+ assert ctx["body"][1][2]["display_value"] == "120.00%"
+
+
+def test_format_index_with_na_rep():
+ df = DataFrame([[1, 2, 3, 4, 5]], columns=["A", None, np.nan, NaT, NA])
+ ctx = df.style.format_index(None, na_rep="--", axis=1)._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == "A"
+ for i in [2, 3, 4, 5]:
+ assert ctx["head"][0][i]["display_value"] == "--"
+
+
+def test_format_non_numeric_na():
+ # GH 21527 28358
+ df = DataFrame(
+ {
+ "object": [None, np.nan, "foo"],
+ "datetime": [None, NaT, Timestamp("20120101")],
+ }
+ )
+ ctx = df.style.format(None, na_rep="-")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "-"
+ assert ctx["body"][0][2]["display_value"] == "-"
+ assert ctx["body"][1][1]["display_value"] == "-"
+ assert ctx["body"][1][2]["display_value"] == "-"
+
+
+@pytest.mark.parametrize(
+ "func, attr, kwargs",
+ [
+ ("format", "_display_funcs", {}),
+ ("format_index", "_display_funcs_index", {"axis": 0}),
+ ("format_index", "_display_funcs_columns", {"axis": 1}),
+ ],
+)
+def test_format_clear(styler, func, attr, kwargs):
+ assert (0, 0) not in getattr(styler, attr) # using default
+ getattr(styler, func)("{:.2f}", **kwargs)
+ assert (0, 0) in getattr(styler, attr) # formatter is specified
+ getattr(styler, func)(**kwargs)
+ assert (0, 0) not in getattr(styler, attr) # formatter cleared to default
+
+
+@pytest.mark.parametrize(
+ "escape, exp",
+ [
+ ("html", "<>&"%$#_{}~^\\~ ^ \\ "),
+ (
+ "latex",
+ '<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum '
+ "\\textbackslash \\textasciitilde \\space \\textasciicircum \\space "
+ "\\textbackslash \\space ",
+ ),
+ ],
+)
+def test_format_escape_html(escape, exp):
+ chars = '<>&"%$#_{}~^\\~ ^ \\ '
+ df = DataFrame([[chars]])
+
+ s = Styler(df, uuid_len=0).format("&{0}&", escape=None)
+ expected = f'
&{chars}& '
+ assert expected in s.to_html()
+
+ # only the value should be escaped before passing to the formatter
+ s = Styler(df, uuid_len=0).format("&{0}&", escape=escape)
+ expected = f'&{exp}& '
+ assert expected in s.to_html()
+
+ # also test format_index()
+ styler = Styler(DataFrame(columns=[chars]), uuid_len=0)
+ styler.format_index("&{0}&", escape=None, axis=1)
+ assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{chars}&"
+ styler.format_index("&{0}&", escape=escape, axis=1)
+ assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{exp}&"
+
+
+@pytest.mark.parametrize(
+ "chars, expected",
+ [
+ (
+ r"$ \$&%#_{}~^\ $ &%#_{}~^\ $",
+ "".join(
+ [
+ r"$ \$&%#_{}~^\ $ ",
+ r"\&\%\#\_\{\}\textasciitilde \textasciicircum ",
+ r"\textbackslash \space \$",
+ ]
+ ),
+ ),
+ (
+ r"\( &%#_{}~^\ \) &%#_{}~^\ \(",
+ "".join(
+ [
+ r"\( &%#_{}~^\ \) ",
+ r"\&\%\#\_\{\}\textasciitilde \textasciicircum ",
+ r"\textbackslash \space \textbackslash (",
+ ]
+ ),
+ ),
+ (
+ r"$\&%#_{}^\$",
+ r"\$\textbackslash \&\%\#\_\{\}\textasciicircum \textbackslash \$",
+ ),
+ (
+ r"$ \frac{1}{2} $ \( \frac{1}{2} \)",
+ "".join(
+ [
+ r"$ \frac{1}{2} $",
+ r" \textbackslash ( \textbackslash frac\{1\}\{2\} \textbackslash )",
+ ]
+ ),
+ ),
+ ],
+)
+def test_format_escape_latex_math(chars, expected):
+ # GH 51903
+ # latex-math escape works for each DataFrame cell separately. If we have
+ # a combination of dollar signs and brackets, the dollar sign would apply.
+ df = DataFrame([[chars]])
+ s = df.style.format("{0}", escape="latex-math")
+ assert s._translate(True, True)["body"][0][1]["display_value"] == expected
+
+
+def test_format_escape_na_rep():
+ # tests the na_rep is not escaped
+ df = DataFrame([['<>&"', None]])
+ s = Styler(df, uuid_len=0).format("X&{0}>X", escape="html", na_rep="&")
+ ex = 'X&<>&">X '
+ expected2 = '& '
+ assert ex in s.to_html()
+ assert expected2 in s.to_html()
+
+ # also test for format_index()
+ df = DataFrame(columns=['<>&"', None])
+ styler = Styler(df, uuid_len=0)
+ styler.format_index("X&{0}>X", escape="html", na_rep="&", axis=1)
+ ctx = styler._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == "X&<>&">X"
+ assert ctx["head"][0][2]["display_value"] == "&"
+
+
+def test_format_escape_floats(styler):
+ # test given formatter for number format is not impacted by escape
+ s = styler.format("{:.1f}", escape="html")
+ for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]:
+ assert expected in s.to_html()
+ # tests precision of floats is not impacted by escape
+ s = styler.format(precision=1, escape="html")
+ for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]:
+ assert expected in s.to_html()
+
+
+@pytest.mark.parametrize("formatter", [5, True, [2.0]])
+@pytest.mark.parametrize("func", ["format", "format_index"])
+def test_format_raises(styler, formatter, func):
+ with pytest.raises(TypeError, match="expected str or callable"):
+ getattr(styler, func)(formatter)
+
+
+@pytest.mark.parametrize(
+ "precision, expected",
+ [
+ (1, ["1.0", "2.0", "3.2", "4.6"]),
+ (2, ["1.00", "2.01", "3.21", "4.57"]),
+ (3, ["1.000", "2.009", "3.212", "4.566"]),
+ ],
+)
+def test_format_with_precision(precision, expected):
+ # Issue #13257
+ df = DataFrame([[1.0, 2.0090, 3.2121, 4.566]], columns=[1.0, 2.0090, 3.2121, 4.566])
+ styler = Styler(df)
+ styler.format(precision=precision)
+ styler.format_index(precision=precision, axis=1)
+
+ ctx = styler._translate(True, True)
+ for col, exp in enumerate(expected):
+ assert ctx["body"][0][col + 1]["display_value"] == exp # format test
+ assert ctx["head"][0][col + 1]["display_value"] == exp # format_index test
+
+
+@pytest.mark.parametrize("axis", [0, 1])
+@pytest.mark.parametrize(
+ "level, expected",
+ [
+ (0, ["X", "X", "_", "_"]), # level int
+ ("zero", ["X", "X", "_", "_"]), # level name
+ (1, ["_", "_", "X", "X"]), # other level int
+ ("one", ["_", "_", "X", "X"]), # other level name
+ ([0, 1], ["X", "X", "X", "X"]), # both levels
+ ([0, "zero"], ["X", "X", "_", "_"]), # level int and name simultaneous
+ ([0, "one"], ["X", "X", "X", "X"]), # both levels as int and name
+ (["one", "zero"], ["X", "X", "X", "X"]), # both level names, reversed
+ ],
+)
+def test_format_index_level(axis, level, expected):
+ midx = MultiIndex.from_arrays([["_", "_"], ["_", "_"]], names=["zero", "one"])
+ df = DataFrame([[1, 2], [3, 4]])
+ if axis == 0:
+ df.index = midx
+ else:
+ df.columns = midx
+
+ styler = df.style.format_index(lambda v: "X", level=level, axis=axis)
+ ctx = styler._translate(True, True)
+
+ if axis == 0: # compare index
+ result = [ctx["body"][s][0]["display_value"] for s in range(2)]
+ result += [ctx["body"][s][1]["display_value"] for s in range(2)]
+ else: # compare columns
+ result = [ctx["head"][0][s + 1]["display_value"] for s in range(2)]
+ result += [ctx["head"][1][s + 1]["display_value"] for s in range(2)]
+
+ assert expected == result
+
+
+def test_format_subset():
+ df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
+ ctx = df.style.format(
+ {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=IndexSlice[0, :]
+ )._translate(True, True)
+ expected = "0.1"
+ raw_11 = "1.123400"
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][1][1]["display_value"] == raw_11
+ assert ctx["body"][0][2]["display_value"] == "12.34%"
+
+ ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, :])._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][1][1]["display_value"] == raw_11
+
+ ctx = df.style.format("{:0.1f}", subset=IndexSlice["a"])._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][0][2]["display_value"] == "0.123400"
+
+ ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, "a"])._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][1][1]["display_value"] == raw_11
+
+ ctx = df.style.format("{:0.1f}", subset=IndexSlice[[0, 1], ["a"]])._translate(
+ True, True
+ )
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][1][1]["display_value"] == "1.1"
+ assert ctx["body"][0][2]["display_value"] == "0.123400"
+ assert ctx["body"][1][2]["display_value"] == raw_11
+
+
+@pytest.mark.parametrize("formatter", [None, "{:,.1f}"])
+@pytest.mark.parametrize("decimal", [".", "*"])
+@pytest.mark.parametrize("precision", [None, 2])
+@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)])
+def test_format_thousands(formatter, decimal, precision, func, col):
+ styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style
+ result = getattr(styler, func)( # testing float
+ thousands="_", formatter=formatter, decimal=decimal, precision=precision
+ )._translate(True, True)
+ assert "1_000_000" in result["body"][0][col]["display_value"]
+
+ styler = DataFrame([[1000000]], index=[1000000]).style
+ result = getattr(styler, func)( # testing int
+ thousands="_", formatter=formatter, decimal=decimal, precision=precision
+ )._translate(True, True)
+ assert "1_000_000" in result["body"][0][col]["display_value"]
+
+ styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style
+ result = getattr(styler, func)( # testing complex
+ thousands="_", formatter=formatter, decimal=decimal, precision=precision
+ )._translate(True, True)
+ assert "1_000_000" in result["body"][0][col]["display_value"]
+
+
+@pytest.mark.parametrize("formatter", [None, "{:,.4f}"])
+@pytest.mark.parametrize("thousands", [None, ",", "*"])
+@pytest.mark.parametrize("precision", [None, 4])
+@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)])
+def test_format_decimal(formatter, thousands, precision, func, col):
+ styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style
+ result = getattr(styler, func)( # testing float
+ decimal="_", formatter=formatter, thousands=thousands, precision=precision
+ )._translate(True, True)
+ assert "000_123" in result["body"][0][col]["display_value"]
+
+ styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style
+ result = getattr(styler, func)( # testing complex
+ decimal="_", formatter=formatter, thousands=thousands, precision=precision
+ )._translate(True, True)
+ assert "000_123" in result["body"][0][col]["display_value"]
+
+
+def test_str_escape_error():
+ msg = "`escape` only permitted in {'html', 'latex', 'latex-math'}, got "
+ with pytest.raises(ValueError, match=msg):
+ _str_escape("text", "bad_escape")
+
+ with pytest.raises(ValueError, match=msg):
+ _str_escape("text", [])
+
+ _str_escape(2.00, "bad_escape") # OK since dtype is float
+
+
+def test_long_int_formatting():
+ df = DataFrame(data=[[1234567890123456789]], columns=["test"])
+ styler = df.style
+ ctx = styler._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "1234567890123456789"
+
+ styler = df.style.format(thousands="_")
+ ctx = styler._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "1_234_567_890_123_456_789"
+
+
+def test_format_options():
+ df = DataFrame({"int": [2000, 1], "float": [1.009, None], "str": ["&<", "&~"]})
+ ctx = df.style._translate(True, True)
+
+ # test option: na_rep
+ assert ctx["body"][1][2]["display_value"] == "nan"
+ with option_context("styler.format.na_rep", "MISSING"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][1][2]["display_value"] == "MISSING"
+
+ # test option: decimal and precision
+ assert ctx["body"][0][2]["display_value"] == "1.009000"
+ with option_context("styler.format.decimal", "_"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][2]["display_value"] == "1_009000"
+ with option_context("styler.format.precision", 2):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][2]["display_value"] == "1.01"
+
+ # test option: thousands
+ assert ctx["body"][0][1]["display_value"] == "2000"
+ with option_context("styler.format.thousands", "_"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][1]["display_value"] == "2_000"
+
+ # test option: escape
+ assert ctx["body"][0][3]["display_value"] == "&<"
+ assert ctx["body"][1][3]["display_value"] == "&~"
+ with option_context("styler.format.escape", "html"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][3]["display_value"] == "&<"
+ with option_context("styler.format.escape", "latex"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde "
+ with option_context("styler.format.escape", "latex-math"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde "
+
+ # test option: formatter
+ with option_context("styler.format.formatter", {"int": "{:,.2f}"}):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][1]["display_value"] == "2,000.00"
+
+
+def test_precision_zero(df):
+ styler = Styler(df, precision=0)
+ ctx = styler._translate(True, True)
+ assert ctx["body"][0][2]["display_value"] == "-1"
+ assert ctx["body"][1][2]["display_value"] == "-1"
+
+
+@pytest.mark.parametrize(
+ "formatter, exp",
+ [
+ (lambda x: f"{x:.3f}", "9.000"),
+ ("{:.2f}", "9.00"),
+ ({0: "{:.1f}"}, "9.0"),
+ (None, "9"),
+ ],
+)
+def test_formatter_options_validator(formatter, exp):
+ df = DataFrame([[9]])
+ with option_context("styler.format.formatter", formatter):
+ assert f" {exp} " in df.style.to_latex()
+
+
+def test_formatter_options_raises():
+ msg = "Value must be an instance of"
+ with pytest.raises(ValueError, match=msg):
+ with option_context("styler.format.formatter", ["bad", "type"]):
+ DataFrame().style.to_latex()
+
+
+def test_1level_multiindex():
+ # GH 43383
+ midx = MultiIndex.from_product([[1, 2]], names=[""])
+ df = DataFrame(-1, index=midx, columns=[0, 1])
+ ctx = df.style._translate(True, True)
+ assert ctx["body"][0][0]["display_value"] == "1"
+ assert ctx["body"][0][0]["is_visible"] is True
+ assert ctx["body"][1][0]["display_value"] == "2"
+ assert ctx["body"][1][0]["is_visible"] is True
+
+
+def test_boolean_format():
+ # gh 46384: booleans do not collapse to integer representation on display
+ df = DataFrame([[True, False]])
+ ctx = df.style._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] is True
+ assert ctx["body"][0][2]["display_value"] is False
+
+
+@pytest.mark.parametrize(
+ "hide, labels",
+ [
+ (False, [1, 2]),
+ (True, [1, 2, 3, 4]),
+ ],
+)
+def test_relabel_raise_length(styler_multi, hide, labels):
+ if hide:
+ styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")])
+ with pytest.raises(ValueError, match="``labels`` must be of length equal"):
+ styler_multi.relabel_index(labels=labels)
+
+
+def test_relabel_index(styler_multi):
+ labels = [(1, 2), (3, 4)]
+ styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")])
+ styler_multi.relabel_index(labels=labels)
+ ctx = styler_multi._translate(True, True)
+ assert {"value": "X", "display_value": 1}.items() <= ctx["body"][0][0].items()
+ assert {"value": "y", "display_value": 2}.items() <= ctx["body"][0][1].items()
+ assert {"value": "Y", "display_value": 3}.items() <= ctx["body"][1][0].items()
+ assert {"value": "x", "display_value": 4}.items() <= ctx["body"][1][1].items()
+
+
+def test_relabel_columns(styler_multi):
+ labels = [(1, 2), (3, 4)]
+ styler_multi.hide(axis=1, subset=[("A", "a"), ("B", "b")])
+ styler_multi.relabel_index(axis=1, labels=labels)
+ ctx = styler_multi._translate(True, True)
+ assert {"value": "A", "display_value": 1}.items() <= ctx["head"][0][3].items()
+ assert {"value": "B", "display_value": 3}.items() <= ctx["head"][0][4].items()
+ assert {"value": "b", "display_value": 2}.items() <= ctx["head"][1][3].items()
+ assert {"value": "a", "display_value": 4}.items() <= ctx["head"][1][4].items()
+
+
+def test_relabel_roundtrip(styler):
+ styler.relabel_index(["{}", "{}"])
+ ctx = styler._translate(True, True)
+ assert {"value": "x", "display_value": "x"}.items() <= ctx["body"][0][0].items()
+ assert {"value": "y", "display_value": "y"}.items() <= ctx["body"][1][0].items()
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d59719010ee03cc53373a1c96f5f8c5611d7681
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py
@@ -0,0 +1,218 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ NA,
+ DataFrame,
+ IndexSlice,
+)
+
+pytest.importorskip("jinja2")
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture(params=[(None, "float64"), (NA, "Int64")])
+def df(request):
+ # GH 45804
+ return DataFrame(
+ {"A": [0, np.nan, 10], "B": [1, request.param[0], 2]}, dtype=request.param[1]
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+def test_highlight_null(styler):
+ result = styler.highlight_null()._compute().ctx
+ expected = {
+ (1, 0): [("background-color", "red")],
+ (1, 1): [("background-color", "red")],
+ }
+ assert result == expected
+
+
+def test_highlight_null_subset(styler):
+ # GH 31345
+ result = (
+ styler.highlight_null(color="red", subset=["A"])
+ .highlight_null(color="green", subset=["B"])
+ ._compute()
+ .ctx
+ )
+ expected = {
+ (1, 0): [("background-color", "red")],
+ (1, 1): [("background-color", "green")],
+ }
+ assert result == expected
+
+
+@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"])
+def test_highlight_minmax_basic(df, f):
+ expected = {
+ (0, 1): [("background-color", "red")],
+ # ignores NaN row,
+ (2, 0): [("background-color", "red")],
+ }
+ if f == "highlight_min":
+ df = -df
+ result = getattr(df.style, f)(axis=1, color="red")._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"])
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"axis": None, "color": "red"}, # test axis
+ {"axis": 0, "subset": ["A"], "color": "red"}, # test subset and ignores NaN
+ {"axis": None, "props": "background-color: red"}, # test props
+ ],
+)
+def test_highlight_minmax_ext(df, f, kwargs):
+ expected = {(2, 0): [("background-color", "red")]}
+ if f == "highlight_min":
+ df = -df
+ result = getattr(df.style, f)(**kwargs)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"])
+@pytest.mark.parametrize("axis", [None, 0, 1])
+def test_highlight_minmax_nulls(f, axis):
+ # GH 42750
+ expected = {
+ (1, 0): [("background-color", "yellow")],
+ (1, 1): [("background-color", "yellow")],
+ }
+ if axis == 1:
+ expected.update({(2, 1): [("background-color", "yellow")]})
+
+ if f == "highlight_max":
+ df = DataFrame({"a": [NA, 1, None], "b": [np.nan, 1, -1]})
+ else:
+ df = DataFrame({"a": [NA, -1, None], "b": [np.nan, -1, 1]})
+
+ result = getattr(df.style, f)(axis=axis)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"left": 0, "right": 1}, # test basic range
+ {"left": 0, "right": 1, "props": "background-color: yellow"}, # test props
+ {"left": -100, "right": 100, "subset": IndexSlice[[0, 1], :]}, # test subset
+ {"left": 0, "subset": IndexSlice[[0, 1], :]}, # test no right
+ {"right": 1}, # test no left
+ {"left": [0, 0, 11], "axis": 0}, # test left as sequence
+ {"left": DataFrame({"A": [0, 0, 11], "B": [1, 1, 11]}), "axis": None}, # axis
+ {"left": 0, "right": [0, 1], "axis": 1}, # test sequence right
+ ],
+)
+def test_highlight_between(styler, kwargs):
+ expected = {
+ (0, 0): [("background-color", "yellow")],
+ (0, 1): [("background-color", "yellow")],
+ }
+ result = styler.highlight_between(**kwargs)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "arg, map, axis",
+ [
+ ("left", [1, 2], 0), # 0 axis has 3 elements not 2
+ ("left", [1, 2, 3], 1), # 1 axis has 2 elements not 3
+ ("left", np.array([[1, 2], [1, 2]]), None), # df is (2,3) not (2,2)
+ ("right", [1, 2], 0), # same tests as above for 'right' not 'left'
+ ("right", [1, 2, 3], 1), # ..
+ ("right", np.array([[1, 2], [1, 2]]), None), # ..
+ ],
+)
+def test_highlight_between_raises(arg, styler, map, axis):
+ msg = f"supplied '{arg}' is not correct shape"
+ with pytest.raises(ValueError, match=msg):
+ styler.highlight_between(**{arg: map, "axis": axis})._compute()
+
+
+def test_highlight_between_raises2(styler):
+ msg = "values can be 'both', 'left', 'right', or 'neither'"
+ with pytest.raises(ValueError, match=msg):
+ styler.highlight_between(inclusive="badstring")._compute()
+
+ with pytest.raises(ValueError, match=msg):
+ styler.highlight_between(inclusive=1)._compute()
+
+
+@pytest.mark.parametrize(
+ "inclusive, expected",
+ [
+ (
+ "both",
+ {
+ (0, 0): [("background-color", "yellow")],
+ (0, 1): [("background-color", "yellow")],
+ },
+ ),
+ ("neither", {}),
+ ("left", {(0, 0): [("background-color", "yellow")]}),
+ ("right", {(0, 1): [("background-color", "yellow")]}),
+ ],
+)
+def test_highlight_between_inclusive(styler, inclusive, expected):
+ kwargs = {"left": 0, "right": 1, "subset": IndexSlice[[0, 1], :]}
+ result = styler.highlight_between(**kwargs, inclusive=inclusive)._compute()
+ assert result.ctx == expected
+
+
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"q_left": 0.5, "q_right": 1, "axis": 0}, # base case
+ {"q_left": 0.5, "q_right": 1, "axis": None}, # test axis
+ {"q_left": 0, "q_right": 1, "subset": IndexSlice[2, :]}, # test subset
+ {"q_left": 0.5, "axis": 0}, # test no high
+ {"q_right": 1, "subset": IndexSlice[2, :], "axis": 1}, # test no low
+ {"q_left": 0.5, "axis": 0, "props": "background-color: yellow"}, # tst prop
+ ],
+)
+def test_highlight_quantile(styler, kwargs):
+ expected = {
+ (2, 0): [("background-color", "yellow")],
+ (2, 1): [("background-color", "yellow")],
+ }
+ result = styler.highlight_quantile(**kwargs)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "f,kwargs",
+ [
+ ("highlight_min", {"axis": 1, "subset": IndexSlice[1, :]}),
+ ("highlight_max", {"axis": 0, "subset": [0]}),
+ ("highlight_quantile", {"axis": None, "q_left": 0.6, "q_right": 0.8}),
+ ("highlight_between", {"subset": [0]}),
+ ],
+)
+@pytest.mark.parametrize(
+ "df",
+ [
+ DataFrame([[0, 10], [20, 30]], dtype=int),
+ DataFrame([[0, 10], [20, 30]], dtype=float),
+ DataFrame([[0, 10], [20, 30]], dtype="datetime64[ns]"),
+ DataFrame([[0, 10], [20, 30]], dtype=str),
+ DataFrame([[0, 10], [20, 30]], dtype="timedelta64[ns]"),
+ ],
+)
+def test_all_highlight_dtypes(f, kwargs, df):
+ if f == "highlight_quantile" and isinstance(df.iloc[0, 0], (str)):
+ return None # quantile incompatible with str
+ if f == "highlight_between":
+ kwargs["left"] = df.iloc[1, 0] # set the range low for testing
+
+ expected = {(1, 0): [("background-color", "yellow")]}
+ result = getattr(df.style, f)(**kwargs)._compute().ctx
+ assert result == expected
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e345eb82ed3c31e7a5e0f89fa574aea84923dd7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py
@@ -0,0 +1,1009 @@
+from textwrap import (
+ dedent,
+ indent,
+)
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+ option_context,
+)
+
+jinja2 = pytest.importorskip("jinja2")
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def env():
+ loader = jinja2.PackageLoader("pandas", "io/formats/templates")
+ env = jinja2.Environment(loader=loader, trim_blocks=True)
+ return env
+
+
+@pytest.fixture
+def styler():
+ return Styler(DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"]))
+
+
+@pytest.fixture
+def styler_mi():
+ midx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ return Styler(DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx))
+
+
+@pytest.fixture
+def tpl_style(env):
+ return env.get_template("html_style.tpl")
+
+
+@pytest.fixture
+def tpl_table(env):
+ return env.get_template("html_table.tpl")
+
+
+def test_html_template_extends_options():
+ # make sure if templates are edited tests are updated as are setup fixtures
+ # to understand the dependency
+ with open("pandas/io/formats/templates/html.tpl", encoding="utf-8") as file:
+ result = file.read()
+ assert "{% include html_style_tpl %}" in result
+ assert "{% include html_table_tpl %}" in result
+
+
+def test_exclude_styles(styler):
+ result = styler.to_html(exclude_styles=True, doctype_html=True)
+ expected = dedent(
+ """\
+
+
+
+
+
+
+
+
+
+
+ A
+
+
+
+
+ a
+ 2.610000
+
+
+ b
+ 2.690000
+
+
+
+
+
+ """
+ )
+ assert result == expected
+
+
+def test_w3_html_format(styler):
+ styler.set_uuid("").set_table_styles([{"selector": "th", "props": "att2:v2;"}]).map(
+ lambda x: "att1:v1;"
+ ).set_table_attributes('class="my-cls1" style="attr3:v3;"').set_td_classes(
+ DataFrame(["my-cls2"], index=["a"], columns=["A"])
+ ).format(
+ "{:.1f}"
+ ).set_caption(
+ "A comprehensive test"
+ )
+ expected = dedent(
+ """\
+
+
+ A comprehensive test
+
+
+
+ A
+
+
+
+
+ a
+ 2.6
+
+
+ b
+ 2.7
+
+
+
+ """
+ )
+ assert expected == styler.to_html()
+
+
+def test_colspan_w3():
+ # GH 36223
+ df = DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]])
+ styler = Styler(df, uuid="_", cell_ids=False)
+ assert 'l0 ' in styler.to_html()
+
+
+def test_rowspan_w3():
+ # GH 38533
+ df = DataFrame(data=[[1, 2]], index=[["l0", "l0"], ["l1a", "l1b"]])
+ styler = Styler(df, uuid="_", cell_ids=False)
+ assert 'l0 ' in styler.to_html()
+
+
+def test_styles(styler):
+ styler.set_uuid("abc")
+ styler.set_table_styles([{"selector": "td", "props": "color: red;"}])
+ result = styler.to_html(doctype_html=True)
+ expected = dedent(
+ """\
+
+
+
+
+
+
+
+
+
+
+
+ A
+
+
+
+
+ a
+ 2.610000
+
+
+ b
+ 2.690000
+
+
+
+
+
+ """
+ )
+ assert result == expected
+
+
+def test_doctype(styler):
+ result = styler.to_html(doctype_html=False)
+ assert "" not in result
+ assert "" not in result
+ assert "" not in result
+ assert "" not in result
+
+
+def test_doctype_encoding(styler):
+ with option_context("styler.render.encoding", "ASCII"):
+ result = styler.to_html(doctype_html=True)
+ assert ' ' in result
+ result = styler.to_html(doctype_html=True, encoding="ANSI")
+ assert ' ' in result
+
+
+def test_bold_headers_arg(styler):
+ result = styler.to_html(bold_headers=True)
+ assert "th {\n font-weight: bold;\n}" in result
+ result = styler.to_html()
+ assert "th {\n font-weight: bold;\n}" not in result
+
+
+def test_caption_arg(styler):
+ result = styler.to_html(caption="foo bar")
+ assert "foo bar " in result
+ result = styler.to_html()
+ assert "foo bar " not in result
+
+
+def test_block_names(tpl_style, tpl_table):
+ # catch accidental removal of a block
+ expected_style = {
+ "before_style",
+ "style",
+ "table_styles",
+ "before_cellstyle",
+ "cellstyle",
+ }
+ expected_table = {
+ "before_table",
+ "table",
+ "caption",
+ "thead",
+ "tbody",
+ "after_table",
+ "before_head_rows",
+ "head_tr",
+ "after_head_rows",
+ "before_rows",
+ "tr",
+ "after_rows",
+ }
+ result1 = set(tpl_style.blocks)
+ assert result1 == expected_style
+
+ result2 = set(tpl_table.blocks)
+ assert result2 == expected_table
+
+
+def test_from_custom_template_table(tmpdir):
+ p = tmpdir.mkdir("tpl").join("myhtml_table.tpl")
+ p.write(
+ dedent(
+ """\
+ {% extends "html_table.tpl" %}
+ {% block table %}
+ {{custom_title}}
+ {{ super() }}
+ {% endblock table %}"""
+ )
+ )
+ result = Styler.from_custom_template(str(tmpdir.join("tpl")), "myhtml_table.tpl")
+ assert issubclass(result, Styler)
+ assert result.env is not Styler.env
+ assert result.template_html_table is not Styler.template_html_table
+ styler = result(DataFrame({"A": [1, 2]}))
+ assert "My Title \n\n\n
+ {{ super() }}
+ {% endblock style %}"""
+ )
+ )
+ result = Styler.from_custom_template(
+ str(tmpdir.join("tpl")), html_style="myhtml_style.tpl"
+ )
+ assert issubclass(result, Styler)
+ assert result.env is not Styler.env
+ assert result.template_html_style is not Styler.template_html_style
+ styler = result(DataFrame({"A": [1, 2]}))
+ assert ' \n\n
+
+
+
+
+ n1
+ a
+
+
+
+ n2
+ c
+
+
+ n1
+ n2
+
+
+
+
+
+ a
+ c
+ 0
+
+
+
+ """
+ )
+ result = styler_mi.to_html()
+ assert result == expected
+
+
+def test_include_css_style_rules_only_for_visible_cells(styler_mi):
+ # GH 43619
+ result = (
+ styler_mi.set_uuid("")
+ .map(lambda v: "color: blue;")
+ .hide(styler_mi.data.columns[1:], axis="columns")
+ .hide(styler_mi.data.index[1:], axis="index")
+ .to_html()
+ )
+ expected_styles = dedent(
+ """\
+
+ """
+ )
+ assert expected_styles in result
+
+
+def test_include_css_style_rules_only_for_visible_index_labels(styler_mi):
+ # GH 43619
+ result = (
+ styler_mi.set_uuid("")
+ .map_index(lambda v: "color: blue;", axis="index")
+ .hide(styler_mi.data.columns, axis="columns")
+ .hide(styler_mi.data.index[1:], axis="index")
+ .to_html()
+ )
+ expected_styles = dedent(
+ """\
+
+ """
+ )
+ assert expected_styles in result
+
+
+def test_include_css_style_rules_only_for_visible_column_labels(styler_mi):
+ # GH 43619
+ result = (
+ styler_mi.set_uuid("")
+ .map_index(lambda v: "color: blue;", axis="columns")
+ .hide(styler_mi.data.columns[1:], axis="columns")
+ .hide(styler_mi.data.index, axis="index")
+ .to_html()
+ )
+ expected_styles = dedent(
+ """\
+
+ """
+ )
+ assert expected_styles in result
+
+
+def test_hiding_index_columns_multiindex_alignment():
+ # gh 43644
+ midx = MultiIndex.from_product(
+ [["i0", "j0"], ["i1"], ["i2", "j2"]], names=["i-0", "i-1", "i-2"]
+ )
+ cidx = MultiIndex.from_product(
+ [["c0"], ["c1", "d1"], ["c2", "d2"]], names=["c-0", "c-1", "c-2"]
+ )
+ df = DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=cidx)
+ styler = Styler(df, uuid_len=0)
+ styler.hide(level=1, axis=0).hide(level=0, axis=1)
+ styler.hide([("j0", "i1", "j2")], axis=0)
+ styler.hide([("c0", "d1", "d2")], axis=1)
+ result = styler.to_html()
+ expected = dedent(
+ """\
+
+
+
+
+
+ c-1
+ c1
+ d1
+
+
+
+ c-2
+ c2
+ d2
+ c2
+
+
+ i-0
+ i-2
+
+
+
+
+
+
+
+ i0
+ i2
+ 0
+ 1
+ 2
+
+
+ j2
+ 4
+ 5
+ 6
+
+
+ j0
+ i2
+ 8
+ 9
+ 10
+
+
+
+ """
+ )
+ assert result == expected
+
+
+def test_hiding_index_columns_multiindex_trimming():
+ # gh 44272
+ df = DataFrame(np.arange(64).reshape(8, 8))
+ df.columns = MultiIndex.from_product([[0, 1, 2, 3], [0, 1]])
+ df.index = MultiIndex.from_product([[0, 1, 2, 3], [0, 1]])
+ df.index.names, df.columns.names = ["a", "b"], ["c", "d"]
+ styler = Styler(df, cell_ids=False, uuid_len=0)
+ styler.hide([(0, 0), (0, 1), (1, 0)], axis=1).hide([(0, 0), (0, 1), (1, 0)], axis=0)
+ with option_context("styler.render.max_rows", 4, "styler.render.max_columns", 4):
+ result = styler.to_html()
+
+ expected = dedent(
+ """\
+
+
+
+
+
+ c
+ 1
+ 2
+ 3
+
+
+
+ d
+ 1
+ 0
+ 1
+ 0
+ ...
+
+
+ a
+ b
+
+
+
+
+
+
+
+
+
+ 1
+ 1
+ 27
+ 28
+ 29
+ 30
+ ...
+
+
+ 2
+ 0
+ 35
+ 36
+ 37
+ 38
+ ...
+
+
+ 1
+ 43
+ 44
+ 45
+ 46
+ ...
+
+
+ 3
+ 0
+ 51
+ 52
+ 53
+ 54
+ ...
+
+
+ ...
+ ...
+ ...
+ ...
+ ...
+ ...
+ ...
+
+
+
+ """
+ )
+
+ assert result == expected
+
+
+@pytest.mark.parametrize("type", ["data", "index"])
+@pytest.mark.parametrize(
+ "text, exp, found",
+ [
+ ("no link, just text", False, ""),
+ ("subdomain not www: sub.web.com", False, ""),
+ ("www subdomain: www.web.com other", True, "www.web.com"),
+ ("scheme full structure: http://www.web.com", True, "http://www.web.com"),
+ ("scheme no top-level: http://www.web", True, "http://www.web"),
+ ("no scheme, no top-level: www.web", False, "www.web"),
+ ("https scheme: https://www.web.com", True, "https://www.web.com"),
+ ("ftp scheme: ftp://www.web", True, "ftp://www.web"),
+ ("ftps scheme: ftps://www.web", True, "ftps://www.web"),
+ ("subdirectories: www.web.com/directory", True, "www.web.com/directory"),
+ ("Multiple domains: www.1.2.3.4", True, "www.1.2.3.4"),
+ ("with port: http://web.com:80", True, "http://web.com:80"),
+ (
+ "full net_loc scheme: http://user:pass@web.com",
+ True,
+ "http://user:pass@web.com",
+ ),
+ (
+ "with valid special chars: http://web.com/,.':;~!@#$*()[]",
+ True,
+ "http://web.com/,.':;~!@#$*()[]",
+ ),
+ ],
+)
+def test_rendered_links(type, text, exp, found):
+ if type == "data":
+ df = DataFrame([text])
+ styler = df.style.format(hyperlinks="html")
+ else:
+ df = DataFrame([0], index=[text])
+ styler = df.style.format_index(hyperlinks="html")
+
+ rendered = f'{found} '
+ result = styler.to_html()
+ assert (rendered in result) is exp
+ assert (text in result) is not exp # test conversion done when expected and not
+
+
+def test_multiple_rendered_links():
+ links = ("www.a.b", "http://a.c", "https://a.d", "ftp://a.e")
+ # pylint: disable-next=consider-using-f-string
+ df = DataFrame(["text {} {} text {} {}".format(*links)])
+ result = df.style.format(hyperlinks="html").to_html()
+ href = '{0} '
+ for link in links:
+ assert href.format(link) in result
+ assert href.format("text") not in result
+
+
+def test_concat(styler):
+ other = styler.data.agg(["mean"]).style
+ styler.concat(other).set_uuid("X")
+ result = styler.to_html()
+ fp = "foot0_"
+ expected = dedent(
+ f"""\
+
+ b
+ 2.690000
+
+
+ mean
+ 2.650000
+
+
+
+ """
+ )
+ assert expected in result
+
+
+def test_concat_recursion(styler):
+ df = styler.data
+ styler1 = styler
+ styler2 = Styler(df.agg(["mean"]), precision=3)
+ styler3 = Styler(df.agg(["mean"]), precision=4)
+ styler1.concat(styler2.concat(styler3)).set_uuid("X")
+ result = styler.to_html()
+ # notice that the second concat (last of the output html),
+ # there are two `foot_` in the id and class
+ fp1 = "foot0_"
+ fp2 = "foot0_foot0_"
+ expected = dedent(
+ f"""\
+
+ b
+ 2.690000
+
+
+ mean
+ 2.650
+
+
+ mean
+ 2.6500
+
+
+
+ """
+ )
+ assert expected in result
+
+
+def test_concat_chain(styler):
+ df = styler.data
+ styler1 = styler
+ styler2 = Styler(df.agg(["mean"]), precision=3)
+ styler3 = Styler(df.agg(["mean"]), precision=4)
+ styler1.concat(styler2).concat(styler3).set_uuid("X")
+ result = styler.to_html()
+ fp1 = "foot0_"
+ fp2 = "foot1_"
+ expected = dedent(
+ f"""\
+
+ b
+ 2.690000
+
+
+ mean
+ 2.650
+
+
+ mean
+ 2.6500
+
+
+
+ """
+ )
+ assert expected in result
+
+
+def test_concat_combined():
+ def html_lines(foot_prefix: str):
+ assert foot_prefix.endswith("_") or foot_prefix == ""
+ fp = foot_prefix
+ return indent(
+ dedent(
+ f"""\
+
+ a
+ 2.610000
+
+
+ b
+ 2.690000
+
+ """
+ ),
+ prefix=" " * 4,
+ )
+
+ df = DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"])
+ s1 = df.style.highlight_max(color="red")
+ s2 = df.style.highlight_max(color="green")
+ s3 = df.style.highlight_max(color="blue")
+ s4 = df.style.highlight_max(color="yellow")
+
+ result = s1.concat(s2).concat(s3.concat(s4)).set_uuid("X").to_html()
+ expected_css = dedent(
+ """\
+
+ """
+ )
+ expected_table = (
+ dedent(
+ """\
+
+
+
+
+ A
+
+
+
+ """
+ )
+ + html_lines("")
+ + html_lines("foot0_")
+ + html_lines("foot1_")
+ + html_lines("foot1_foot0_")
+ + dedent(
+ """\
+
+
+ """
+ )
+ )
+ assert expected_css + expected_table == result
+
+
+def test_to_html_na_rep_non_scalar_data(datapath):
+ # GH47103
+ df = DataFrame([{"a": 1, "b": [1, 2, 3], "c": np.nan}])
+ result = df.style.format(na_rep="-").to_html(table_uuid="test")
+ expected = """\
+
+
+
+
+
+ a
+ b
+ c
+
+
+
+
+ 0
+ 1
+ [1, 2, 3]
+ -
+
+
+
+"""
+ assert result == expected
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb7a77f1ddb27db66a847fc1a1d87d14d95822aa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py
@@ -0,0 +1,335 @@
+import gc
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ IndexSlice,
+ Series,
+)
+
+pytest.importorskip("matplotlib")
+pytest.importorskip("jinja2")
+
+import matplotlib as mpl
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture(autouse=True)
+def mpl_cleanup():
+ # matplotlib/testing/decorators.py#L24
+ # 1) Resets units registry
+ # 2) Resets rc_context
+ # 3) Closes all figures
+ mpl = pytest.importorskip("matplotlib")
+ mpl_units = pytest.importorskip("matplotlib.units")
+ plt = pytest.importorskip("matplotlib.pyplot")
+ orig_units_registry = mpl_units.registry.copy()
+ with mpl.rc_context():
+ mpl.use("template")
+ yield
+ mpl_units.registry.clear()
+ mpl_units.registry.update(orig_units_registry)
+ plt.close("all")
+ # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501
+ gc.collect(1)
+
+
+@pytest.fixture
+def df():
+ return DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+@pytest.fixture
+def df_blank():
+ return DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"])
+
+
+@pytest.fixture
+def styler_blank(df_blank):
+ return Styler(df_blank, uuid_len=0)
+
+
+@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"])
+def test_function_gradient(styler, f):
+ for c_map in [None, "YlOrRd"]:
+ result = getattr(styler, f)(cmap=c_map)._compute().ctx
+ assert all("#" in x[0][1] for x in result.values())
+ assert result[(0, 0)] == result[(0, 1)]
+ assert result[(1, 0)] == result[(1, 1)]
+
+
+@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"])
+def test_background_gradient_color(styler, f):
+ result = getattr(styler, f)(subset=IndexSlice[1, "A"])._compute().ctx
+ if f == "background_gradient":
+ assert result[(1, 0)] == [("background-color", "#fff7fb"), ("color", "#000000")]
+ elif f == "text_gradient":
+ assert result[(1, 0)] == [("color", "#fff7fb")]
+
+
+@pytest.mark.parametrize(
+ "axis, expected",
+ [
+ (0, ["low", "low", "high", "high"]),
+ (1, ["low", "high", "low", "high"]),
+ (None, ["low", "mid", "mid", "high"]),
+ ],
+)
+@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"])
+def test_background_gradient_axis(styler, axis, expected, f):
+ if f == "background_gradient":
+ colors = {
+ "low": [("background-color", "#f7fbff"), ("color", "#000000")],
+ "mid": [("background-color", "#abd0e6"), ("color", "#000000")],
+ "high": [("background-color", "#08306b"), ("color", "#f1f1f1")],
+ }
+ elif f == "text_gradient":
+ colors = {
+ "low": [("color", "#f7fbff")],
+ "mid": [("color", "#abd0e6")],
+ "high": [("color", "#08306b")],
+ }
+ result = getattr(styler, f)(cmap="Blues", axis=axis)._compute().ctx
+ for i, cell in enumerate([(0, 0), (0, 1), (1, 0), (1, 1)]):
+ assert result[cell] == colors[expected[i]]
+
+
+@pytest.mark.parametrize(
+ "cmap, expected",
+ [
+ (
+ "PuBu",
+ {
+ (4, 5): [("background-color", "#86b0d3"), ("color", "#000000")],
+ (4, 6): [("background-color", "#83afd3"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ "YlOrRd",
+ {
+ (4, 8): [("background-color", "#fd913e"), ("color", "#000000")],
+ (4, 9): [("background-color", "#fd8f3d"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ None,
+ {
+ (7, 0): [("background-color", "#48c16e"), ("color", "#f1f1f1")],
+ (7, 1): [("background-color", "#4cc26c"), ("color", "#000000")],
+ },
+ ),
+ ],
+)
+def test_text_color_threshold(cmap, expected):
+ # GH 39888
+ df = DataFrame(np.arange(100).reshape(10, 10))
+ result = df.style.background_gradient(cmap=cmap, axis=None)._compute().ctx
+ for k in expected.keys():
+ assert result[k] == expected[k]
+
+
+def test_background_gradient_vmin_vmax():
+ # GH 12145
+ df = DataFrame(range(5))
+ ctx = df.style.background_gradient(vmin=1, vmax=3)._compute().ctx
+ assert ctx[(0, 0)] == ctx[(1, 0)]
+ assert ctx[(4, 0)] == ctx[(3, 0)]
+
+
+def test_background_gradient_int64():
+ # GH 28869
+ df1 = Series(range(3)).to_frame()
+ df2 = Series(range(3), dtype="Int64").to_frame()
+ ctx1 = df1.style.background_gradient()._compute().ctx
+ ctx2 = df2.style.background_gradient()._compute().ctx
+ assert ctx2[(0, 0)] == ctx1[(0, 0)]
+ assert ctx2[(1, 0)] == ctx1[(1, 0)]
+ assert ctx2[(2, 0)] == ctx1[(2, 0)]
+
+
+@pytest.mark.parametrize(
+ "axis, gmap, expected",
+ [
+ (
+ 0,
+ [1, 2],
+ {
+ (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 0): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ 1,
+ [1, 2],
+ {
+ (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (0, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ None,
+ np.array([[2, 1], [1, 2]]),
+ {
+ (0, 0): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ ],
+)
+def test_background_gradient_gmap_array(styler_blank, axis, gmap, expected):
+ # tests when gmap is given as a sequence and converted to ndarray
+ result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "gmap, axis", [([1, 2, 3], 0), ([1, 2], 1), (np.array([[1, 2], [1, 2]]), None)]
+)
+def test_background_gradient_gmap_array_raises(gmap, axis):
+ # test when gmap as converted ndarray is bad shape
+ df = DataFrame([[0, 0, 0], [0, 0, 0]])
+ msg = "supplied 'gmap' is not correct shape"
+ with pytest.raises(ValueError, match=msg):
+ df.style.background_gradient(gmap=gmap, axis=axis)._compute()
+
+
+@pytest.mark.parametrize(
+ "gmap",
+ [
+ DataFrame( # reverse the columns
+ [[2, 1], [1, 2]], columns=["B", "A"], index=["X", "Y"]
+ ),
+ DataFrame( # reverse the index
+ [[2, 1], [1, 2]], columns=["A", "B"], index=["Y", "X"]
+ ),
+ DataFrame( # reverse the index and columns
+ [[1, 2], [2, 1]], columns=["B", "A"], index=["Y", "X"]
+ ),
+ DataFrame( # add unnecessary columns
+ [[1, 2, 3], [2, 1, 3]], columns=["A", "B", "C"], index=["X", "Y"]
+ ),
+ DataFrame( # add unnecessary index
+ [[1, 2], [2, 1], [3, 3]], columns=["A", "B"], index=["X", "Y", "Z"]
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "subset, exp_gmap", # exp_gmap is underlying map DataFrame should conform to
+ [
+ (None, [[1, 2], [2, 1]]),
+ (["A"], [[1], [2]]), # slice only column "A" in data and gmap
+ (["B", "A"], [[2, 1], [1, 2]]), # reverse the columns in data
+ (IndexSlice["X", :], [[1, 2]]), # slice only index "X" in data and gmap
+ (IndexSlice[["Y", "X"], :], [[2, 1], [1, 2]]), # reverse the index in data
+ ],
+)
+def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, exp_gmap):
+ # test gmap given as DataFrame that it aligns to the data including subset
+ expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap, subset=subset)
+ result = styler_blank.background_gradient(axis=None, gmap=gmap, subset=subset)
+ assert expected._compute().ctx == result._compute().ctx
+
+
+@pytest.mark.parametrize(
+ "gmap, axis, exp_gmap",
+ [
+ (Series([2, 1], index=["Y", "X"]), 0, [[1, 1], [2, 2]]), # revrse the index
+ (Series([2, 1], index=["B", "A"]), 1, [[1, 2], [1, 2]]), # revrse the cols
+ (Series([1, 2, 3], index=["X", "Y", "Z"]), 0, [[1, 1], [2, 2]]), # add idx
+ (Series([1, 2, 3], index=["A", "B", "C"]), 1, [[1, 2], [1, 2]]), # add col
+ ],
+)
+def test_background_gradient_gmap_series_align(styler_blank, gmap, axis, exp_gmap):
+ # test gmap given as Series that it aligns to the data including subset
+ expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap)._compute()
+ result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute()
+ assert expected.ctx == result.ctx
+
+
+@pytest.mark.parametrize(
+ "gmap, axis",
+ [
+ (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 1),
+ (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 0),
+ ],
+)
+def test_background_gradient_gmap_wrong_dataframe(styler_blank, gmap, axis):
+ # test giving a gmap in DataFrame but with wrong axis
+ msg = "'gmap' is a DataFrame but underlying data for operations is a Series"
+ with pytest.raises(ValueError, match=msg):
+ styler_blank.background_gradient(gmap=gmap, axis=axis)._compute()
+
+
+def test_background_gradient_gmap_wrong_series(styler_blank):
+ # test giving a gmap in Series form but with wrong axis
+ msg = "'gmap' is a Series but underlying data for operations is a DataFrame"
+ gmap = Series([1, 2], index=["X", "Y"])
+ with pytest.raises(ValueError, match=msg):
+ styler_blank.background_gradient(gmap=gmap, axis=None)._compute()
+
+
+def test_background_gradient_nullable_dtypes():
+ # GH 50712
+ df1 = DataFrame([[1], [0], [np.nan]], dtype=float)
+ df2 = DataFrame([[1], [0], [None]], dtype="Int64")
+
+ ctx1 = df1.style.background_gradient()._compute().ctx
+ ctx2 = df2.style.background_gradient()._compute().ctx
+ assert ctx1 == ctx2
+
+
+@pytest.mark.parametrize(
+ "cmap",
+ ["PuBu", mpl.colormaps["PuBu"]],
+)
+def test_bar_colormap(cmap):
+ data = DataFrame([[1, 2], [3, 4]])
+ ctx = data.style.bar(cmap=cmap, axis=None)._compute().ctx
+ pubu_colors = {
+ (0, 0): "#d0d1e6",
+ (1, 0): "#056faf",
+ (0, 1): "#73a9cf",
+ (1, 1): "#023858",
+ }
+ for k, v in pubu_colors.items():
+ assert v in ctx[k][1][1]
+
+
+def test_bar_color_raises(df):
+ msg = "`color` must be string or list or tuple of 2 strings"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color={"a", "b"}).to_html()
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color=["a", "b", "c"]).to_html()
+
+ msg = "`color` and `cmap` cannot both be given"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color="something", cmap="something else").to_html()
+
+
+@pytest.mark.parametrize(
+ "plot_method",
+ ["scatter", "hexbin"],
+)
+def test_pass_colormap_instance(df, plot_method):
+ # https://github.com/pandas-dev/pandas/issues/49374
+ cmap = mpl.colors.ListedColormap([[1, 1, 1], [0, 0, 0]])
+ df["c"] = df.A + df.B
+ kwargs = {"x": "A", "y": "B", "c": "c", "colormap": cmap}
+ if plot_method == "hexbin":
+ kwargs["C"] = kwargs.pop("c")
+ getattr(df.plot, plot_method)(**kwargs)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4d31fe21f2c9cf3454a67f8c7443382f7f1c0ef
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py
@@ -0,0 +1,140 @@
+from textwrap import dedent
+
+import pytest
+
+from pandas import (
+ DataFrame,
+ IndexSlice,
+)
+
+pytest.importorskip("jinja2")
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=["i", "j", "j"],
+ columns=["c", "d", "d"],
+ dtype=float,
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+def test_format_non_unique(df):
+ # GH 41269
+
+ # test dict
+ html = df.style.format({"d": "{:.1f}"}).to_html()
+ for val in ["1.000000<", "4.000000<", "7.000000<"]:
+ assert val in html
+ for val in ["2.0<", "3.0<", "5.0<", "6.0<", "8.0<", "9.0<"]:
+ assert val in html
+
+ # test subset
+ html = df.style.format(precision=1, subset=IndexSlice["j", "d"]).to_html()
+ for val in ["1.000000<", "4.000000<", "7.000000<", "2.000000<", "3.000000<"]:
+ assert val in html
+ for val in ["5.0<", "6.0<", "8.0<", "9.0<"]:
+ assert val in html
+
+
+@pytest.mark.parametrize("func", ["apply", "map"])
+def test_apply_map_non_unique_raises(df, func):
+ # GH 41269
+ if func == "apply":
+ op = lambda s: ["color: red;"] * len(s)
+ else:
+ op = lambda v: "color: red;"
+
+ with pytest.raises(KeyError, match="`Styler.apply` and `.map` are not"):
+ getattr(df.style, func)(op)._compute()
+
+
+def test_table_styles_dict_non_unique_index(styler):
+ styles = styler.set_table_styles(
+ {"j": [{"selector": "td", "props": "a: v;"}]}, axis=1
+ ).table_styles
+ assert styles == [
+ {"selector": "td.row1", "props": [("a", "v")]},
+ {"selector": "td.row2", "props": [("a", "v")]},
+ ]
+
+
+def test_table_styles_dict_non_unique_columns(styler):
+ styles = styler.set_table_styles(
+ {"d": [{"selector": "td", "props": "a: v;"}]}, axis=0
+ ).table_styles
+ assert styles == [
+ {"selector": "td.col1", "props": [("a", "v")]},
+ {"selector": "td.col2", "props": [("a", "v")]},
+ ]
+
+
+def test_tooltips_non_unique_raises(styler):
+ # ttips has unique keys
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"])
+ styler.set_tooltips(ttips=ttips) # OK
+
+ # ttips has non-unique columns
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "c"], index=["a", "b"])
+ with pytest.raises(KeyError, match="Tooltips render only if `ttips` has unique"):
+ styler.set_tooltips(ttips=ttips)
+
+ # ttips has non-unique index
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "a"])
+ with pytest.raises(KeyError, match="Tooltips render only if `ttips` has unique"):
+ styler.set_tooltips(ttips=ttips)
+
+
+def test_set_td_classes_non_unique_raises(styler):
+ # classes has unique keys
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"])
+ styler.set_td_classes(classes=classes) # OK
+
+ # classes has non-unique columns
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "c"], index=["a", "b"])
+ with pytest.raises(KeyError, match="Classes render only if `classes` has unique"):
+ styler.set_td_classes(classes=classes)
+
+ # classes has non-unique index
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "a"])
+ with pytest.raises(KeyError, match="Classes render only if `classes` has unique"):
+ styler.set_td_classes(classes=classes)
+
+
+def test_hide_columns_non_unique(styler):
+ ctx = styler.hide(["d"], axis="columns")._translate(True, True)
+
+ assert ctx["head"][0][1]["display_value"] == "c"
+ assert ctx["head"][0][1]["is_visible"] is True
+
+ assert ctx["head"][0][2]["display_value"] == "d"
+ assert ctx["head"][0][2]["is_visible"] is False
+
+ assert ctx["head"][0][3]["display_value"] == "d"
+ assert ctx["head"][0][3]["is_visible"] is False
+
+ assert ctx["body"][0][1]["is_visible"] is True
+ assert ctx["body"][0][2]["is_visible"] is False
+ assert ctx["body"][0][3]["is_visible"] is False
+
+
+def test_latex_non_unique(styler):
+ result = styler.to_latex()
+ assert result == dedent(
+ """\
+ \\begin{tabular}{lrrr}
+ & c & d & d \\\\
+ i & 1.000000 & 2.000000 & 3.000000 \\\\
+ j & 4.000000 & 5.000000 & 6.000000 \\\\
+ j & 7.000000 & 8.000000 & 9.000000 \\\\
+ \\end{tabular}
+ """
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fa72bd48031cca999b81cccfcedafcd3abcd924
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py
@@ -0,0 +1,1588 @@
+import contextlib
+import copy
+import re
+from textwrap import dedent
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ IndexSlice,
+ MultiIndex,
+ Series,
+ option_context,
+)
+import pandas._testing as tm
+
+jinja2 = pytest.importorskip("jinja2")
+from pandas.io.formats.style import ( # isort:skip
+ Styler,
+)
+from pandas.io.formats.style_render import (
+ _get_level_lengths,
+ _get_trimming_maximums,
+ maybe_convert_css_to_tuples,
+ non_reducing_slice,
+)
+
+
@pytest.fixture
def mi_df():
    """2x2 integer frame with one-level-nested MultiIndex rows and columns."""
    return DataFrame(
        [[1, 2], [3, 4]],
        index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
        columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
        dtype=int,
    )
+
+
@pytest.fixture
def mi_styler(mi_df):
    """Styler over ``mi_df`` with a zero-length uuid for stable ids."""
    return Styler(mi_df, uuid_len=0)
+
+
@pytest.fixture
def mi_styler_comp(mi_styler):
    """``mi_styler`` with essentially every Styler feature applied, used to
    exercise copy/clear/export comprehensively."""
    # comprehensively add features to mi_styler
    mi_styler = mi_styler._copy(deepcopy=True)
    mi_styler.css = {**mi_styler.css, "row": "ROW", "col": "COL"}
    mi_styler.uuid_len = 5
    mi_styler.uuid = "abcde"
    mi_styler.set_caption("capt")
    mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
    # hide everything, including the level names
    mi_styler.hide(axis="columns")
    mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
    mi_styler.hide(axis="index")
    mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
    mi_styler.set_table_attributes('class="box"')
    # concatenate a mean row so `concat` state is exercised too
    other = mi_styler.data.agg(["mean"])
    other.index = MultiIndex.from_product([[""], other.index])
    mi_styler.concat(other.style)
    mi_styler.format(na_rep="MISSING", precision=3)
    mi_styler.format_index(precision=2, axis=0)
    mi_styler.format_index(precision=4, axis=1)
    mi_styler.highlight_max(axis=None)
    mi_styler.map_index(lambda x: "color: white;", axis=0)
    mi_styler.map_index(lambda x: "color: black;", axis=1)
    mi_styler.set_td_classes(
        DataFrame(
            [["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
        )
    )
    mi_styler.set_tooltips(
        DataFrame(
            [["a2", "b2"], ["a2", "c2"]],
            index=mi_styler.index,
            columns=mi_styler.columns,
        )
    )
    return mi_styler
+
+
@pytest.fixture
def blank_value():
    """Display value pandas renders for blank header cells.

    NOTE(review): this copy contained a bare space literal, which looks like
    extraction residue of the HTML entity used by the Styler templates and by
    the upstream pandas test suite; restored to ``"&nbsp;"``.
    """
    return "&nbsp;"
+
+
@pytest.fixture
def df():
    """Small frame: one int column and one seeded-random float column."""
    df = DataFrame({"A": [0, 1], "B": np.random.default_rng(2).standard_normal(2)})
    return df
+
+
@pytest.fixture
def styler(df):
    """Default Styler over the ``df`` fixture.

    Previously this fixture rebuilt an identically-seeded frame, shadowing
    the ``df`` parameter; reusing the fixture value is equivalent (same seed,
    same data) and removes the redundancy.
    """
    return Styler(df)
+
+
@pytest.mark.parametrize(
    "sparse_columns, exp_cols",
    [
        (
            True,
            [
                {"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
                {"is_visible": False, "attributes": "", "value": "c0"},
            ],
        ),
        (
            False,
            [
                {"is_visible": True, "attributes": "", "value": "c0"},
                {"is_visible": True, "attributes": "", "value": "c0"},
            ],
        ),
    ],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
    """Sparsified columns collapse the repeated level-0 label into a colspan;
    unsparsified columns repeat it visibly. Level-1 labels are unaffected."""
    exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
    exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}

    ctx = mi_styler._translate(True, sparse_columns)

    assert exp_cols[0].items() <= ctx["head"][0][2].items()
    assert exp_cols[1].items() <= ctx["head"][0][3].items()
    assert exp_l1_c0.items() <= ctx["head"][1][2].items()
    assert exp_l1_c1.items() <= ctx["head"][1][3].items()
+
@pytest.mark.parametrize(
    "sparse_index, exp_rows",
    [
        (
            True,
            [
                {"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
                {"is_visible": False, "attributes": "", "value": "i0"},
            ],
        ),
        (
            False,
            [
                {"is_visible": True, "attributes": "", "value": "i0"},
                {"is_visible": True, "attributes": "", "value": "i0"},
            ],
        ),
    ],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
    """Sparsified index collapses the repeated level-0 label into a rowspan;
    unsparsified index repeats it visibly. Level-1 labels are unaffected."""
    exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
    exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}

    ctx = mi_styler._translate(sparse_index, True)

    assert exp_rows[0].items() <= ctx["body"][0][0].items()
    assert exp_rows[1].items() <= ctx["body"][1][0].items()
    assert exp_l1_r0.items() <= ctx["body"][0][1].items()
    assert exp_l1_r1.items() <= ctx["body"][1][1].items()
+
+
+def test_mi_styler_sparsify_options(mi_styler):
+ with option_context("styler.sparse.index", False):
+ html1 = mi_styler.to_html()
+ with option_context("styler.sparse.index", True):
+ html2 = mi_styler.to_html()
+
+ assert html1 != html2
+
+ with option_context("styler.sparse.columns", False):
+ html1 = mi_styler.to_html()
+ with option_context("styler.sparse.columns", True):
+ html2 = mi_styler.to_html()
+
+ assert html1 != html2
+
+
@pytest.mark.parametrize(
    "rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
    [
        (100, 100, 100, None, None, 12, 6),  # reduce to (12, 6) < 100 elements
        (1000, 3, 750, None, None, 250, 3),  # dynamically reduce rows to 250, keep cols
        (4, 1000, 500, None, None, 4, 125),  # dynamically reduce cols to 125, keep rows
        (1000, 3, 750, 10, None, 10, 3),  # overwrite above dynamics with max_row
        (4, 1000, 500, None, 5, 4, 5),  # overwrite above dynamics with max_col
        (100, 100, 700, 50, 50, 25, 25),  # rows cols below given maxes so < 700 elmts
    ],
)
def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
    """_get_trimming_maximums reduces (rows, cols) per the element/row/col caps."""
    rn, cn = _get_trimming_maximums(
        rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
    )
    assert (rn, cn) == (exp_rn, exp_cn)
+
+
@pytest.mark.parametrize(
    "option, val",
    [
        ("styler.render.max_elements", 6),
        ("styler.render.max_rows", 3),
    ],
)
def test_render_trimming_rows(option, val):
    """Both the element cap and the explicit row cap trim a 60x2 frame to
    three data rows plus one trimming row."""
    # test auto and specific trimming of rows
    df = DataFrame(np.arange(120).reshape(60, 2))
    with option_context(option, val):
        ctx = df.style._translate(True, True)
    assert len(ctx["head"][0]) == 3  # index + 2 data cols
    assert len(ctx["body"]) == 4  # 3 data rows + trimming row
    assert len(ctx["body"][0]) == 3  # index + 2 data cols
+
+
@pytest.mark.parametrize(
    "option, val",
    [
        ("styler.render.max_elements", 6),
        ("styler.render.max_columns", 2),
    ],
)
def test_render_trimming_cols(option, val):
    """Both the element cap and the explicit column cap trim a 3x10 frame to
    two data columns plus one trimming column."""
    # test auto and specific trimming of cols
    df = DataFrame(np.arange(30).reshape(3, 10))
    with option_context(option, val):
        ctx = df.style._translate(True, True)
    assert len(ctx["head"][0]) == 4  # index + 2 data cols + trimming col
    assert len(ctx["body"]) == 3  # 3 data rows
    assert len(ctx["body"][0]) == 4  # index + 2 data cols + trimming col
+
+
+def test_render_trimming_mi():
+ midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
+ df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
+ with option_context("styler.render.max_elements", 4):
+ ctx = df.style._translate(True, True)
+
+ assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
+ assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
+ assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
+ assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
+ assert len(ctx["body"]) == 3 # 2 data rows + trimming row
+
+
+def test_render_empty_mi():
+ # GH 43305
+ df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
+ expected = dedent(
+ """\
+ >
+
+
+
+ one
+
+
+ """
+ )
+ assert expected in df.style.to_html()
+
+
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepcopy", [True, False])
def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
    """copy shares all attribute objects; deepcopy shares only immutable-ish
    scalars (``shallow``) and duplicates every other non-None attribute."""
    styler = mi_styler_comp if comprehensive else mi_styler
    styler.uuid_len = 5

    s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler)  # make copy and check
    assert s2 is not styler

    if render:
        styler.to_html()

    excl = [
        "cellstyle_map",  # render time vars..
        "cellstyle_map_columns",
        "cellstyle_map_index",
        "template_latex",  # render templates are class level
        "template_html",
        "template_html_style",
        "template_html_table",
    ]
    if not deepcopy:  # check memory locations are equal for all included attributes
        for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
            assert id(getattr(s2, attr)) == id(getattr(styler, attr))
    else:  # check memory locations are different for nested or mutable vars
        shallow = [
            "data",
            "columns",
            "index",
            "uuid_len",
            "uuid",
            "caption",
            "cell_ids",
            "hide_index_",
            "hide_columns_",
            "hide_index_names",
            "hide_column_names",
            "table_attributes",
        ]
        for attr in shallow:
            assert id(getattr(s2, attr)) == id(getattr(styler, attr))

        for attr in [
            a
            for a in styler.__dict__
            if (not callable(a) and a not in excl and a not in shallow)
        ]:
            if getattr(s2, attr) is None:
                # None is a singleton, so identity is expected to match
                assert id(getattr(s2, attr)) == id(getattr(styler, attr))
            else:
                assert id(getattr(s2, attr)) != id(getattr(styler, attr))
+
+
@pytest.mark.parametrize("deepcopy", [True, False])
def test_inherited_copy(mi_styler, deepcopy):
    """Copying a Styler subclass instance preserves the subclass type."""
    # Ensure that the inherited class is preserved when a Styler object is copied.
    # GH 52728
    class CustomStyler(Styler):
        pass

    custom_styler = CustomStyler(mi_styler.data)
    custom_styler_copy = (
        copy.deepcopy(custom_styler) if deepcopy else copy.copy(custom_styler)
    )
    assert isinstance(custom_styler_copy, CustomStyler)
+
+
def test_clear(mi_styler_comp):
    """After ``clear()`` every non-excluded attribute equals a freshly-built
    Styler's; before clearing they must all differ."""
    # NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
    # to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
    # GH 40675
    styler = mi_styler_comp
    styler._compute()  # execute applied methods

    clean_copy = Styler(styler.data, uuid=styler.uuid)

    excl = [
        "data",
        "index",
        "columns",
        "uuid",
        "uuid_len",  # uuid is set to be the same on styler and clean_copy
        "cell_ids",
        "cellstyle_map",  # execution time only
        "cellstyle_map_columns",  # execution time only
        "cellstyle_map_index",  # execution time only
        "template_latex",  # render templates are class level
        "template_html",
        "template_html_style",
        "template_html_table",
    ]
    # tests vars are not same vals on obj and clean copy before clear (except for excl)
    for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
        res = getattr(styler, attr) == getattr(clean_copy, attr)
        if hasattr(res, "__iter__") and len(res) > 0:
            assert not all(res)  # some element in iterable differs
        elif hasattr(res, "__iter__") and len(res) == 0:
            pass  # empty array
        else:
            assert not res  # explicit var differs

    # test vars have same vales on obj and clean copy after clearing
    styler.clear()
    for attr in [a for a in styler.__dict__ if not callable(a)]:
        res = getattr(styler, attr) == getattr(clean_copy, attr)
        assert all(res) if hasattr(res, "__iter__") else res
+
+
def test_export(mi_styler_comp, mi_styler):
    """export()/use() transfer exactly the exportable attributes from one
    Styler to another, and the receiving styler still renders."""
    exp_attrs = [
        "_todo",
        "hide_index_",
        "hide_index_names",
        "hide_columns_",
        "hide_column_names",
        "table_attributes",
        "table_styles",
        "css",
    ]
    # before transfer: every exportable attribute differs between the two
    for attr in exp_attrs:
        check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
        assert not (
            all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
        )

    export = mi_styler_comp.export()
    used = mi_styler.use(export)
    # after transfer: every exportable attribute matches the source styler
    for attr in exp_attrs:
        check = getattr(used, attr) == getattr(mi_styler_comp, attr)
        assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check

    used.to_html()
+
+
def test_hide_raises(mi_styler):
    """hide() rejects simultaneous subset+level and non int/str level types."""
    msg = "`subset` and `level` cannot be passed simultaneously"
    with pytest.raises(ValueError, match=msg):
        mi_styler.hide(axis="index", subset="something", level="something else")

    msg = "`level` must be of type `int`, `str` or list of such"
    with pytest.raises(ValueError, match=msg):
        mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
+
+
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
    """Hiding index level 1 (by position or name) hides that level's cells in
    head and body while level 0 stays visible."""
    mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
    ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
    assert len(ctx["head"][0]) == 3
    assert len(ctx["head"][1]) == 3
    assert len(ctx["head"][2]) == 4
    assert ctx["head"][2][0]["is_visible"]
    assert not ctx["head"][2][1]["is_visible"]

    assert ctx["body"][0][0]["is_visible"]
    assert not ctx["body"][0][1]["is_visible"]
    assert ctx["body"][1][0]["is_visible"]
    assert not ctx["body"][1][1]["is_visible"]
+
+
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
    """Hiding a column level removes its header row; an extra row remains
    only when the index levels are named."""
    mi_styler.columns.names = ["zero", "one"]
    if names:
        mi_styler.index.names = ["zero", "one"]
    ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
    assert len(ctx["head"]) == (2 if names else 1)
+
+
@pytest.mark.parametrize("method", ["map", "apply"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header(method, axis):
    """apply_index/map_index queue a todo entry and, on compute, style only
    the header cells the function flags."""
    # GH 41893
    df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
    func = {
        "apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
        "map": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
    }

    # test execution added to todo
    result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
    assert len(result._todo) == 1
    assert len(getattr(result, f"ctx_{axis}")) == 0

    # test ctx object on compute
    result._compute()
    expected = {
        (0, 0): [("attr", "val")],
    }
    assert getattr(result, f"ctx_{axis}") == expected
+
+
@pytest.mark.parametrize("method", ["apply", "map"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header_mi(mi_styler, method, axis):
    """Header styling works level-wise on MultiIndex headers."""
    # GH 41893
    func = {
        "apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
        "map": lambda v: "attr: val" if "b" in v else "",
    }
    result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
    expected = {(1, 1): [("attr", "val")]}
    assert getattr(result, f"ctx_{axis}") == expected
+
+
def test_apply_map_header_raises(mi_styler):
    """map_index rejects an invalid axis name."""
    # GH 41893
    with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
        mi_styler.map_index(lambda v: "attr: val;", axis="bad")._compute()
+
+
+class TestStyler:
+ def test_init_non_pandas(self):
+ msg = "``data`` must be a Series or DataFrame"
+ with pytest.raises(TypeError, match=msg):
+ Styler([1, 2, 3])
+
+ def test_init_series(self):
+ result = Styler(Series([1, 2]))
+ assert result.data.ndim == 2
+
    def test_repr_html_ok(self, styler):
        """Smoke test: the notebook repr renders without error."""
        styler._repr_html_()
+
+ def test_repr_html_mathjax(self, styler):
+ # gh-19824 / 41395
+ assert "tex2jax_ignore" not in styler._repr_html_()
+
+ with option_context("styler.html.mathjax", False):
+ assert "tex2jax_ignore" in styler._repr_html_()
+
+ def test_update_ctx(self, styler):
+ styler._update_ctx(DataFrame({"A": ["color: red", "color: blue"]}))
+ expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
+ assert styler.ctx == expected
+
    def test_update_ctx_flatten_multi_and_trailing_semi(self, styler):
        """Multiple declarations, odd spacing and trailing semicolons all
        flatten into clean (property, value) pairs."""
        attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
        styler._update_ctx(attrs)
        expected = {
            (0, 0): [("color", "red"), ("foo", "bar")],
            (1, 0): [("color", "blue"), ("foo", "baz")],
        }
        assert styler.ctx == expected
+
    def test_render(self):
        """Smoke test: an applied per-column style renders to HTML."""
        df = DataFrame({"A": [0, 1]})
        style = lambda x: Series(["color: red", "color: blue"], name=x.name)
        s = Styler(df, uuid="AB").apply(style)
        s.to_html()
        # it worked?
+
+ def test_multiple_render(self, df):
+ # GH 39396
+ s = Styler(df, uuid_len=0).map(lambda x: "color: red;", subset=["A"])
+ s.to_html() # do 2 renders to ensure css styles not duplicated
+ assert (
+ '" in s.to_html()
+ )
+
+ def test_render_empty_dfs(self):
+ empty_df = DataFrame()
+ es = Styler(empty_df)
+ es.to_html()
+ # An index but no columns
+ DataFrame(columns=["a"]).style.to_html()
+ # A column but no index
+ DataFrame(index=["a"]).style.to_html()
+ # No IndexError raised?
+
    def test_render_double(self):
        """Smoke test: two declarations per cell render without error."""
        df = DataFrame({"A": [0, 1]})
        style = lambda x: Series(
            ["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
        )
        s = Styler(df, uuid="AB").apply(style)
        s.to_html()
        # it worked?
+
    def test_set_properties(self):
        """set_properties applies every keyword as a CSS pair to each cell."""
        df = DataFrame({"A": [0, 1]})
        result = df.style.set_properties(color="white", size="10px")._compute().ctx
        # order is deterministic
        v = [("color", "white"), ("size", "10px")]
        expected = {(0, 0): v, (1, 0): v}
        assert result.keys() == expected.keys()
        for v1, v2 in zip(result.values(), expected.values()):
            assert sorted(v1) == sorted(v2)
+
+ def test_set_properties_subset(self):
+ df = DataFrame({"A": [0, 1]})
+ result = (
+ df.style.set_properties(subset=IndexSlice[0, "A"], color="white")
+ ._compute()
+ .ctx
+ )
+ expected = {(0, 0): [("color", "white")]}
+ assert result == expected
+
    def test_empty_index_name_doesnt_display(self, blank_value):
        """An unnamed index yields a single header row with a blank corner."""
        # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        result = df.style._translate(True, True)
        assert len(result["head"]) == 1
        expected = {
            "class": "blank level0",
            "type": "th",
            "value": blank_value,
            "is_visible": True,
            "display_value": blank_value,
        }
        assert expected.items() <= result["head"][0][0].items()
+
    def test_index_name(self):
        """A named index renders its name in the second header row."""
        # https://github.com/pandas-dev/pandas/issues/11655
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        result = df.set_index("A").style._translate(True, True)
        expected = {
            "class": "index_name level0",
            "type": "th",
            "value": "A",
            "is_visible": True,
            "display_value": "A",
        }
        assert expected.items() <= result["head"][1][0].items()
+
+ def test_numeric_columns(self):
+ # https://github.com/pandas-dev/pandas/issues/12125
+ # smoke test for _translate
+ df = DataFrame({0: [1, 2, 3]})
+ df.style._translate(True, True)
+
    def test_apply_axis(self):
        """apply with axis=1 sees row maxima, axis=0 (the default) sees
        column maxima; todo is queued until _compute."""
        df = DataFrame({"A": [0, 0], "B": [1, 1]})
        f = lambda x: [f"val: {x.max()}" for v in x]
        result = df.style.apply(f, axis=1)
        assert len(result._todo) == 1
        assert len(result.ctx) == 0
        result._compute()
        # row-wise: every row's max is 1
        expected = {
            (0, 0): [("val", "1")],
            (0, 1): [("val", "1")],
            (1, 0): [("val", "1")],
            (1, 1): [("val", "1")],
        }
        assert result.ctx == expected

        result = df.style.apply(f, axis=0)
        # column-wise: col A max is 0, col B max is 1
        expected = {
            (0, 0): [("val", "0")],
            (0, 1): [("val", "1")],
            (1, 0): [("val", "0")],
            (1, 1): [("val", "1")],
        }
        result._compute()
        assert result.ctx == expected
        result = df.style.apply(f)  # default
        result._compute()
        assert result.ctx == expected
+
    @pytest.mark.parametrize("axis", [0, 1])
    def test_apply_series_return(self, axis):
        """A returned Series may be shorter than the axis or differently
        ordered; styles align by label."""
        # GH 42014
        df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])

        # test Series return where len(Series) < df.index or df.columns but labels OK
        func = lambda s: Series(["color: red;"], index=["Y"])
        result = df.style.apply(func, axis=axis)._compute().ctx
        assert result[(1, 1)] == [("color", "red")]
        assert result[(1 - axis, axis)] == [("color", "red")]

        # test Series return where labels align but different order
        func = lambda s: Series(["color: red;", "color: blue;"], index=["Y", "X"])
        result = df.style.apply(func, axis=axis)._compute().ctx
        assert result[(0, 0)] == [("color", "blue")]
        assert result[(1, 1)] == [("color", "red")]
        assert result[(1 - axis, axis)] == [("color", "red")]
        assert result[(axis, 1 - axis)] == [("color", "blue")]
+
    @pytest.mark.parametrize("index", [False, True])
    @pytest.mark.parametrize("columns", [False, True])
    def test_apply_dataframe_return(self, index, columns):
        """A returned DataFrame may cover only part of the frame; styles
        apply only where its labels align."""
        # GH 42014
        df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
        idxs = ["X", "Y"] if index else ["Y"]
        cols = ["X", "Y"] if columns else ["Y"]
        df_styles = DataFrame("color: red;", index=idxs, columns=cols)
        result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx

        assert result[(1, 1)] == [("color", "red")]  # (Y,Y) styles always present
        assert (result[(0, 1)] == [("color", "red")]) is index  # (X,Y) only if index
        assert (result[(1, 0)] == [("color", "red")]) is columns  # (Y,X) only if cols
        assert (result[(0, 0)] == [("color", "red")]) is (index and columns)  # (X,X)
+
    @pytest.mark.parametrize(
        "slice_",
        [
            IndexSlice[:],
            IndexSlice[:, ["A"]],
            IndexSlice[[1], :],
            IndexSlice[[1], ["A"]],
            IndexSlice[:2, ["A", "B"]],
        ],
    )
    @pytest.mark.parametrize("axis", [0, 1])
    def test_apply_subset(self, slice_, axis, df):
        """apply with a subset styles exactly the cells inside the slice,
        and extra kwargs are forwarded to the style function."""
        def h(x, color="bar"):
            return Series(f"color: {color}", index=x.index, name=x.name)

        result = df.style.apply(h, axis=axis, subset=slice_, color="baz")._compute().ctx
        expected = {
            (r, c): [("color", "baz")]
            for r, row in enumerate(df.index)
            for c, col in enumerate(df.columns)
            if row in df.loc[slice_].index and col in df.loc[slice_].columns
        }
        assert result == expected
+
    @pytest.mark.parametrize(
        "slice_",
        [
            IndexSlice[:],
            IndexSlice[:, ["A"]],
            IndexSlice[[1], :],
            IndexSlice[[1], ["A"]],
            IndexSlice[:2, ["A", "B"]],
        ],
    )
    def test_map_subset(self, slice_, df):
        """map with a subset styles exactly the cells inside the slice."""
        result = df.style.map(lambda x: "color:baz;", subset=slice_)._compute().ctx
        expected = {
            (r, c): [("color", "baz")]
            for r, row in enumerate(df.index)
            for c, col in enumerate(df.columns)
            if row in df.loc[slice_].index and col in df.loc[slice_].columns
        }
        assert result == expected
+
    @pytest.mark.parametrize(
        "slice_",
        [
            IndexSlice[:, IndexSlice["x", "A"]],
            IndexSlice[:, IndexSlice[:, "A"]],
            IndexSlice[:, IndexSlice[:, ["A", "C"]]],  # missing col element
            IndexSlice[IndexSlice["a", 1], :],
            IndexSlice[IndexSlice[:, 1], :],
            IndexSlice[IndexSlice[:, [1, 3]], :],  # missing row element
            IndexSlice[:, ("x", "A")],
            IndexSlice[("a", 1), :],
        ],
    )
    def test_map_subset_multiindex(self, slice_):
        """MultiIndex subsets render; slices listing missing labels raise
        KeyError for the absent label."""
        # GH 19861
        # edited for GH 33562
        if (
            isinstance(slice_[-1], tuple)
            and isinstance(slice_[-1][-1], list)
            and "C" in slice_[-1][-1]
        ):
            ctx = pytest.raises(KeyError, match="C")
        elif (
            isinstance(slice_[0], tuple)
            and isinstance(slice_[0][1], list)
            and 3 in slice_[0][1]
        ):
            ctx = pytest.raises(KeyError, match="3")
        else:
            ctx = contextlib.nullcontext()

        idx = MultiIndex.from_product([["a", "b"], [1, 2]])
        col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
        df = DataFrame(np.random.default_rng(2).random((4, 4)), columns=col, index=idx)

        with ctx:
            df.style.map(lambda x: "color: red;", subset=slice_).to_html()
+
    def test_map_subset_multiindex_code(self):
        """map accepts a subset slice on a MultiIndex built from codes."""
        # https://github.com/pandas-dev/pandas/issues/25858
        # Checks styler.map works with multindex when codes are provided
        codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
        columns = MultiIndex(
            levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
        )
        df = DataFrame(
            [[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
        )
        pct_subset = IndexSlice[:, IndexSlice[:, "%":"%"]]

        def color_negative_red(val):
            color = "red" if val < 0 else "black"
            return f"color: {color}"

        df.loc[pct_subset]
        df.style.map(color_negative_red, subset=pct_subset)
+
    @pytest.mark.parametrize(
        "stylefunc", ["background_gradient", "bar", "text_gradient"]
    )
    def test_subset_for_boolean_cols(self, stylefunc):
        """Built-in style functions cover all cells when column labels are
        the booleans False/True."""
        # GH47838
        df = DataFrame(
            [
                [1, 2],
                [3, 4],
            ],
            columns=[False, True],
        )
        styled = getattr(df.style, stylefunc)()
        styled._compute()
        assert set(styled.ctx) == {(0, 0), (0, 1), (1, 0), (1, 1)}
+
+ def test_empty(self):
+ df = DataFrame({"A": [1, 0]})
+ s = df.style
+ s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
+
+ result = s._translate(True, True)["cellstyle"]
+ expected = [
+ {"props": [("color", "red")], "selectors": ["row0_col0"]},
+ {"props": [("", "")], "selectors": ["row1_col0"]},
+ ]
+ assert result == expected
+
+ def test_duplicate(self):
+ df = DataFrame({"A": [1, 0]})
+ s = df.style
+ s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
+
+ result = s._translate(True, True)["cellstyle"]
+ expected = [
+ {"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
+ ]
+ assert result == expected
+
    def test_init_with_na_rep(self):
        """na_rep given at construction replaces missing display values."""
        # GH 21527 28358
        df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])

        ctx = Styler(df, na_rep="NA")._translate(True, True)
        assert ctx["body"][0][1]["display_value"] == "NA"
        assert ctx["body"][0][2]["display_value"] == "NA"
+
+ def test_caption(self, df):
+ styler = Styler(df, caption="foo")
+ result = styler.to_html()
+ assert all(["caption" in result, "foo" in result])
+
+ styler = df.style
+ result = styler.set_caption("baz")
+ assert styler is result
+ assert styler.caption == "baz"
+
+ def test_uuid(self, df):
+ styler = Styler(df, uuid="abc123")
+ result = styler.to_html()
+ assert "abc123" in result
+
+ styler = df.style
+ result = styler.set_uuid("aaa")
+ assert result is styler
+ assert result.uuid == "aaa"
+
    def test_unique_id(self):
        """Every id attribute in the rendered HTML must be unique."""
        # See https://github.com/pandas-dev/pandas/issues/16780
        df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
        result = df.style.to_html(uuid="test")
        assert "test" in result
        ids = re.findall('id="(.*?)"', result)
        assert np.unique(ids).size == len(ids)
+
    def test_table_styles(self, df):
        """table_styles accept both the list-of-tuples and the CSS-string
        props formats and render into the style block."""
        style = [{"selector": "th", "props": [("foo", "bar")]}]  # default format
        styler = Styler(df, table_styles=style)
        result = " ".join(styler.to_html().split())
        assert "th { foo: bar; }" in result

        styler = df.style
        result = styler.set_table_styles(style)
        assert styler is result
        assert styler.table_styles == style

        # GH 39563
        style = [{"selector": "th", "props": "foo:bar;"}]  # css string format
        styler = df.style.set_table_styles(style)
        result = " ".join(styler.to_html().split())
        assert "th { foo: bar; }" in result
+
    def test_table_styles_multiple(self, df):
        """A comma-separated selector is split into one entry per selector."""
        ctx = df.style.set_table_styles(
            [
                {"selector": "th,td", "props": "color:red;"},
                {"selector": "tr", "props": "color:green;"},
            ]
        )._translate(True, True)["table_styles"]
        assert ctx == [
            {"selector": "th", "props": [("color", "red")]},
            {"selector": "td", "props": [("color", "red")]},
            {"selector": "tr", "props": [("color", "green")]},
        ]
+
    def test_table_styles_dict_multiple_selectors(self, df):
        """Dict-form table styles split comma selectors and scope them to
        the targeted column's class."""
        # GH 44011
        result = df.style.set_table_styles(
            {
                "B": [
                    {"selector": "th,td", "props": [("border-left", "2px solid black")]}
                ]
            }
        )._translate(True, True)["table_styles"]

        expected = [
            {"selector": "th.col1", "props": [("border-left", "2px solid black")]},
            {"selector": "td.col1", "props": [("border-left", "2px solid black")]},
        ]

        assert result == expected
+
+ def test_maybe_convert_css_to_tuples(self):
+ expected = [("a", "b"), ("c", "d e")]
+ assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
+ assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected
+ expected = []
+ assert maybe_convert_css_to_tuples("") == expected
+
    def test_maybe_convert_css_to_tuples_err(self):
        """A string without a colon separator is rejected."""
        msg = "Styles supplied as string must follow CSS rule formats"
        with pytest.raises(ValueError, match=msg):
            maybe_convert_css_to_tuples("err")
+
    def test_table_attributes(self, df):
        """Table attributes set via constructor or setter appear in HTML."""
        attributes = 'class="foo" data-bar'
        styler = Styler(df, table_attributes=attributes)
        result = styler.to_html()
        assert 'class="foo" data-bar' in result

        result = df.style.set_table_attributes(attributes).to_html()
        assert 'class="foo" data-bar' in result
+
    def test_apply_none(self):
        """apply with axis=None passes the whole frame and styles the max."""
        def f(x):
            return DataFrame(
                np.where(x == x.max(), "color: red", ""),
                index=x.index,
                columns=x.columns,
            )

        result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
        assert result[(1, 1)] == [("color", "red")]
+
+ def test_trim(self, df):
+ result = df.style.to_html() # trim=True
+ assert result.count("#") == 0
+
+ result = df.style.highlight_max().to_html()
+ assert result.count("#") == len(df.columns)
+
    def test_export(self, df, styler):
        """export()/use() carry the queued todo list to another styler,
        which must still render."""
        f = lambda x: "color: red" if x > 0 else "color: blue"
        g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
        style1 = styler
        style1.map(f).map(g, z="b").highlight_max()._compute()  # = render
        result = style1.export()
        style2 = df.style
        style2.use(result)
        assert style1._todo == style2._todo
        style2.to_html()
+
    def test_bad_apply_shape(self):
        """_apply rejects scalar returns, wrong-length lists, misaligned
        Series labels and wrongly-shaped ndarrays."""
        df = DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])

        msg = "resulted in the apply method collapsing to a Series."
        with pytest.raises(ValueError, match=msg):
            df.style._apply(lambda x: "x")

        msg = "created invalid {} labels"
        with pytest.raises(ValueError, match=msg.format("index")):
            df.style._apply(lambda x: [""])

        with pytest.raises(ValueError, match=msg.format("index")):
            df.style._apply(lambda x: ["", "", "", ""])

        with pytest.raises(ValueError, match=msg.format("index")):
            df.style._apply(lambda x: Series(["a:v;", ""], index=["A", "C"]), axis=0)

        with pytest.raises(ValueError, match=msg.format("columns")):
            df.style._apply(lambda x: ["", "", ""], axis=1)

        with pytest.raises(ValueError, match=msg.format("columns")):
            df.style._apply(lambda x: Series(["a:v;", ""], index=["X", "Z"]), axis=1)

        msg = "returned ndarray with wrong shape"
        with pytest.raises(ValueError, match=msg):
            df.style._apply(lambda x: np.array([[""], [""]]), axis=None)
+
    def test_apply_bad_return(self):
        """axis=None requires a DataFrame or ndarray return type."""
        def f(x):
            return ""

        df = DataFrame([[1, 2], [3, 4]])
        msg = (
            "must return a DataFrame or ndarray when passed to `Styler.apply` "
            "with axis=None"
        )
        with pytest.raises(TypeError, match=msg):
            df.style._apply(f, axis=None)
+
    @pytest.mark.parametrize("axis", ["index", "columns"])
    def test_apply_bad_labels(self, axis):
        """A returned frame whose labels do not match the data is rejected."""
        def f(x):
            return DataFrame(**{axis: ["bad", "labels"]})

        df = DataFrame([[1, 2], [3, 4]])
        msg = f"created invalid {axis} labels."
        with pytest.raises(ValueError, match=msg):
            df.style._apply(f, axis=None)
+
    def test_get_level_lengths(self):
        """Sparsified lengths collapse level-0 runs (length 3); unsparsified
        lengths are all 1."""
        index = MultiIndex.from_product([["a", "b"], [0, 1, 2]])
        expected = {
            (0, 0): 3,
            (0, 3): 3,
            (1, 0): 1,
            (1, 1): 1,
            (1, 2): 1,
            (1, 3): 1,
            (1, 4): 1,
            (1, 5): 1,
        }
        result = _get_level_lengths(index, sparsify=True, max_index=100)
        tm.assert_dict_equal(result, expected)

        expected = {
            (0, 0): 1,
            (0, 1): 1,
            (0, 2): 1,
            (0, 3): 1,
            (0, 4): 1,
            (0, 5): 1,
            (1, 0): 1,
            (1, 1): 1,
            (1, 2): 1,
            (1, 3): 1,
            (1, 4): 1,
            (1, 5): 1,
        }
        result = _get_level_lengths(index, sparsify=False, max_index=100)
        tm.assert_dict_equal(result, expected)
+
    def test_get_level_lengths_un_sorted(self):
        """Only contiguous runs collapse when the index is unsorted: the
        trailing repeated label starts a fresh run."""
        index = MultiIndex.from_arrays([[1, 1, 2, 1], ["a", "b", "b", "d"]])
        expected = {
            (0, 0): 2,
            (0, 2): 1,
            (0, 3): 1,
            (1, 0): 1,
            (1, 1): 1,
            (1, 2): 1,
            (1, 3): 1,
        }
        result = _get_level_lengths(index, sparsify=True, max_index=100)
        tm.assert_dict_equal(result, expected)

        expected = {
            (0, 0): 1,
            (0, 1): 1,
            (0, 2): 1,
            (0, 3): 1,
            (1, 0): 1,
            (1, 1): 1,
            (1, 2): 1,
            (1, 3): 1,
        }
        result = _get_level_lengths(index, sparsify=False, max_index=100)
        tm.assert_dict_equal(result, expected)
+
    def test_mi_sparse_index_names(self, blank_value):
        """MultiIndex level names render with per-level classes, followed by
        a blank cell for the data column."""
        # Test the class names and displayed value are correct on rendering MI names
        df = DataFrame(
            {"A": [1, 2]},
            index=MultiIndex.from_arrays(
                [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
            ),
        )
        result = df.style._translate(True, True)
        head = result["head"][1]
        expected = [
            {
                "class": "index_name level0",
                "display_value": "idx_level_0",
                "is_visible": True,
            },
            {
                "class": "index_name level1",
                "display_value": "idx_level_1",
                "is_visible": True,
            },
            {
                "class": "blank col0",
                "display_value": blank_value,
                "is_visible": True,
            },
        ]
        for i, expected_dict in enumerate(expected):
            assert expected_dict.items() <= head[i].items()
+
    def test_mi_sparse_column_names(self, blank_value):
        """Each MultiIndex column-level header row starts with a blank cell
        followed by that level's name cell."""
        df = DataFrame(
            np.arange(16).reshape(4, 4),
            index=MultiIndex.from_arrays(
                [["a", "a", "b", "a"], [0, 1, 1, 2]],
                names=["idx_level_0", "idx_level_1"],
            ),
            columns=MultiIndex.from_arrays(
                [["C1", "C1", "C2", "C2"], [1, 0, 1, 0]], names=["colnam_0", "colnam_1"]
            ),
        )
        result = Styler(df, cell_ids=False)._translate(True, True)

        for level in [0, 1]:
            head = result["head"][level]
            expected = [
                {
                    "class": "blank",
                    "display_value": blank_value,
                    "is_visible": True,
                },
                {
                    "class": f"index_name level{level}",
                    "display_value": f"colnam_{level}",
                    "is_visible": True,
                },
            ]
            for i, expected_dict in enumerate(expected):
                assert expected_dict.items() <= head[i].items()
+
+ def test_hide_column_headers(self, df, styler):
+ ctx = styler.hide(axis="columns")._translate(True, True)
+ assert len(ctx["head"]) == 0 # no header entries with an unnamed index
+
+ df.index.name = "some_name"
+ ctx = df.style.hide(axis="columns")._translate(True, True)
+ assert len(ctx["head"]) == 1
+ # index names still visible, changed in #42101, reverted in 43404
+
+ def test_hide_single_index(self, df):
+ # GH 14194
+ # single unnamed index
+ ctx = df.style._translate(True, True)
+ assert ctx["body"][0][0]["is_visible"]
+ assert ctx["head"][0][0]["is_visible"]
+ ctx2 = df.style.hide(axis="index")._translate(True, True)
+ assert not ctx2["body"][0][0]["is_visible"]
+ assert not ctx2["head"][0][0]["is_visible"]
+
+ # single named index
+ ctx3 = df.set_index("A").style._translate(True, True)
+ assert ctx3["body"][0][0]["is_visible"]
+ assert len(ctx3["head"]) == 2 # 2 header levels
+ assert ctx3["head"][0][0]["is_visible"]
+
+ ctx4 = df.set_index("A").style.hide(axis="index")._translate(True, True)
+ assert not ctx4["body"][0][0]["is_visible"]
+ assert len(ctx4["head"]) == 1 # only 1 header levels
+ assert not ctx4["head"][0][0]["is_visible"]
+
+ def test_hide_multiindex(self):
+ # GH 14194
+ df = DataFrame(
+ {"A": [1, 2], "B": [1, 2]},
+ index=MultiIndex.from_arrays(
+ [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
+ ),
+ )
+ ctx1 = df.style._translate(True, True)
+ # tests for 'a' and '0'
+ assert ctx1["body"][0][0]["is_visible"]
+ assert ctx1["body"][0][1]["is_visible"]
+ # check for blank header rows
+ assert len(ctx1["head"][0]) == 4 # two visible indexes and two data columns
+
+ ctx2 = df.style.hide(axis="index")._translate(True, True)
+ # tests for 'a' and '0'
+ assert not ctx2["body"][0][0]["is_visible"]
+ assert not ctx2["body"][0][1]["is_visible"]
+ # check for blank header rows
+ assert len(ctx2["head"][0]) == 3 # one hidden (col name) and two data columns
+ assert not ctx2["head"][0][0]["is_visible"]
+
+ def test_hide_columns_single_level(self, df):
+ # GH 14194
+ # test hiding single column
+ ctx = df.style._translate(True, True)
+ assert ctx["head"][0][1]["is_visible"]
+ assert ctx["head"][0][1]["display_value"] == "A"
+ assert ctx["head"][0][2]["is_visible"]
+ assert ctx["head"][0][2]["display_value"] == "B"
+ assert ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ ctx = df.style.hide("A", axis="columns")._translate(True, True)
+ assert not ctx["head"][0][1]["is_visible"]
+ assert not ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ # test hiding multiple columns
+ ctx = df.style.hide(["A", "B"], axis="columns")._translate(True, True)
+ assert not ctx["head"][0][1]["is_visible"]
+ assert not ctx["head"][0][2]["is_visible"]
+ assert not ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert not ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ def test_hide_columns_index_mult_levels(self):
+ # GH 14194
+ # setup dataframe with multiple column levels and indices
+ i1 = MultiIndex.from_arrays(
+ [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
+ )
+ i2 = MultiIndex.from_arrays(
+ [["b", "b"], [0, 1]], names=["col_level_0", "col_level_1"]
+ )
+ df = DataFrame([[1, 2], [3, 4]], index=i1, columns=i2)
+ ctx = df.style._translate(True, True)
+ # column headers
+ assert ctx["head"][0][2]["is_visible"]
+ assert ctx["head"][1][2]["is_visible"]
+ assert ctx["head"][1][3]["display_value"] == "1"
+ # indices
+ assert ctx["body"][0][0]["is_visible"]
+ # data
+ assert ctx["body"][1][2]["is_visible"]
+ assert ctx["body"][1][2]["display_value"] == "3"
+ assert ctx["body"][1][3]["is_visible"]
+ assert ctx["body"][1][3]["display_value"] == "4"
+
+ # hide top column level, which hides both columns
+ ctx = df.style.hide("b", axis="columns")._translate(True, True)
+ assert not ctx["head"][0][2]["is_visible"] # b
+ assert not ctx["head"][1][2]["is_visible"] # 0
+ assert not ctx["body"][1][2]["is_visible"] # 3
+ assert ctx["body"][0][0]["is_visible"] # index
+
+ # hide first column only
+ ctx = df.style.hide([("b", 0)], axis="columns")._translate(True, True)
+ assert not ctx["head"][0][2]["is_visible"] # b
+ assert ctx["head"][0][3]["is_visible"] # b
+ assert not ctx["head"][1][2]["is_visible"] # 0
+ assert not ctx["body"][1][2]["is_visible"] # 3
+ assert ctx["body"][1][3]["is_visible"]
+ assert ctx["body"][1][3]["display_value"] == "4"
+
+ # hide second column and index
+ ctx = df.style.hide([("b", 1)], axis=1).hide(axis=0)._translate(True, True)
+ assert not ctx["body"][0][0]["is_visible"] # index
+ assert len(ctx["head"][0]) == 3
+ assert ctx["head"][0][1]["is_visible"] # b
+ assert ctx["head"][1][1]["is_visible"] # 0
+ assert not ctx["head"][1][2]["is_visible"] # 1
+ assert not ctx["body"][1][3]["is_visible"] # 4
+ assert ctx["body"][1][2]["is_visible"]
+ assert ctx["body"][1][2]["display_value"] == "3"
+
+ # hide top row level, which hides both rows so body empty
+ ctx = df.style.hide("a", axis="index")._translate(True, True)
+ assert ctx["body"] == []
+
+ # hide first row only
+ ctx = df.style.hide(("a", 0), axis="index")._translate(True, True)
+ for i in [0, 1, 2, 3]:
+ assert "row1" in ctx["body"][0][i]["class"] # row0 not included in body
+ assert ctx["body"][0][i]["is_visible"]
+
+ def test_pipe(self, df):
+ def set_caption_from_template(styler, a, b):
+ return styler.set_caption(f"Dataframe with a = {a} and b = {b}")
+
+ styler = df.style.pipe(set_caption_from_template, "A", b="B")
+ assert "Dataframe with a = A and b = B" in styler.to_html()
+
+ # Test with an argument that is a (callable, keyword_name) pair.
+ def f(a, b, styler):
+ return (a, b, styler)
+
+ styler = df.style
+ result = styler.pipe((f, "styler"), a=1, b=2)
+ assert result == (1, 2, styler)
+
+ def test_no_cell_ids(self):
+ # GH 35588
+ # GH 35663
+ df = DataFrame(data=[[0]])
+ styler = Styler(df, uuid="_", cell_ids=False)
+ styler.to_html()
+ s = styler.to_html() # render twice to ensure ctx is not updated
+ assert s.find('') != -1
+
+ @pytest.mark.parametrize(
+ "classes",
+ [
+ DataFrame(
+ data=[["", "test-class"], [np.nan, None]],
+ columns=["A", "B"],
+ index=["a", "b"],
+ ),
+ DataFrame(data=[["test-class"]], columns=["B"], index=["a"]),
+ DataFrame(data=[["test-class", "unused"]], columns=["B", "C"], index=["a"]),
+ ],
+ )
+ def test_set_data_classes(self, classes):
+ # GH 36159
+ df = DataFrame(data=[[0, 1], [2, 3]], columns=["A", "B"], index=["a", "b"])
+ s = Styler(df, uuid_len=0, cell_ids=False).set_td_classes(classes).to_html()
+ assert ' 0 ' in s
+ assert '1 ' in s
+ assert '2 ' in s
+ assert '3 ' in s
+ # GH 39317
+ s = Styler(df, uuid_len=0, cell_ids=True).set_td_classes(classes).to_html()
+ assert '0 ' in s
+ assert '1 ' in s
+ assert '2 ' in s
+ assert '3 ' in s
+
+ def test_set_data_classes_reindex(self):
+ # GH 39317
+ df = DataFrame(
+ data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=[0, 1, 2], index=[0, 1, 2]
+ )
+ classes = DataFrame(
+ data=[["mi", "ma"], ["mu", "mo"]],
+ columns=[0, 2],
+ index=[0, 2],
+ )
+ s = Styler(df, uuid_len=0).set_td_classes(classes).to_html()
+ assert '0 ' in s
+ assert '2 ' in s
+ assert '4 ' in s
+ assert '6 ' in s
+ assert '8 ' in s
+
+ def test_chaining_table_styles(self):
+ # GH 35607
+ df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
+ styler = df.style.set_table_styles(
+ [{"selector": "", "props": [("background-color", "yellow")]}]
+ ).set_table_styles(
+ [{"selector": ".col0", "props": [("background-color", "blue")]}],
+ overwrite=False,
+ )
+ assert len(styler.table_styles) == 2
+
+ def test_column_and_row_styling(self):
+ # GH 35607
+ df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
+ s = Styler(df, uuid_len=0)
+ s = s.set_table_styles({"A": [{"selector": "", "props": [("color", "blue")]}]})
+ assert "#T_ .col0 {\n color: blue;\n}" in s.to_html()
+ s = s.set_table_styles(
+ {0: [{"selector": "", "props": [("color", "blue")]}]}, axis=1
+ )
+ assert "#T_ .row0 {\n color: blue;\n}" in s.to_html()
+
+ @pytest.mark.parametrize("len_", [1, 5, 32, 33, 100])
+ def test_uuid_len(self, len_):
+ # GH 36345
+ df = DataFrame(data=[["A"]])
+ s = Styler(df, uuid_len=len_, cell_ids=False).to_html()
+ strt = s.find('id="T_')
+ end = s[strt + 6 :].find('"')
+ if len_ > 32:
+ assert end == 32
+ else:
+ assert end == len_
+
+ @pytest.mark.parametrize("len_", [-2, "bad", None])
+ def test_uuid_len_raises(self, len_):
+ # GH 36345
+ df = DataFrame(data=[["A"]])
+ msg = "``uuid_len`` must be an integer in range \\[0, 32\\]."
+ with pytest.raises(TypeError, match=msg):
+ Styler(df, uuid_len=len_, cell_ids=False).to_html()
+
+ @pytest.mark.parametrize(
+ "slc",
+ [
+ IndexSlice[:, :],
+ IndexSlice[:, 1],
+ IndexSlice[1, :],
+ IndexSlice[[1], [1]],
+ IndexSlice[1, [1]],
+ IndexSlice[[1], 1],
+ IndexSlice[1],
+ IndexSlice[1, 1],
+ slice(None, None, None),
+ [0, 1],
+ np.array([0, 1]),
+ Series([0, 1]),
+ ],
+ )
+ def test_non_reducing_slice(self, slc):
+ df = DataFrame([[0, 1], [2, 3]])
+
+ tslice_ = non_reducing_slice(slc)
+ assert isinstance(df.loc[tslice_], DataFrame)
+
+ @pytest.mark.parametrize("box", [list, Series, np.array])
+ def test_list_slice(self, box):
+ # like dataframe getitem
+ subset = box(["A"])
+
+ df = DataFrame({"A": [1, 2], "B": [3, 4]}, index=["A", "B"])
+ expected = IndexSlice[:, ["A"]]
+
+ result = non_reducing_slice(subset)
+ tm.assert_frame_equal(df.loc[result], df.loc[expected])
+
+ def test_non_reducing_slice_on_multiindex(self):
+ # GH 19861
+ dic = {
+ ("a", "d"): [1, 4],
+ ("a", "c"): [2, 3],
+ ("b", "c"): [3, 2],
+ ("b", "d"): [4, 1],
+ }
+ df = DataFrame(dic, index=[0, 1])
+ idx = IndexSlice
+ slice_ = idx[:, idx["b", "d"]]
+ tslice_ = non_reducing_slice(slice_)
+
+ result = df.loc[tslice_]
+ expected = DataFrame({("b", "d"): [4, 1]})
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:, :],
+ # check cols
+ IndexSlice[:, IndexSlice[["a"]]], # inferred deeper need list
+ IndexSlice[:, IndexSlice[["a"], ["c"]]], # inferred deeper need list
+ IndexSlice[:, IndexSlice["a", "c", :]],
+ IndexSlice[:, IndexSlice["a", :, "e"]],
+ IndexSlice[:, IndexSlice[:, "c", "e"]],
+ IndexSlice[:, IndexSlice["a", ["c", "d"], :]], # check list
+ IndexSlice[:, IndexSlice["a", ["c", "d", "-"], :]], # don't allow missing
+ IndexSlice[:, IndexSlice["a", ["c", "d", "-"], "e"]], # no slice
+ # check rows
+ IndexSlice[IndexSlice[["U"]], :], # inferred deeper need list
+ IndexSlice[IndexSlice[["U"], ["W"]], :], # inferred deeper need list
+ IndexSlice[IndexSlice["U", "W", :], :],
+ IndexSlice[IndexSlice["U", :, "Y"], :],
+ IndexSlice[IndexSlice[:, "W", "Y"], :],
+ IndexSlice[IndexSlice[:, "W", ["Y", "Z"]], :], # check list
+ IndexSlice[IndexSlice[:, "W", ["Y", "Z", "-"]], :], # don't allow missing
+ IndexSlice[IndexSlice["U", "W", ["Y", "Z", "-"]], :], # no slice
+ # check simultaneous
+ IndexSlice[IndexSlice[:, "W", "Y"], IndexSlice["a", "c", :]],
+ ],
+ )
+ def test_non_reducing_multi_slice_on_multiindex(self, slice_):
+ # GH 33562
+ cols = MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]])
+ idxs = MultiIndex.from_product([["U", "V"], ["W", "X"], ["Y", "Z"]])
+ df = DataFrame(np.arange(64).reshape(8, 8), columns=cols, index=idxs)
+
+ for lvl in [0, 1]:
+ key = slice_[lvl]
+ if isinstance(key, tuple):
+ for subkey in key:
+ if isinstance(subkey, list) and "-" in subkey:
+ # not present in the index level, raises KeyError since 2.0
+ with pytest.raises(KeyError, match="-"):
+ df.loc[slice_]
+ return
+
+ expected = df.loc[slice_]
+ result = df.loc[non_reducing_slice(slice_)]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_hidden_index_names(mi_df):
+ mi_df.index.names = ["Lev0", "Lev1"]
+ mi_styler = mi_df.style
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 3 # 2 column index levels + 1 index names row
+
+ mi_styler.hide(axis="index", names=True)
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 2 # index names row is unparsed
+ for i in range(4):
+ assert ctx["body"][0][i]["is_visible"] # 2 index levels + 2 data values visible
+
+ mi_styler.hide(axis="index", level=1)
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 2 # index names row is still hidden
+ assert ctx["body"][0][0]["is_visible"] is True
+ assert ctx["body"][0][1]["is_visible"] is False
+
+
+def test_hidden_column_names(mi_df):
+ mi_df.columns.names = ["Lev0", "Lev1"]
+ mi_styler = mi_df.style
+ ctx = mi_styler._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == "Lev0"
+ assert ctx["head"][1][1]["display_value"] == "Lev1"
+
+ mi_styler.hide(names=True, axis="columns")
+ ctx = mi_styler._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == " "
+ assert ctx["head"][1][1]["display_value"] == " "
+
+ mi_styler.hide(level=0, axis="columns")
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 1 # no index names and only one visible column headers
+ assert ctx["head"][0][1]["display_value"] == " "
+
+
+@pytest.mark.parametrize("caption", [1, ("a", "b", "c"), (1, "s")])
+def test_caption_raises(mi_styler, caption):
+ msg = "`caption` must be either a string or 2-tuple of strings."
+ with pytest.raises(ValueError, match=msg):
+ mi_styler.set_caption(caption)
+
+
+def test_hiding_headers_over_index_no_sparsify():
+ # GH 43464
+ midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]])
+ df = DataFrame(9, index=midx, columns=[0])
+ ctx = df.style._translate(False, False)
+ assert len(ctx["body"]) == 6
+ ctx = df.style.hide((1, "a"), axis=0)._translate(False, False)
+ assert len(ctx["body"]) == 4
+ assert "row2" in ctx["body"][0][0]["class"]
+
+
+def test_hiding_headers_over_columns_no_sparsify():
+ # GH 43464
+ midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]])
+ df = DataFrame(9, columns=midx, index=[0])
+ ctx = df.style._translate(False, False)
+ for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]:
+ assert ctx["head"][ix[0]][ix[1]]["is_visible"] is True
+ ctx = df.style.hide((1, "a"), axis="columns")._translate(False, False)
+ for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]:
+ assert ctx["head"][ix[0]][ix[1]]["is_visible"] is False
+
+
+def test_get_level_lengths_mi_hidden():
+ # GH 43464
+ index = MultiIndex.from_arrays([[1, 1, 1, 2, 2, 2], ["a", "a", "b", "a", "a", "b"]])
+ expected = {
+ (0, 2): 1,
+ (0, 3): 1,
+ (0, 4): 1,
+ (0, 5): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ (1, 4): 1,
+ (1, 5): 1,
+ }
+ result = _get_level_lengths(
+ index,
+ sparsify=False,
+ max_index=100,
+ hidden_elements=[0, 1, 0, 1], # hidden element can repeat if duplicated index
+ )
+ tm.assert_dict_equal(result, expected)
+
+
+def test_row_trimming_hide_index():
+ # gh 43703
+ df = DataFrame([[1], [2], [3], [4], [5]])
+ with option_context("styler.render.max_rows", 2):
+ ctx = df.style.hide([0, 1], axis="index")._translate(True, True)
+ assert len(ctx["body"]) == 3
+ for r, val in enumerate(["3", "4", "..."]):
+ assert ctx["body"][r][1]["display_value"] == val
+
+
+def test_row_trimming_hide_index_mi():
+ # gh 44247
+ df = DataFrame([[1], [2], [3], [4], [5]])
+ df.index = MultiIndex.from_product([[0], [0, 1, 2, 3, 4]])
+ with option_context("styler.render.max_rows", 2):
+ ctx = df.style.hide([(0, 0), (0, 1)], axis="index")._translate(True, True)
+ assert len(ctx["body"]) == 3
+
+ # level 0 index headers (sparsified)
+ assert {"value": 0, "attributes": 'rowspan="2"', "is_visible": True}.items() <= ctx[
+ "body"
+ ][0][0].items()
+ assert {"value": 0, "attributes": "", "is_visible": False}.items() <= ctx["body"][
+ 1
+ ][0].items()
+ assert {"value": "...", "is_visible": True}.items() <= ctx["body"][2][0].items()
+
+ for r, val in enumerate(["2", "3", "..."]):
+ assert ctx["body"][r][1]["display_value"] == val # level 1 index headers
+ for r, val in enumerate(["3", "4", "..."]):
+ assert ctx["body"][r][2]["display_value"] == val # data values
+
+
+def test_col_trimming_hide_columns():
+ # gh 44272
+ df = DataFrame([[1, 2, 3, 4, 5]])
+ with option_context("styler.render.max_columns", 2):
+ ctx = df.style.hide([0, 1], axis="columns")._translate(True, True)
+
+ assert len(ctx["head"][0]) == 6 # blank, [0, 1 (hidden)], [2 ,3 (visible)], + trim
+ for c, vals in enumerate([(1, False), (2, True), (3, True), ("...", True)]):
+ assert ctx["head"][0][c + 2]["value"] == vals[0]
+ assert ctx["head"][0][c + 2]["is_visible"] == vals[1]
+
+ assert len(ctx["body"][0]) == 6 # index + 2 hidden + 2 visible + trimming col
+
+
+def test_no_empty_apply(mi_styler):
+ # 45313
+ mi_styler.apply(lambda s: ["a:v;"] * 2, subset=[False, False])
+ mi_styler._compute()
+
+
+@pytest.mark.parametrize("format", ["html", "latex", "string"])
+def test_output_buffer(mi_styler, format):
+ # gh 47053
+ with tm.ensure_clean(f"delete_me.{format}") as f:
+ getattr(mi_styler, f"to_{format}")(f)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f1443c3ee66be040f668f546682924207cfd31e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py
@@ -0,0 +1,1090 @@
+from textwrap import dedent
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+ Series,
+ option_context,
+)
+
+pytest.importorskip("jinja2")
+from pandas.io.formats.style import Styler
+from pandas.io.formats.style_render import (
+ _parse_latex_cell_styles,
+ _parse_latex_css_conversion,
+ _parse_latex_header_span,
+ _parse_latex_table_styles,
+ _parse_latex_table_wrapping,
+)
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ {"A": [0, 1], "B": [-0.61, -1.22], "C": Series(["ab", "cd"], dtype=object)}
+ )
+
+
+@pytest.fixture
+def df_ext():
+ return DataFrame(
+ {"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]}
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0, precision=2)
+
+
+def test_minimal_latex_tabular(styler):
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ & A & B & C \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex() == expected
+
+
+def test_tabular_hrules(styler):
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ \\toprule
+ & A & B & C \\\\
+ \\midrule
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\bottomrule
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex(hrules=True) == expected
+
+
+def test_tabular_custom_hrules(styler):
+ styler.set_table_styles(
+ [
+ {"selector": "toprule", "props": ":hline"},
+ {"selector": "bottomrule", "props": ":otherline"},
+ ]
+ ) # no midrule
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ \\hline
+ & A & B & C \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\otherline
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex() == expected
+
+
+def test_column_format(styler):
+ # default setting is already tested in `test_latex_minimal_tabular`
+ styler.set_table_styles([{"selector": "column_format", "props": ":cccc"}])
+
+ assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_format="rrrr")
+ styler.set_table_styles([{"selector": "column_format", "props": ":r|r|cc"}])
+ assert "\\begin{tabular}{r|r|cc}" in styler.to_latex()
+
+
+def test_siunitx_cols(styler):
+ expected = dedent(
+ """\
+ \\begin{tabular}{lSSl}
+ {} & {A} & {B} & {C} \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex(siunitx=True) == expected
+
+
+def test_position(styler):
+ assert "\\begin{table}[h!]" in styler.to_latex(position="h!")
+ assert "\\end{table}" in styler.to_latex(position="h!")
+ styler.set_table_styles([{"selector": "position", "props": ":b!"}])
+ assert "\\begin{table}[b!]" in styler.to_latex()
+ assert "\\end{table}" in styler.to_latex()
+
+
+@pytest.mark.parametrize("env", [None, "longtable"])
+def test_label(styler, env):
+ assert "\n\\label{text}" in styler.to_latex(label="text", environment=env)
+ styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}])
+ assert "\n\\label{more :text}" in styler.to_latex(environment=env)
+
+
+def test_position_float_raises(styler):
+ msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering',"
+ with pytest.raises(ValueError, match=msg):
+ styler.to_latex(position_float="bad_string")
+
+ msg = "`position_float` cannot be used in 'longtable' `environment`"
+ with pytest.raises(ValueError, match=msg):
+ styler.to_latex(position_float="centering", environment="longtable")
+
+
+@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")])
+@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")])
+@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")])
+@pytest.mark.parametrize("column_format", [(None, ""), ("rcrl", "{tabular}{rcrl}")])
+@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")])
+def test_kwargs_combinations(
+ styler, label, position, caption, column_format, position_float
+):
+ result = styler.to_latex(
+ label=label[0],
+ position=position[0],
+ caption=caption[0],
+ column_format=column_format[0],
+ position_float=position_float[0],
+ )
+ assert label[1] in result
+ assert position[1] in result
+ assert caption[1] in result
+ assert column_format[1] in result
+ assert position_float[1] in result
+
+
+def test_custom_table_styles(styler):
+ styler.set_table_styles(
+ [
+ {"selector": "mycommand", "props": ":{myoptions}"},
+ {"selector": "mycommand2", "props": ":{myoptions2}"},
+ ]
+ )
+ expected = dedent(
+ """\
+ \\begin{table}
+ \\mycommand{myoptions}
+ \\mycommand2{myoptions2}
+ """
+ )
+ assert expected in styler.to_latex()
+
+
+def test_cell_styling(styler):
+ styler.highlight_max(props="itshape:;Huge:--wrap;")
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ & A & B & C \\\\
+ 0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\
+ 1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\
+ \\end{tabular}
+ """
+ )
+ assert expected == styler.to_latex()
+
+
+def test_multiindex_columns(df):
+ cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df.columns = cidx
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ & \\multicolumn{2}{r}{A} & B \\\\
+ & a & b & c \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ s = df.style.format(precision=2)
+ assert expected == s.to_latex()
+
+ # non-sparse
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ & A & A & B \\\\
+ & a & b & c \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ s = df.style.format(precision=2)
+ assert expected == s.to_latex(sparse_columns=False)
+
+
+def test_multiindex_row(df_ext):
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index = ridx
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & A & B & C \\\\
+ \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
+ & b & 1 & -1.22 & cd \\\\
+ B & c & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ styler = df_ext.style.format(precision=2)
+ result = styler.to_latex()
+ assert expected == result
+
+ # non-sparse
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & A & B & C \\\\
+ A & a & 0 & -0.61 & ab \\\\
+ A & b & 1 & -1.22 & cd \\\\
+ B & c & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ result = styler.to_latex(sparse_index=False)
+ assert expected == result
+
+
+def test_multirow_naive(df_ext):
+ ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")])
+ df_ext.index = ridx
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & A & B & C \\\\
+ X & x & 0 & -0.61 & ab \\\\
+ & y & 1 & -1.22 & cd \\\\
+ Y & z & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ styler = df_ext.style.format(precision=2)
+ result = styler.to_latex(multirow_align="naive")
+ assert expected == result
+
+
+def test_multiindex_row_and_col(df_ext):
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & \\multicolumn{2}{l}{Z} & Y \\\\
+ & & a & b & c \\\\
+ \\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
+ & b & 1 & -1.22 & cd \\\\
+ B & c & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ styler = df_ext.style.format(precision=2)
+ result = styler.to_latex(multirow_align="b", multicol_align="l")
+ assert result == expected
+
+ # non-sparse
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & Z & Z & Y \\\\
+ & & a & b & c \\\\
+ A & a & 0 & -0.61 & ab \\\\
+ A & b & 1 & -1.22 & cd \\\\
+ B & c & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ result = styler.to_latex(sparse_index=False, sparse_columns=False)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "multicol_align, siunitx, header",
+ [
+ ("naive-l", False, " & A & &"),
+ ("naive-r", False, " & & & A"),
+ ("naive-l", True, "{} & {A} & {} & {}"),
+ ("naive-r", True, "{} & {} & {} & {A}"),
+ ],
+)
+def test_multicol_naive(df, multicol_align, siunitx, header):
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")])
+ df.columns = ridx
+ level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}"
+ col_format = "lrrl" if not siunitx else "lSSl"
+ expected = dedent(
+ f"""\
+ \\begin{{tabular}}{{{col_format}}}
+ {header} \\\\
+ {level1} \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{{tabular}}
+ """
+ )
+ styler = df.style.format(precision=2)
+ result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx)
+ assert expected == result
+
+
+def test_multi_options(df_ext):
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ styler = df_ext.style.format(precision=2)
+
+ expected = dedent(
+ """\
+ & & \\multicolumn{2}{r}{Z} & Y \\\\
+ & & a & b & c \\\\
+ \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
+ """
+ )
+ result = styler.to_latex()
+ assert expected in result
+
+ with option_context("styler.latex.multicol_align", "l"):
+ assert " & & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex()
+
+ with option_context("styler.latex.multirow_align", "b"):
+ assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex()
+
+
+def test_multiindex_columns_hidden():
+ df = DataFrame([[1, 2, 3, 4]])
+ df.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)])
+ s = df.style
+ assert "{tabular}{lrrrr}" in s.to_latex()
+ s.set_table_styles([]) # reset the position command
+ s.hide([("A", 2)], axis="columns")
+ assert "{tabular}{lrrr}" in s.to_latex()
+
+
+@pytest.mark.parametrize(
+ "option, value",
+ [
+ ("styler.sparse.index", True),
+ ("styler.sparse.index", False),
+ ("styler.sparse.columns", True),
+ ("styler.sparse.columns", False),
+ ],
+)
+def test_sparse_options(df_ext, option, value):
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ styler = df_ext.style
+
+ latex1 = styler.to_latex()
+ with option_context(option, value):
+ latex2 = styler.to_latex()
+ assert (latex1 == latex2) is value
+
+
+def test_hidden_index(styler):
+ styler.hide(axis="index")
+ expected = dedent(
+ """\
+ \\begin{tabular}{rrl}
+ A & B & C \\\\
+ 0 & -0.61 & ab \\\\
+ 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex() == expected
+
+
+@pytest.mark.parametrize("environment", ["table", "figure*", None])
+def test_comprehensive(df_ext, environment):
+ # test as many low level features simultaneously as possible
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ stlr = df_ext.style
+ stlr.set_caption("mycap")
+ stlr.set_table_styles(
+ [
+ {"selector": "label", "props": ":{fig§item}"},
+ {"selector": "position", "props": ":h!"},
+ {"selector": "position_float", "props": ":centering"},
+ {"selector": "column_format", "props": ":rlrlr"},
+ {"selector": "toprule", "props": ":toprule"},
+ {"selector": "midrule", "props": ":midrule"},
+ {"selector": "bottomrule", "props": ":bottomrule"},
+ {"selector": "rowcolors", "props": ":{3}{pink}{}"}, # custom command
+ ]
+ )
+ stlr.highlight_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap")
+ stlr.highlight_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")])
+
+ expected = (
+ """\
+\\begin{table}[h!]
+\\centering
+\\caption{mycap}
+\\label{fig:item}
+\\rowcolors{3}{pink}{}
+\\begin{tabular}{rlrlr}
+\\toprule
+ & & \\multicolumn{2}{r}{Z} & Y \\\\
+ & & a & b & c \\\\
+\\midrule
+\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\
+ & b & 1 & -1.22 & cd \\\\
+B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """
+ """\
+\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\
+\\bottomrule
+\\end{tabular}
+\\end{table}
+"""
+ ).replace("table", environment if environment else "table")
+ result = stlr.format(precision=2).to_latex(environment=environment)
+ assert result == expected
+
+
+def test_environment_option(styler):
+ with option_context("styler.latex.environment", "bar-env"):
+ assert "\\begin{bar-env}" in styler.to_latex()
+ assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env")
+
+
+def test_parse_latex_table_styles(styler):
+ styler.set_table_styles(
+ [
+ {"selector": "foo", "props": [("attr", "value")]},
+ {"selector": "bar", "props": [("attr", "overwritten")]},
+ {"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]},
+ {"selector": "label", "props": [("", "{fig§item}")]},
+ ]
+ )
+ assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz"
+
+ # test '§' replaced by ':' [for CSS compatibility]
+ assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}"
+
+
+def test_parse_latex_cell_styles_basic(): # test nesting
+ cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")]
+ expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}"
+ assert _parse_latex_cell_styles(cell_style, "text") == expected
+
+
+@pytest.mark.parametrize(
+ "wrap_arg, expected",
+ [ # test wrapping
+ ("", "\\ "),
+ ("--wrap", "{\\ }"),
+ ("--nowrap", "\\ "),
+ ("--lwrap", "{\\} "),
+ ("--dwrap", "{\\}{}"),
+ ("--rwrap", "\\{}"),
+ ],
+)
+def test_parse_latex_cell_styles_braces(wrap_arg, expected):
+ cell_style = [("", f"{wrap_arg}")]
+ assert _parse_latex_cell_styles(cell_style, "") == expected
+
+
+def test_parse_latex_header_span():
+ cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []}
+ expected = "\\multicolumn{3}{Y}{text}"
+ assert _parse_latex_header_span(cell, "X", "Y") == expected
+
+ cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []}
+ expected = "\\multirow[X]{5}{*}{text}"
+ assert _parse_latex_header_span(cell, "X", "Y") == expected
+
+ cell = {"display_value": "text", "cellstyle": []}
+ assert _parse_latex_header_span(cell, "X", "Y") == "text"
+
+ cell = {"display_value": "text", "cellstyle": [("bfseries", "--rwrap")]}
+ assert _parse_latex_header_span(cell, "X", "Y") == "\\bfseries{text}"
+
+
+def test_parse_latex_table_wrapping(styler):
+ styler.set_table_styles(
+ [
+ {"selector": "toprule", "props": ":value"},
+ {"selector": "bottomrule", "props": ":value"},
+ {"selector": "midrule", "props": ":value"},
+ {"selector": "column_format", "props": ":value"},
+ ]
+ )
+ assert _parse_latex_table_wrapping(styler.table_styles, styler.caption) is False
+ assert _parse_latex_table_wrapping(styler.table_styles, "some caption") is True
+ styler.set_table_styles(
+ [
+ {"selector": "not-ignored", "props": ":value"},
+ ],
+ overwrite=False,
+ )
+ assert _parse_latex_table_wrapping(styler.table_styles, None) is True
+
+
+def test_short_caption(styler):
+ result = styler.to_latex(caption=("full cap", "short cap"))
+ assert "\\caption[short cap]{full cap}" in result
+
+
+@pytest.mark.parametrize(
+ "css, expected",
+ [
+ ([("color", "red")], [("color", "{red}")]), # test color and input format types
+ (
+ [("color", "rgb(128, 128, 128 )")],
+ [("color", "[rgb]{0.502, 0.502, 0.502}")],
+ ),
+ (
+ [("color", "rgb(128, 50%, 25% )")],
+ [("color", "[rgb]{0.502, 0.500, 0.250}")],
+ ),
+ (
+ [("color", "rgba(128,128,128,1)")],
+ [("color", "[rgb]{0.502, 0.502, 0.502}")],
+ ),
+ ([("color", "#FF00FF")], [("color", "[HTML]{FF00FF}")]),
+ ([("color", "#F0F")], [("color", "[HTML]{FF00FF}")]),
+ ([("font-weight", "bold")], [("bfseries", "")]), # test font-weight and types
+ ([("font-weight", "bolder")], [("bfseries", "")]),
+ ([("font-weight", "normal")], []),
+ ([("background-color", "red")], [("cellcolor", "{red}--lwrap")]),
+ (
+ [("background-color", "#FF00FF")], # test background-color command and wrap
+ [("cellcolor", "[HTML]{FF00FF}--lwrap")],
+ ),
+ ([("font-style", "italic")], [("itshape", "")]), # test font-style and types
+ ([("font-style", "oblique")], [("slshape", "")]),
+ ([("font-style", "normal")], []),
+ ([("color", "red /*--dwrap*/")], [("color", "{red}--dwrap")]), # css comments
+ ([("background-color", "red /* --dwrap */")], [("cellcolor", "{red}--dwrap")]),
+ ],
+)
+def test_parse_latex_css_conversion(css, expected):
+ result = _parse_latex_css_conversion(css)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "env, inner_env",
+ [
+ (None, "tabular"),
+ ("table", "tabular"),
+ ("longtable", "longtable"),
+ ],
+)
+@pytest.mark.parametrize(
+ "convert, exp", [(True, "bfseries"), (False, "font-weightbold")]
+)
+def test_parse_latex_css_convert_minimal(styler, env, inner_env, convert, exp):
+ # parameters ensure longtable template is also tested
+ styler.highlight_max(props="font-weight:bold;")
+ result = styler.to_latex(convert_css=convert, environment=env)
+ expected = dedent(
+ f"""\
+ 0 & 0 & \\{exp} -0.61 & ab \\\\
+ 1 & \\{exp} 1 & -1.22 & \\{exp} cd \\\\
+ \\end{{{inner_env}}}
+ """
+ )
+ assert expected in result
+
+
+def test_parse_latex_css_conversion_option():
+ css = [("command", "option--latex--wrap")]
+ expected = [("command", "option--wrap")]
+ result = _parse_latex_css_conversion(css)
+ assert result == expected
+
+
+def test_styler_object_after_render(styler):
+ # GH 42320
+ pre_render = styler._copy(deepcopy=True)
+ styler.to_latex(
+ column_format="rllr",
+ position="h",
+ position_float="centering",
+ hrules=True,
+ label="my lab",
+ caption="my cap",
+ )
+
+ assert pre_render.table_styles == styler.table_styles
+ assert pre_render.caption == styler.caption
+
+
def test_longtable_comprehensive(styler):
    """Longtable output with hrules, a label and a (full, short) caption."""
    result = styler.to_latex(
        environment="longtable", hrules=True, label="fig:A", caption=("full", "short")
    )
    # NOTE(review): header rows begin with a single space (empty index cell);
    # this leading whitespace appears mangled in the extracted source — verify.
    expected = dedent(
        """\
        \\begin{longtable}{lrrl}
        \\caption[short]{full} \\label{fig:A} \\\\
        \\toprule
         & A & B & C \\\\
        \\midrule
        \\endfirsthead
        \\caption[]{full} \\\\
        \\toprule
         & A & B & C \\\\
        \\midrule
        \\endhead
        \\midrule
        \\multicolumn{4}{r}{Continued on next page} \\\\
        \\midrule
        \\endfoot
        \\bottomrule
        \\endlastfoot
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{longtable}
        """
    )
    assert result == expected
+
+
def test_longtable_minimal(styler):
    """Longtable with no hrules/caption/label emits only the skeleton rows."""
    result = styler.to_latex(environment="longtable")
    # NOTE(review): header rows begin with a single space (empty index cell);
    # this leading whitespace appears mangled in the extracted source — verify.
    expected = dedent(
        """\
        \\begin{longtable}{lrrl}
         & A & B & C \\\\
        \\endfirsthead
         & A & B & C \\\\
        \\endhead
        \\multicolumn{4}{r}{Continued on next page} \\\\
        \\endfoot
        \\endlastfoot
        0 & 0 & -0.61 & ab \\\\
        1 & 1 & -1.22 & cd \\\\
        \\end{longtable}
        """
    )
    assert result == expected
+
+
@pytest.mark.parametrize(
    "sparse, exp, siunitx",
    [
        (True, "{} & \\multicolumn{2}{r}{A} & {B}", True),
        (False, "{} & {A} & {A} & {B}", True),
        (True, " & \\multicolumn{2}{r}{A} & B", False),
        (False, " & A & A & B", False),
    ],
)
def test_longtable_multiindex_columns(df, sparse, exp, siunitx):
    """MultiIndex column headers render sparsely or repeated, with or
    without siunitx brace-wrapping, inside the longtable header blocks."""
    cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df.columns = cidx
    with_si = "{} & {a} & {b} & {c} \\\\"
    without_si = " & a & b & c \\\\"
    expected = dedent(
        f"""\
        \\begin{{longtable}}{{l{"SS" if siunitx else "rr"}l}}
        {exp} \\\\
        {with_si if siunitx else without_si}
        \\endfirsthead
        {exp} \\\\
        {with_si if siunitx else without_si}
        \\endhead
        """
    )
    result = df.style.to_latex(
        environment="longtable", sparse_columns=sparse, siunitx=siunitx
    )
    assert expected in result
+
+
@pytest.mark.parametrize(
    "caption, cap_exp",
    [
        ("full", ("{full}", "")),
        (("full", "short"), ("{full}", "[short]")),
    ],
)
@pytest.mark.parametrize("label, lab_exp", [(None, ""), ("tab:A", " \\label{tab:A}")])
def test_longtable_caption_label(styler, caption, cap_exp, label, lab_exp):
    """The firsthead repeats the caption with an empty short form; the label
    is attached only to the first caption line."""
    cap_exp1 = f"\\caption{cap_exp[1]}{cap_exp[0]}"
    cap_exp2 = f"\\caption[]{cap_exp[0]}"

    # NOTE(review): the header row should start with a single space (empty
    # index cell); leading whitespace appears mangled in extraction — verify.
    expected = dedent(
        f"""\
        {cap_exp1}{lab_exp} \\\\
         & A & B & C \\\\
        \\endfirsthead
        {cap_exp2} \\\\
        """
    )
    assert expected in styler.to_latex(
        environment="longtable", caption=caption, label=label
    )
+
+
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize(
    "columns, siunitx",
    [
        (True, True),
        (True, False),
        (False, False),
    ],
)
def test_apply_map_header_render_mi(df_ext, index, columns, siunitx):
    """map_index styling appears in the LaTeX output only for the axis it
    was applied to, for both sparse MultiIndex rows and columns."""
    cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
    ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
    df_ext.index, df_ext.columns = ridx, cidx
    styler = df_ext.style

    # style any header cell containing "A", "Z" or "c" as bold
    func = lambda v: "bfseries: --rwrap" if "A" in v or "Z" in v or "c" in v else None

    if index:
        styler.map_index(func, axis="index")
    if columns:
        styler.map_index(func, axis="columns")

    result = styler.to_latex(siunitx=siunitx)

    expected_index = dedent(
        """\
        \\multirow[c]{2}{*}{\\bfseries{A}} & a & 0 & -0.610000 & ab \\\\
        \\bfseries{} & b & 1 & -1.220000 & cd \\\\
        B & \\bfseries{c} & 2 & -2.220000 & de \\\\
        """
    )
    assert (expected_index in result) is index

    exp_cols_si = dedent(
        """\
        {} & {} & \\multicolumn{2}{r}{\\bfseries{Z}} & {Y} \\\\
        {} & {} & {a} & {b} & {\\bfseries{c}} \\\\
        """
    )
    # NOTE(review): internal whitespace in this literal may have been
    # collapsed during extraction (empty header cells are space-padded in
    # pandas output) — verify against a real render.
    exp_cols_no_si = """\
 & & \\multicolumn{2}{r}{\\bfseries{Z}} & Y \\\\
 & & a & b & \\bfseries{c} \\\\
"""
    assert ((exp_cols_si if siunitx else exp_cols_no_si) in result) is columns
+
+
+def test_repr_option(styler):
+ assert "' in result
+ assert ' ' not in result
+
+
def test_tooltip_css_class(styler):
    """set_tooltips injects per-class CSS and ::after content; a second call
    replaces (not extends) the previous tooltip configuration."""
    # NOTE(review): pandas emits CSS properties with a two-space indent; the
    # single spaces in these literals look collapsed by extraction — verify.
    # GH 21266
    result = styler.set_tooltips(
        DataFrame([["tooltip"]], index=["x"], columns=["A"]),
        css_class="other-class",
        props=[("color", "green")],
    ).to_html()
    assert "#T_ .other-class {\n color: green;\n" in result
    assert '#T_ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' in result

    # GH 39563
    result = styler.set_tooltips(  # set_tooltips overwrites previous
        DataFrame([["tooltip"]], index=["x"], columns=["A"]),
        css_class="another-class",
        props="color:green;color:red;",
    ).to_html()
    assert "#T_ .another-class {\n color: green;\n color: red;\n}" in result
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd7b57df9baed18b172dc8398a61a49e9435f82a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py
@@ -0,0 +1,72 @@
+import locale
+
+import pytest
+
+from pandas._config import detect_console_encoding
+
+
class MockEncoding:
    """
    Stand-in for a stream whose ``encoding`` attribute either returns a
    value or raises. A ``str`` side effect is returned as-is; any other
    side effect is treated as an exception and raised on access.
    """

    def __init__(self, encoding) -> None:
        super().__init__()
        self.val = encoding

    @property
    def encoding(self):
        # Route through the static helper so tests can exercise it directly.
        return self.raise_or_return(self.val)

    @staticmethod
    def raise_or_return(val):
        if not isinstance(val, str):
            raise val
        return val
+
+
@pytest.mark.parametrize("empty,filled", [["stdin", "stdout"], ["stdout", "stdin"]])
def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled):
    # GH 21552: whichever of sys.stdout/sys.stdin reports a non-empty
    # encoding is the one detect_console_encoding() uses.
    with monkeypatch.context() as ctx:
        ctx.setattr(f"sys.{empty}", MockEncoding(""))
        ctx.setattr(f"sys.{filled}", MockEncoding(filled))
        assert detect_console_encoding() == filled
+
+
@pytest.mark.parametrize("encoding", [AttributeError, OSError, "ascii"])
def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding):
    # GH 21552: an unusable (raising or "ascii") stdout encoding makes the
    # detection fall back to locale.getpreferredencoding().
    with monkeypatch.context() as ctx:
        ctx.setattr("locale.getpreferredencoding", lambda: "foo")
        ctx.setattr("sys.stdout", MockEncoding(encoding))
        assert detect_console_encoding() == "foo"
+
+
@pytest.mark.parametrize(
    "std,locale",
    [
        ["ascii", "ascii"],
        ["ascii", locale.Error],
        [AttributeError, "ascii"],
        [AttributeError, locale.Error],
        [OSError, "ascii"],
        [OSError, locale.Error],
    ],
)
def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
    # When both the stdout/stdin encoding and locale preferred encoding checks
    # fail (or return 'ascii'), we should default to the sys default encoding.
    # GH 21552
    # NOTE: the parametrized argument ``locale`` shadows the imported
    # ``locale`` module inside this function body (intentional here, since
    # the module is only needed in the decorator above).
    with monkeypatch.context() as context:
        context.setattr(
            "locale.getpreferredencoding", lambda: MockEncoding.raise_or_return(locale)
        )
        context.setattr("sys.stdout", MockEncoding(std))
        context.setattr("sys.getdefaultencoding", lambda: "sysDefaultEncoding")
        assert detect_console_encoding() == "sysDefaultEncoding"
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d581b5b92e0c8cbcfe21dbbdfb0f99ca05c1a4e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py
@@ -0,0 +1,254 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ reset_option,
+ set_eng_float_format,
+)
+
+from pandas.io.formats.format import EngFormatter
+
+
@pytest.fixture(autouse=True)
def reset_float_format():
    # set_eng_float_format mutates the global "display.float_format" option;
    # restore the default after every test in this module (autouse).
    yield
    reset_option("display.float_format")
+
+
class TestEngFormatter:
    """Tests for EngFormatter: engineering notation (exponents in multiples
    of 3), optional SI prefixes, rounding, and NaN/inf handling."""

    def test_eng_float_formatter2(self, float_frame):
        # smoke test: repr must not raise under any accuracy/prefix setting
        df = float_frame
        df.loc[5] = 0

        set_eng_float_format()
        repr(df)

        set_eng_float_format(use_eng_prefix=True)
        repr(df)

        set_eng_float_format(accuracy=0)
        repr(df)

    def test_eng_float_formatter(self):
        df = DataFrame({"A": [1.41, 141.0, 14100, 1410000.0]})

        # NOTE(review): the expected to_string literals below appear to have
        # lost their column-alignment whitespace during extraction — verify
        # against a real render before relying on them.
        set_eng_float_format()
        result = df.to_string()
        expected = (
            " A\n"
            "0 1.410E+00\n"
            "1 141.000E+00\n"
            "2 14.100E+03\n"
            "3 1.410E+06"
        )
        assert result == expected

        set_eng_float_format(use_eng_prefix=True)
        result = df.to_string()
        expected = " A\n0 1.410\n1 141.000\n2 14.100k\n3 1.410M"
        assert result == expected

        set_eng_float_format(accuracy=0)
        result = df.to_string()
        expected = " A\n0 1E+00\n1 141E+00\n2 14E+03\n3 1E+06"
        assert result == expected

    def compare(self, formatter, input, output):
        # helper: single value round-trip through the formatter
        formatted_input = formatter(input)
        assert formatted_input == output

    def compare_all(self, formatter, in_out):
        """
        Parameters:
        -----------
        formatter: EngFormatter under test
        in_out: list of tuples. Each tuple = (number, expected_formatting)

        It is tested if 'formatter(number) == expected_formatting'.
        *number* should be >= 0 because formatter(-number) == fmt is also
        tested. *fmt* is derived from *expected_formatting*
        """
        for input, output in in_out:
            self.compare(formatter, input, output)
            # negated input: same digits, leading space replaced by '-'
            self.compare(formatter, -input, "-" + output[1:])

    def test_exponents_with_eng_prefix(self):
        # SI prefixes from yocto (y, 1e-24) up to yotta (Y, 1e24)
        formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
        f = np.sqrt(2)
        in_out = [
            (f * 10**-24, " 1.414y"),
            (f * 10**-23, " 14.142y"),
            (f * 10**-22, " 141.421y"),
            (f * 10**-21, " 1.414z"),
            (f * 10**-20, " 14.142z"),
            (f * 10**-19, " 141.421z"),
            (f * 10**-18, " 1.414a"),
            (f * 10**-17, " 14.142a"),
            (f * 10**-16, " 141.421a"),
            (f * 10**-15, " 1.414f"),
            (f * 10**-14, " 14.142f"),
            (f * 10**-13, " 141.421f"),
            (f * 10**-12, " 1.414p"),
            (f * 10**-11, " 14.142p"),
            (f * 10**-10, " 141.421p"),
            (f * 10**-9, " 1.414n"),
            (f * 10**-8, " 14.142n"),
            (f * 10**-7, " 141.421n"),
            (f * 10**-6, " 1.414u"),
            (f * 10**-5, " 14.142u"),
            (f * 10**-4, " 141.421u"),
            (f * 10**-3, " 1.414m"),
            (f * 10**-2, " 14.142m"),
            (f * 10**-1, " 141.421m"),
            (f * 10**0, " 1.414"),
            (f * 10**1, " 14.142"),
            (f * 10**2, " 141.421"),
            (f * 10**3, " 1.414k"),
            (f * 10**4, " 14.142k"),
            (f * 10**5, " 141.421k"),
            (f * 10**6, " 1.414M"),
            (f * 10**7, " 14.142M"),
            (f * 10**8, " 141.421M"),
            (f * 10**9, " 1.414G"),
            (f * 10**10, " 14.142G"),
            (f * 10**11, " 141.421G"),
            (f * 10**12, " 1.414T"),
            (f * 10**13, " 14.142T"),
            (f * 10**14, " 141.421T"),
            (f * 10**15, " 1.414P"),
            (f * 10**16, " 14.142P"),
            (f * 10**17, " 141.421P"),
            (f * 10**18, " 1.414E"),
            (f * 10**19, " 14.142E"),
            (f * 10**20, " 141.421E"),
            (f * 10**21, " 1.414Z"),
            (f * 10**22, " 14.142Z"),
            (f * 10**23, " 141.421Z"),
            (f * 10**24, " 1.414Y"),
            (f * 10**25, " 14.142Y"),
            (f * 10**26, " 141.421Y"),
        ]
        self.compare_all(formatter, in_out)

    def test_exponents_without_eng_prefix(self):
        # without prefixes the exponent is written explicitly, always a
        # multiple of 3 and clamped to [-24, 24]
        formatter = EngFormatter(accuracy=4, use_eng_prefix=False)
        f = np.pi
        in_out = [
            (f * 10**-24, " 3.1416E-24"),
            (f * 10**-23, " 31.4159E-24"),
            (f * 10**-22, " 314.1593E-24"),
            (f * 10**-21, " 3.1416E-21"),
            (f * 10**-20, " 31.4159E-21"),
            (f * 10**-19, " 314.1593E-21"),
            (f * 10**-18, " 3.1416E-18"),
            (f * 10**-17, " 31.4159E-18"),
            (f * 10**-16, " 314.1593E-18"),
            (f * 10**-15, " 3.1416E-15"),
            (f * 10**-14, " 31.4159E-15"),
            (f * 10**-13, " 314.1593E-15"),
            (f * 10**-12, " 3.1416E-12"),
            (f * 10**-11, " 31.4159E-12"),
            (f * 10**-10, " 314.1593E-12"),
            (f * 10**-9, " 3.1416E-09"),
            (f * 10**-8, " 31.4159E-09"),
            (f * 10**-7, " 314.1593E-09"),
            (f * 10**-6, " 3.1416E-06"),
            (f * 10**-5, " 31.4159E-06"),
            (f * 10**-4, " 314.1593E-06"),
            (f * 10**-3, " 3.1416E-03"),
            (f * 10**-2, " 31.4159E-03"),
            (f * 10**-1, " 314.1593E-03"),
            (f * 10**0, " 3.1416E+00"),
            (f * 10**1, " 31.4159E+00"),
            (f * 10**2, " 314.1593E+00"),
            (f * 10**3, " 3.1416E+03"),
            (f * 10**4, " 31.4159E+03"),
            (f * 10**5, " 314.1593E+03"),
            (f * 10**6, " 3.1416E+06"),
            (f * 10**7, " 31.4159E+06"),
            (f * 10**8, " 314.1593E+06"),
            (f * 10**9, " 3.1416E+09"),
            (f * 10**10, " 31.4159E+09"),
            (f * 10**11, " 314.1593E+09"),
            (f * 10**12, " 3.1416E+12"),
            (f * 10**13, " 31.4159E+12"),
            (f * 10**14, " 314.1593E+12"),
            (f * 10**15, " 3.1416E+15"),
            (f * 10**16, " 31.4159E+15"),
            (f * 10**17, " 314.1593E+15"),
            (f * 10**18, " 3.1416E+18"),
            (f * 10**19, " 31.4159E+18"),
            (f * 10**20, " 314.1593E+18"),
            (f * 10**21, " 3.1416E+21"),
            (f * 10**22, " 31.4159E+21"),
            (f * 10**23, " 314.1593E+21"),
            (f * 10**24, " 3.1416E+24"),
            (f * 10**25, " 31.4159E+24"),
            (f * 10**26, " 314.1593E+24"),
        ]
        self.compare_all(formatter, in_out)

    def test_rounding(self):
        # accuracy controls decimal places after the engineering mantissa
        formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
        in_out = [
            (5.55555, " 5.556"),
            (55.5555, " 55.556"),
            (555.555, " 555.555"),
            (5555.55, " 5.556k"),
            (55555.5, " 55.556k"),
            (555555, " 555.555k"),
        ]
        self.compare_all(formatter, in_out)

        formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
        in_out = [
            (5.55555, " 5.6"),
            (55.5555, " 55.6"),
            (555.555, " 555.6"),
            (5555.55, " 5.6k"),
            (55555.5, " 55.6k"),
            (555555, " 555.6k"),
        ]
        self.compare_all(formatter, in_out)

        formatter = EngFormatter(accuracy=0, use_eng_prefix=True)
        in_out = [
            (5.55555, " 6"),
            (55.5555, " 56"),
            (555.555, " 556"),
            (5555.55, " 6k"),
            (55555.5, " 56k"),
            (555555, " 556k"),
        ]
        self.compare_all(formatter, in_out)

        # zero keeps the requested accuracy and gets no prefix
        formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
        result = formatter(0)
        assert result == " 0.000"

    def test_nan(self):
        # Issue #11981

        formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
        result = formatter(np.nan)
        assert result == "NaN"

        # NaN produced by a pivot (missing combinations) must survive repr
        df = DataFrame(
            {
                "a": [1.5, 10.3, 20.5],
                "b": [50.3, 60.67, 70.12],
                "c": [100.2, 101.33, 120.33],
            }
        )
        pt = df.pivot_table(values="a", index="b", columns="c")
        set_eng_float_format(accuracy=1)
        result = pt.to_string()
        assert "NaN" in result

    def test_inf(self):
        # Issue #11981

        formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
        result = formatter(np.inf)
        assert result == "inf"
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ca29c219b55b0931885f2fbf92cbf1fd809c5b8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py
@@ -0,0 +1,2293 @@
+"""
+Tests for the file pandas.io.formats.format, *not* tests for general formatting
+of pandas objects.
+"""
+from datetime import datetime
+from io import StringIO
+from pathlib import Path
+import re
+from shutil import get_terminal_size
+
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ NaT,
+ Series,
+ Timestamp,
+ date_range,
+ get_option,
+ option_context,
+ read_csv,
+ reset_option,
+)
+
+from pandas.io.formats import printing
+import pandas.io.formats.format as fmt
+
+
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
    """
    A fixture yielding test ids for filepath_or_buffer testing.
    """
    # one id per supported output target: str path, os.PathLike, file object
    return request.param
+
+
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
    """
    Yield a string filepath, a path-like object, or a StringIO buffer,
    matching ``filepath_or_buffer_id``; a buffer must remain open on exit.
    """
    if filepath_or_buffer_id == "buffer":
        buf = StringIO()
        yield buf
        assert not buf.closed
        return
    assert isinstance(tmp_path, Path)
    target = tmp_path / "foo"
    yield target if filepath_or_buffer_id == "pathlike" else str(target)
+
+
@pytest.fixture
def assert_filepath_or_buffer_equals(
    filepath_or_buffer, filepath_or_buffer_id, encoding
):
    """
    Assertion helper for checking filepath_or_buffer.
    """
    if encoding is None:
        encoding = "utf-8"

    def _assert_filepath_or_buffer_equals(expected):
        # read back whatever to_string/to_csv wrote to the target
        if filepath_or_buffer_id == "buffer":
            result = filepath_or_buffer.getvalue()
        elif filepath_or_buffer_id == "pathlike":
            result = filepath_or_buffer.read_text(encoding=encoding)
        elif filepath_or_buffer_id == "string":
            with open(filepath_or_buffer, encoding=encoding) as f:
                result = f.read()
        assert result == expected

    return _assert_filepath_or_buffer_equals
+
+
+def has_info_repr(df):
+ r = repr(df)
+ c1 = r.split("\n")[0].startswith("
+ # 2. Index
+ # 3. Columns
+ # 4. dtype
+ # 5. memory usage
+ # 6. trailing newline
+ nv = len(r.split("\n")) == 6
+ return has_info and nv
+
+
def has_horizontally_truncated_repr(df):
    """Return True if repr(df) is column-truncated with a '...' marker."""
    rendered = repr(df)
    try:  # Check header row
        header_tokens = np.array(rendered.splitlines()[0].split())
        marker_col = np.where(header_tokens == "...")[0][0]
    except IndexError:
        return False
    # Make sure each row has this ... in the same place
    for _ in rendered.splitlines():
        if not rendered.split()[marker_col] == "...":
            return False
    return True
+
+
def has_vertically_truncated_repr(df):
    """Return True if any repr row consists solely of dots and spaces."""
    dots_row = re.compile(r"^[\.\ ]+$")
    return any(bool(dots_row.match(row)) for row in repr(df).splitlines())
+
+
def has_truncated_repr(df):
    """True if the repr is truncated horizontally, vertically, or both."""
    if has_horizontally_truncated_repr(df):
        return True
    return has_vertically_truncated_repr(df)
+
+
def has_doubly_truncated_repr(df):
    """True only when the repr is truncated in both directions."""
    if not has_horizontally_truncated_repr(df):
        return False
    return has_vertically_truncated_repr(df)
+
+
def has_expanded_repr(df):
    """True if the repr wrapped onto continuation blocks (a line ends '\\')."""
    return any(line.endswith("\\") for line in repr(df).split("\n"))
+
+
+class TestDataFrameFormatting:
+ def test_repr_truncation(self):
+ max_len = 20
+ with option_context("display.max_colwidth", max_len):
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).standard_normal(10),
+ "B": [
+ "a"
+ * np.random.default_rng(2).integers(max_len - 1, max_len + 1)
+ for _ in range(10)
+ ],
+ }
+ )
+ r = repr(df)
+ r = r[r.find("\n") + 1 :]
+
+ adj = printing.get_adjustment()
+
+ for line, value in zip(r.split("\n"), df["B"]):
+ if adj.len(value) + 1 > max_len:
+ assert "..." in line
+ else:
+ assert "..." not in line
+
+ with option_context("display.max_colwidth", 999999):
+ assert "..." not in repr(df)
+
+ with option_context("display.max_colwidth", max_len + 2):
+ assert "..." not in repr(df)
+
    def test_repr_truncation_preserves_na(self):
        """Row truncation must not replace pd.NA cells in the repr."""
        # https://github.com/pandas-dev/pandas/issues/55630
        # NOTE(review): the expected literal below appears to have lost its
        # "<NA>" tokens and alignment whitespace during extraction — verify
        # against a real render before relying on it.
        df = DataFrame({"a": [pd.NA for _ in range(10)]})
        with option_context("display.max_rows", 2, "display.show_dimensions", False):
            assert repr(df) == " a\n0 \n.. ...\n9 "
+
+ def test_max_colwidth_negative_int_raises(self):
+ # Deprecation enforced from:
+ # https://github.com/pandas-dev/pandas/issues/31532
+ with pytest.raises(
+ ValueError, match="Value must be a nonnegative integer or None"
+ ):
+ with option_context("display.max_colwidth", -1):
+ pass
+
    def test_repr_chop_threshold(self):
        """display.chop_threshold displays magnitudes below it as zero."""
        df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
        reset_option("display.chop_threshold")  # default None
        # NOTE(review): the expected literals below appear to have lost their
        # column-alignment whitespace during extraction — verify against a
        # real render before relying on them.
        assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"

        with option_context("display.chop_threshold", 0.2):
            assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"

        with option_context("display.chop_threshold", 0.6):
            assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"

        with option_context("display.chop_threshold", None):
            assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
+
    def test_repr_chop_threshold_column_below(self):
        """chop_threshold is applied per value, not per column."""
        # GH 6839: validation case
        # NOTE(review): expected literals appear whitespace-collapsed by
        # extraction (repr pads columns) — verify before relying on them.

        df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T

        with option_context("display.chop_threshold", 0):
            assert repr(df) == (
                " 0 1\n"
                "0 10.0 8.000000e-10\n"
                "1 20.0 -1.000000e-11\n"
                "2 30.0 2.000000e-09\n"
                "3 40.0 -2.000000e-11"
            )

        with option_context("display.chop_threshold", 1e-8):
            assert repr(df) == (
                " 0 1\n"
                "0 10.0 0.000000e+00\n"
                "1 20.0 0.000000e+00\n"
                "2 30.0 0.000000e+00\n"
                "3 40.0 0.000000e+00"
            )

        with option_context("display.chop_threshold", 5e-11):
            assert repr(df) == (
                " 0 1\n"
                "0 10.0 8.000000e-10\n"
                "1 20.0 0.000000e+00\n"
                "2 30.0 2.000000e-09\n"
                "3 40.0 0.000000e+00"
            )
+
+ def test_repr_no_backslash(self):
+ with option_context("mode.sim_interactive", True):
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+ assert "\\" not in repr(df)
+
+ def test_expand_frame_repr(self):
+ df_small = DataFrame("hello", index=[0], columns=[0])
+ df_wide = DataFrame("hello", index=[0], columns=range(10))
+ df_tall = DataFrame("hello", index=range(30), columns=range(5))
+
+ with option_context("mode.sim_interactive", True):
+ with option_context(
+ "display.max_columns",
+ 10,
+ "display.width",
+ 20,
+ "display.max_rows",
+ 20,
+ "display.show_dimensions",
+ True,
+ ):
+ with option_context("display.expand_frame_repr", True):
+ assert not has_truncated_repr(df_small)
+ assert not has_expanded_repr(df_small)
+ assert not has_truncated_repr(df_wide)
+ assert has_expanded_repr(df_wide)
+ assert has_vertically_truncated_repr(df_tall)
+ assert has_expanded_repr(df_tall)
+
+ with option_context("display.expand_frame_repr", False):
+ assert not has_truncated_repr(df_small)
+ assert not has_expanded_repr(df_small)
+ assert not has_horizontally_truncated_repr(df_wide)
+ assert not has_expanded_repr(df_wide)
+ assert has_vertically_truncated_repr(df_tall)
+ assert not has_expanded_repr(df_tall)
+
+ def test_repr_non_interactive(self):
+ # in non interactive mode, there can be no dependency on the
+ # result of terminal auto size detection
+ df = DataFrame("hello", index=range(1000), columns=range(5))
+
+ with option_context(
+ "mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
+ ):
+ assert not has_truncated_repr(df)
+ assert not has_expanded_repr(df)
+
+ def test_repr_truncates_terminal_size(self, monkeypatch):
+ # see gh-21180
+
+ terminal_size = (118, 96)
+ monkeypatch.setattr(
+ "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
+ )
+
+ index = range(5)
+ columns = MultiIndex.from_tuples(
+ [
+ ("This is a long title with > 37 chars.", "cat"),
+ ("This is a loooooonger title with > 43 chars.", "dog"),
+ ]
+ )
+ df = DataFrame(1, index=index, columns=columns)
+
+ result = repr(df)
+
+ h1, h2 = result.split("\n")[:2]
+ assert "long" in h1
+ assert "loooooonger" in h1
+ assert "cat" in h2
+ assert "dog" in h2
+
+ # regular columns
+ df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
+ result = repr(df2)
+
+ assert df2.columns[0] in result.split("\n")[0]
+
+ def test_repr_truncates_terminal_size_full(self, monkeypatch):
+ # GH 22984 ensure entire window is filled
+ terminal_size = (80, 24)
+ df = DataFrame(np.random.default_rng(2).random((1, 7)))
+
+ monkeypatch.setattr(
+ "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
+ )
+ assert "..." not in str(df)
+
    def test_repr_truncation_column_size(self):
        # dataframe with last column very wide -> check it is not used to
        # determine size of truncation (...) column
        # NOTE(review): the width of the " ... " literal below appears to
        # have been collapsed during extraction (upstream uses a wider
        # padded marker) — verify against a real render.
        df = DataFrame(
            {
                "a": [108480, 30830],
                "b": [12345, 12345],
                "c": [12345, 12345],
                "d": [12345, 12345],
                "e": ["a" * 50] * 2,
            }
        )
        assert "..." in str(df)
        assert " ... " not in str(df)
+
    def test_repr_max_columns_max_rows(self):
        """Interplay of max_rows/max_columns/width with the live terminal.

        Skipped on very small terminals because the frames are sized
        relative to the detected terminal dimensions.
        """
        term_width, term_height = get_terminal_size()
        if term_width < 10 or term_height < 10:
            pytest.skip(f"terminal size too small, {term_width} x {term_height}")

        def mkframe(n):
            # square frame of zeros with zero-padded string labels
            index = [f"{i:05d}" for i in range(n)]
            return DataFrame(0, index, index)

        df6 = mkframe(6)
        df10 = mkframe(10)
        with option_context("mode.sim_interactive", True):
            with option_context("display.width", term_width * 2):
                with option_context("display.max_rows", 5, "display.max_columns", 5):
                    assert not has_expanded_repr(mkframe(4))
                    assert not has_expanded_repr(mkframe(5))
                    assert not has_expanded_repr(df6)
                    assert has_doubly_truncated_repr(df6)

                with option_context("display.max_rows", 20, "display.max_columns", 10):
                    # Out off max_columns boundary, but no extending
                    # since not exceeding width
                    assert not has_expanded_repr(df6)
                    assert not has_truncated_repr(df6)

                with option_context("display.max_rows", 9, "display.max_columns", 10):
                    # out vertical bounds can not result in expanded repr
                    assert not has_expanded_repr(df10)
                    assert has_vertically_truncated_repr(df10)

            # width=None in terminal, auto detection
            with option_context(
                "display.max_columns",
                100,
                "display.max_rows",
                term_width * 20,
                "display.width",
                None,
            ):
                df = mkframe((term_width // 7) - 2)
                assert not has_expanded_repr(df)
                df = mkframe((term_width // 7) + 2)
                printing.pprint_thing(df._repr_fits_horizontal_())
                assert has_expanded_repr(df)
+
    def test_repr_min_rows(self):
        """display.min_rows controls how many rows survive truncation."""
        # NOTE(review): the "5 5" / "5 " literals below look
        # whitespace-collapsed by extraction (repr pads columns) — verify.
        df = DataFrame({"a": range(20)})

        # default setting no truncation even if above min_rows
        assert ".." not in repr(df)
        assert ".." not in df._repr_html_()

        df = DataFrame({"a": range(61)})

        # default of max_rows 60 triggers truncation if above
        assert ".." in repr(df)
        assert ".." in df._repr_html_()

        with option_context("display.max_rows", 10, "display.min_rows", 4):
            # truncated after first two rows
            assert ".." in repr(df)
            assert "2 " not in repr(df)
            assert "..." in df._repr_html_()
            assert "2 " not in df._repr_html_()

        with option_context("display.max_rows", 12, "display.min_rows", None):
            # when set to None, follow value of max_rows
            assert "5 5" in repr(df)
            assert "5 " in df._repr_html_()

        with option_context("display.max_rows", 10, "display.min_rows", 12):
            # when set value higher as max_rows, use the minimum
            assert "5 5" not in repr(df)
            assert "5 " not in df._repr_html_()

        with option_context("display.max_rows", None, "display.min_rows", 12):
            # max_rows of None -> never truncate
            assert ".." not in repr(df)
            assert ".." not in df._repr_html_()
+
    def test_str_max_colwidth(self):
        """max_colwidth elides long string cells in str(df)."""
        # GH 7856
        # NOTE(review): the expected literals below appear to have lost their
        # column-alignment whitespace during extraction — verify.
        df = DataFrame(
            [
                {
                    "a": "foo",
                    "b": "bar",
                    "c": "uncomfortably long line with lots of stuff",
                    "d": 1,
                },
                {"a": "foo", "b": "bar", "c": "stuff", "d": 1},
            ]
        )
        # NOTE(review): return value discarded — set_index is not in-place,
        # so df is unchanged by this call; presumably intentional upstream.
        df.set_index(["a", "b", "c"])
        assert str(df) == (
            " a b c d\n"
            "0 foo bar uncomfortably long line with lots of stuff 1\n"
            "1 foo bar stuff 1"
        )
        with option_context("max_colwidth", 20):
            assert str(df) == (
                " a b c d\n"
                "0 foo bar uncomfortably lo... 1\n"
                "1 foo bar stuff 1"
            )
+
+ def test_auto_detect(self):
+ term_width, term_height = get_terminal_size()
+ fac = 1.05 # Arbitrary large factor to exceed term width
+ cols = range(int(term_width * fac))
+ index = range(10)
+ df = DataFrame(index=index, columns=cols)
+ with option_context("mode.sim_interactive", True):
+ with option_context("display.max_rows", None):
+ with option_context("display.max_columns", None):
+ # Wrap around with None
+ assert has_expanded_repr(df)
+ with option_context("display.max_rows", 0):
+ with option_context("display.max_columns", 0):
+ # Truncate with auto detection.
+ assert has_horizontally_truncated_repr(df)
+
+ index = range(int(term_height * fac))
+ df = DataFrame(index=index, columns=cols)
+ with option_context("display.max_rows", 0):
+ with option_context("display.max_columns", None):
+ # Wrap around with None
+ assert has_expanded_repr(df)
+ # Truncate vertically
+ assert has_vertically_truncated_repr(df)
+
+ with option_context("display.max_rows", None):
+ with option_context("display.max_columns", 0):
+ assert has_horizontally_truncated_repr(df)
+
+ def test_to_string_repr_unicode2(self):
+ idx = Index(["abc", "\u03c3a", "aegdvg"])
+ ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
+ rs = repr(ser).split("\n")
+ line_len = len(rs[0])
+ for line in rs[1:]:
+ try:
+ line = line.decode(get_option("display.encoding"))
+ except AttributeError:
+ pass
+ if not line.startswith("dtype:"):
+ assert len(line) == line_len
+
    def test_east_asian_unicode_false(self):
        # not aligned properly because of east asian width
        # With display.unicode.east_asian_width left at its default (False),
        # wide CJK characters are counted as one column each, so the output
        # is visually misaligned but must still match these exact strings.

        # mid col
        df = DataFrame(
            {"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
            index=["a", "bb", "c", "ddd"],
        )
        expected = (
            "          a      b\na         あ      1\n"
            "bb      いいい    222\nc         う  33333\n"
            "ddd  ええええええ      4"
        )
        assert repr(df) == expected

        # last col
        df = DataFrame(
            {"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
            index=["a", "bb", "c", "ddd"],
        )
        expected = (
            "         a       b\na        1       あ\n"
            "bb     222     いいい\nc    33333       う\n"
            "ddd      4  ええええええ"
        )
        assert repr(df) == expected

        # all col
        df = DataFrame(
            {
                "a": ["あああああ", "い", "う", "えええ"],
                "b": ["あ", "いいい", "う", "ええええええ"],
            },
            index=["a", "bb", "c", "ddd"],
        )
        expected = (
            "         a       b\na    あああああ      あ\n"
            "bb       い     いいい\nc        う       う\n"
            "ddd    えええ  ええええええ"
        )
        assert repr(df) == expected

        # column name
        df = DataFrame(
            {
                "b": ["あ", "いいい", "う", "ええええええ"],
                "あああああ": [1, 222, 33333, 4],
            },
            index=["a", "bb", "c", "ddd"],
        )
        expected = (
            "          b  あああああ\na         あ      1\n"
            "bb      いいい    222\nc         う  33333\n"
            "ddd  ええええええ      4"
        )
        assert repr(df) == expected

        # index
        df = DataFrame(
            {
                "a": ["あああああ", "い", "う", "えええ"],
                "b": ["あ", "いいい", "う", "ええええええ"],
            },
            index=["あああ", "いいいいいい", "うう", "え"],
        )
        expected = (
            "            a       b\nあああ     あああああ      あ\n"
            "いいいいいい      い     いいい\nうう          う       う\n"
            "え         えええ  ええええええ"
        )
        assert repr(df) == expected

        # index name
        df = DataFrame(
            {
                "a": ["あああああ", "い", "う", "えええ"],
                "b": ["あ", "いいい", "う", "ええええええ"],
            },
            index=Index(["あ", "い", "うう", "え"], name="おおおお"),
        )
        expected = (
            "          a       b\n"
            "おおおお               \n"
            "あ     あああああ      あ\n"
            "い         い     いいい\n"
            "うう         う       う\n"
            "え       えええ  ええええええ"
        )
        assert repr(df) == expected

        # all
        df = DataFrame(
            {
                "あああ": ["あああ", "い", "う", "えええええ"],
                "いいいいい": ["あ", "いいい", "う", "ええ"],
            },
            index=Index(["あ", "いいい", "うう", "え"], name="お"),
        )
        expected = (
            "       あああ いいいいい\n"
            "お                 \n"
            "あ      あああ      あ\n"
            "いいい       い    いいい\n"
            "うう        う      う\n"
            "え    えええええ     ええ"
        )
        assert repr(df) == expected

        # MultiIndex
        idx = MultiIndex.from_tuples(
            [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
        )
        df = DataFrame(
            {
                "a": ["あああああ", "い", "う", "えええ"],
                "b": ["あ", "いいい", "う", "ええええええ"],
            },
            index=idx,
        )
        expected = (
            "              a       b\n"
            "あ   いい    あああああ      あ\n"
            "う   え         い     いいい\n"
            "おおお かかかか      う       う\n"
            "き   くく      えええ  ええええええ"
        )
        assert repr(df) == expected

        # truncate
        with option_context("display.max_rows", 3, "display.max_columns", 3):
            df = DataFrame(
                {
                    "a": ["あああああ", "い", "う", "えええ"],
                    "b": ["あ", "いいい", "う", "ええええええ"],
                    "c": ["お", "か", "ききき", "くくくくくく"],
                    "ああああ": ["さ", "し", "す", "せ"],
                },
                columns=["a", "b", "c", "ああああ"],
            )

            expected = (
                "        a  ... ああああ\n0   あああああ  ...    さ\n"
                "..    ...  ...  ...\n3     えええ  ...    せ\n"
                "\n[4 rows x 4 columns]"
            )
            assert repr(df) == expected

            # non-ascii row labels must survive truncation as well
            df.index = ["あああ", "いいいい", "う", "aaa"]
            expected = (
                "         a  ... ああああ\nあああ  あああああ  ...    さ\n"
                "..     ...  ...  ...\naaa    えええ  ...    せ\n"
                "\n[4 rows x 4 columns]"
            )
            assert repr(df) == expected
+
    def test_east_asian_unicode_true(self):
        # Enable Unicode option -----------------------------------------
        # With display.unicode.east_asian_width enabled, wide CJK characters
        # count as two columns, so padding (including ideographic spaces)
        # produces properly aligned output.
        with option_context("display.unicode.east_asian_width", True):
            # mid col
            df = DataFrame(
                {"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
                index=["a", "bb", "c", "ddd"],
            )
            expected = (
                "                a      b\na              あ      1\n"
                "bb         いいい    222\nc              う  33333\n"
                "ddd  ええええええ      4"
            )
            assert repr(df) == expected

            # last col
            df = DataFrame(
                {"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
                index=["a", "bb", "c", "ddd"],
            )
            expected = (
                "         a             b\na        1            あ\n"
                "bb     222        いいい\nc    33333            う\n"
                "ddd      4  ええええええ"
            )
            assert repr(df) == expected

            # all col
            df = DataFrame(
                {
                    "a": ["あああああ", "い", "う", "えええ"],
                    "b": ["あ", "いいい", "う", "ええええええ"],
                },
                index=["a", "bb", "c", "ddd"],
            )
            expected = (
                "              a             b\n"
                "a    あああああ            あ\n"
                "bb           い        いいい\n"
                "c            う            う\n"
                "ddd      えええ  ええええええ"
            )
            assert repr(df) == expected

            # column name
            df = DataFrame(
                {
                    "b": ["あ", "いいい", "う", "ええええええ"],
                    "あああああ": [1, 222, 33333, 4],
                },
                index=["a", "bb", "c", "ddd"],
            )
            expected = (
                "                b  あああああ\n"
                "a              あ           1\n"
                "bb         いいい         222\n"
                "c              う       33333\n"
                "ddd  ええええええ           4"
            )
            assert repr(df) == expected

            # index
            df = DataFrame(
                {
                    "a": ["あああああ", "い", "う", "えええ"],
                    "b": ["あ", "いいい", "う", "ええええええ"],
                },
                index=["あああ", "いいいいいい", "うう", "え"],
            )
            expected = (
                "                       a             b\n"
                "あああ        あああああ            あ\n"
                "いいいいいい          い        いいい\n"
                "うう                  う            う\n"
                "え                えええ  ええええええ"
            )
            assert repr(df) == expected

            # index name
            df = DataFrame(
                {
                    "a": ["あああああ", "い", "う", "えええ"],
                    "b": ["あ", "いいい", "う", "ええええええ"],
                },
                index=Index(["あ", "い", "うう", "え"], name="おおおお"),
            )
            expected = (
                "                   a             b\n"
                "おおおお                          \n"
                "あ        あああああ            あ\n"
                "い                い        いいい\n"
                "うう              う            う\n"
                "え            えええ  ええええええ"
            )
            assert repr(df) == expected

            # all
            df = DataFrame(
                {
                    "あああ": ["あああ", "い", "う", "えええええ"],
                    "いいいいい": ["あ", "いいい", "う", "ええ"],
                },
                index=Index(["あ", "いいい", "うう", "え"], name="お"),
            )
            expected = (
                "            あああ  いいいいい\n"
                "お                            \n"
                "あ          あああ          あ\n"
                "いいい          い      いいい\n"
                "うう            う          う\n"
                "え      えええええ        ええ"
            )
            assert repr(df) == expected

            # MultiIndex
            idx = MultiIndex.from_tuples(
                [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
            )
            df = DataFrame(
                {
                    "a": ["あああああ", "い", "う", "えええ"],
                    "b": ["あ", "いいい", "う", "ええええええ"],
                },
                index=idx,
            )
            expected = (
                "                          a             b\n"
                "あ     いい      あああああ            あ\n"
                "う     え                い        いいい\n"
                "おおお かかかか          う            う\n"
                "き     くく          えええ  ええええええ"
            )
            assert repr(df) == expected

            # truncate
            with option_context("display.max_rows", 3, "display.max_columns", 3):
                df = DataFrame(
                    {
                        "a": ["あああああ", "い", "う", "えええ"],
                        "b": ["あ", "いいい", "う", "ええええええ"],
                        "c": ["お", "か", "ききき", "くくくくくく"],
                        "ああああ": ["さ", "し", "す", "せ"],
                    },
                    columns=["a", "b", "c", "ああああ"],
                )

                expected = (
                    "             a  ...  ああああ\n"
                    "0   あああああ  ...        さ\n"
                    "..         ...  ...       ...\n"
                    "3       えええ  ...        せ\n"
                    "\n[4 rows x 4 columns]"
                )
                assert repr(df) == expected

                # wide row labels under truncation
                df.index = ["あああ", "いいいい", "う", "aaa"]
                expected = (
                    "                 a  ...  ああああ\n"
                    "あああ  あああああ  ...        さ\n"
                    "...            ...  ...       ...\n"
                    "aaa         えええ  ...        せ\n"
                    "\n[4 rows x 4 columns]"
                )
                assert repr(df) == expected

            # ambiguous unicode
            # ¡ has "ambiguous" east-asian width; it must still align.
            df = DataFrame(
                {
                    "b": ["あ", "いいい", "¡¡", "ええええええ"],
                    "あああああ": [1, 222, 33333, 4],
                },
                index=["a", "bb", "c", "¡¡¡"],
            )
            expected = (
                "                b  あああああ\n"
                "a              あ           1\n"
                "bb         いいい         222\n"
                "c              ¡¡       33333\n"
                "¡¡¡  ええええええ           4"
            )
            assert repr(df) == expected
+
+ def test_to_string_buffer_all_unicode(self):
+ buf = StringIO()
+
+ empty = DataFrame({"c/\u03c3": Series(dtype=object)})
+ nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
+
+ print(empty, file=buf)
+ print(nonempty, file=buf)
+
+ # this should work
+ buf.getvalue()
+
    @pytest.mark.parametrize(
        "index_scalar",
        [
            "a" * 10,
            1,
            Timestamp(2020, 1, 1),
            pd.Period("2020-01-01"),
        ],
    )
    @pytest.mark.parametrize("h", [10, 20])
    @pytest.mark.parametrize("w", [10, 20])
    def test_to_string_truncate_indices(self, index_scalar, h, w):
        # Truncation triggers exactly when the frame exceeds max_rows /
        # max_columns (15), independent of the index label type.
        with option_context("display.expand_frame_repr", False):
            df = DataFrame(
                index=[index_scalar] * h, columns=[str(i) * 10 for i in range(w)]
            )
            with option_context("display.max_rows", 15):
                if h == 20:
                    assert has_vertically_truncated_repr(df)
                else:
                    assert not has_vertically_truncated_repr(df)
            with option_context("display.max_columns", 15):
                if w == 20:
                    assert has_horizontally_truncated_repr(df)
                else:
                    assert not has_horizontally_truncated_repr(df)
            with option_context("display.max_rows", 15, "display.max_columns", 15):
                # doubly truncated only when both dimensions overflow
                if h == 20 and w == 20:
                    assert has_doubly_truncated_repr(df)
                else:
                    assert not has_doubly_truncated_repr(df)
+
    def test_to_string_truncate_multilevel(self):
        # An 8x8 frame with a 2-level MultiIndex on both axes is truncated
        # in both directions under max_rows/max_columns of 7.
        arrays = [
            ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
            ["one", "two", "one", "two", "one", "two", "one", "two"],
        ]
        df = DataFrame(index=arrays, columns=arrays)
        with option_context("display.max_rows", 7, "display.max_columns", 7):
            assert has_doubly_truncated_repr(df)
+
    @pytest.mark.parametrize("dtype", ["object", "datetime64[us]"])
    def test_truncate_with_different_dtypes(self, dtype):
        # 11594, 12045
        # when truncated the dtypes of the splits can differ

        # 11594
        # datetime(1012, ...) is deliberately outside the ns-datetime range,
        # forcing a non-ns dtype; the footer must still show `dtype`.
        ser = Series(
            [datetime(2012, 1, 1)] * 10
            + [datetime(1012, 1, 2)]
            + [datetime(2012, 1, 3)] * 10,
            dtype=dtype,
        )

        with option_context("display.max_rows", 8):
            result = str(ser)
            assert dtype in result
+
+ def test_truncate_with_different_dtypes2(self):
+ # 12045
+ df = DataFrame({"text": ["some words"] + [None] * 9}, dtype=object)
+
+ with option_context("display.max_rows", 8, "display.max_columns", 3):
+ result = str(df)
+ assert "None" in result
+ assert "NaN" not in result
+
+ def test_truncate_with_different_dtypes_multiindex(self):
+ # GH#13000
+ df = DataFrame({"Vals": range(100)})
+ frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
+ result = repr(frame)
+
+ result2 = repr(frame.iloc[:5])
+ assert result.startswith(result2)
+
    def test_datetimelike_frame(self):
        # GH 12211
        # Truncated reprs of tz-aware datetime columns mixed with NaT keep
        # the tz offsets and the NaT markers on both sides of the ellipsis.
        df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})

        with option_context("display.max_rows", 5):
            result = str(df)
            assert "2013-01-01 00:00:00+00:00" in result
            assert "NaT" in result
            assert "..." in result
            assert "[6 rows x 1 columns]" in result

        # tz-aware values first, NaT in the tail
        dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
        df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        with option_context("display.max_rows", 5):
            expected = (
                "                          dt   x\n"
                "0  2011-01-01 00:00:00-05:00   1\n"
                "1  2011-01-01 00:00:00-05:00   2\n"
                "..                       ...  ..\n"
                "8                        NaT   9\n"
                "9                        NaT  10\n\n"
                "[10 rows x 2 columns]"
            )
            assert repr(df) == expected

        # NaT first, tz-aware values in the tail
        dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
        df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        with option_context("display.max_rows", 5):
            expected = (
                "                          dt   x\n"
                "0                        NaT   1\n"
                "1                        NaT   2\n"
                "..                       ...  ..\n"
                "8  2011-01-01 00:00:00-05:00   9\n"
                "9  2011-01-01 00:00:00-05:00  10\n\n"
                "[10 rows x 2 columns]"
            )
            assert repr(df) == expected

        # mixed timezones across the truncation split
        dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
            Timestamp("2011-01-01", tz="US/Eastern")
        ] * 5
        df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
        with option_context("display.max_rows", 5):
            expected = (
                "                           dt   x\n"
                "0   2011-01-01 00:00:00+09:00   1\n"
                "1   2011-01-01 00:00:00+09:00   2\n"
                "..                        ...  ..\n"
                "8   2011-01-01 00:00:00-05:00   9\n"
                "9   2011-01-01 00:00:00-05:00  10\n\n"
                "[10 rows x 2 columns]"
            )
            assert repr(df) == expected
+
    @pytest.mark.parametrize(
        "start_date",
        [
            "2017-01-01 23:59:59.999999999",
            "2017-01-01 23:59:59.99999999",
            "2017-01-01 23:59:59.9999999",
            "2017-01-01 23:59:59.999999",
            "2017-01-01 23:59:59.99999",
            "2017-01-01 23:59:59.9999",
        ],
    )
    def test_datetimeindex_highprecision(self, start_date):
        # GH19030
        # Check that high-precision time values for the end of day are
        # included in repr for DatetimeIndex
        # ... both as a datetime column of a frame:
        df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
        result = str(df)
        assert start_date in result

        # ... and as the frame's DatetimeIndex:
        dti = date_range(start=start_date, freq="D", periods=5)
        df = DataFrame({"A": range(5)}, index=dti)
        result = str(df.index)
        assert start_date in result
+
    def test_string_repr_encoding(self, datapath):
        # repr of latin1-decoded CSV data (frame and column) must not raise.
        filepath = datapath("io", "parser", "data", "unicode_series.csv")
        df = read_csv(filepath, header=None, encoding="latin1")
        repr(df)
        repr(df[1])
+
+ def test_repr_corner(self):
+ # representing infs poses no problems
+ df = DataFrame({"foo": [-np.inf, np.inf]})
+ repr(df)
+
+ def test_frame_info_encoding(self):
+ index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
+ with option_context("display.max_rows", 1):
+ df = DataFrame(columns=["a", "b", "c"], index=index)
+ repr(df)
+ repr(df.T)
+
    def test_wide_repr(self):
        # A frame wider than the display: the single-block repr and the
        # expanded (wrapped) repr differ, and a wider display shortens the
        # wrapped form.
        with option_context(
            "mode.sim_interactive",
            True,
            "display.show_dimensions",
            True,
            "display.max_columns",
            20,
        ):
            max_cols = get_option("display.max_columns")
            df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
            with option_context("display.expand_frame_repr", False):
                rep_str = repr(df)

            assert f"10 rows x {max_cols - 1} columns" in rep_str
            with option_context("display.expand_frame_repr", True):
                wide_repr = repr(df)
            assert rep_str != wide_repr

            with option_context("display.width", 120):
                wider_repr = repr(df)
                assert len(wider_repr) < len(wide_repr)
+
    def test_wide_repr_wide_columns(self):
        # Three 90-char column names wrap to a fixed number of repr lines.
        with option_context("mode.sim_interactive", True, "display.max_columns", 20):
            df = DataFrame(
                np.random.default_rng(2).standard_normal((5, 3)),
                columns=["a" * 90, "b" * 90, "c" * 90],
            )
            rep_str = repr(df)

            assert len(rep_str.splitlines()) == 20
+
    def test_wide_repr_named(self):
        # As test_wide_repr, and every wrapped chunk repeats the index name.
        with option_context("mode.sim_interactive", True, "display.max_columns", 20):
            max_cols = get_option("display.max_columns")
            df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
            df.index.name = "DataFrame Index"
            with option_context("display.expand_frame_repr", False):
                rep_str = repr(df)
            with option_context("display.expand_frame_repr", True):
                wide_repr = repr(df)
            assert rep_str != wide_repr

            with option_context("display.width", 150):
                wider_repr = repr(df)
                assert len(wider_repr) < len(wide_repr)

            # every 13th line starts a new wrapped chunk with the index name
            for line in wide_repr.splitlines()[1::13]:
                assert "DataFrame Index" in line
+
    def test_wide_repr_multiindex(self):
        # Wrapped repr of a frame with a 2-level MultiIndex repeats both
        # level names in every chunk.
        with option_context("mode.sim_interactive", True, "display.max_columns", 20):
            midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2)
            max_cols = get_option("display.max_columns")
            df = DataFrame([["a" * 25] * (max_cols - 1)] * 10, index=midx)
            df.index.names = ["Level 0", "Level 1"]
            with option_context("display.expand_frame_repr", False):
                rep_str = repr(df)
            with option_context("display.expand_frame_repr", True):
                wide_repr = repr(df)
            assert rep_str != wide_repr

            with option_context("display.width", 150):
                wider_repr = repr(df)
                assert len(wider_repr) < len(wide_repr)

            for line in wide_repr.splitlines()[1::13]:
                assert "Level 0 Level 1" in line
+
    def test_wide_repr_multiindex_cols(self):
        # MultiIndex on both axes: wrapped repr still differs from the
        # single-block repr and shrinks when the display widens.
        with option_context("mode.sim_interactive", True, "display.max_columns", 20):
            max_cols = get_option("display.max_columns")
            midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2)
            mcols = MultiIndex.from_arrays([["b" * 3] * (max_cols - 1)] * 2)
            df = DataFrame(
                [["c" * 25] * (max_cols - 1)] * 10, index=midx, columns=mcols
            )
            df.index.names = ["Level 0", "Level 1"]
            with option_context("display.expand_frame_repr", False):
                rep_str = repr(df)
            with option_context("display.expand_frame_repr", True):
                wide_repr = repr(df)
            assert rep_str != wide_repr

        with option_context("display.width", 150, "display.max_columns", 20):
            wider_repr = repr(df)
            assert len(wider_repr) < len(wide_repr)
+
+ def test_wide_repr_unicode(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ max_cols = 20
+ df = DataFrame([["a" * 25] * 10] * (max_cols - 1))
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ def test_wide_repr_wide_long_columns(self):
+ with option_context("mode.sim_interactive", True):
+ df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
+
+ result = repr(df)
+ assert "ccccc" in result
+ assert "ddddd" in result
+
+ def test_long_series(self):
+ n = 1000
+ s = Series(
+ np.random.default_rng(2).integers(-50, 50, n),
+ index=[f"s{x:04d}" for x in range(n)],
+ dtype="int64",
+ )
+
+ str_rep = str(s)
+ nmatches = len(re.findall("dtype", str_rep))
+ assert nmatches == 1
+
    def test_to_string_ascii_error(self):
        # repr must not raise on cells containing raw UTF-8 escape bytes.
        data = [
            (
                "0  ",
                "                        .gitignore ",
                "                     5 ",
                " \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
            )
        ]
        df = DataFrame(data)

        # it works!
        repr(df)
+
    def test_show_dimensions(self):
        # The "5 rows" dimensions footer appears in str() and _repr_html_()
        # only when show_dimensions is True, or "truncate" with an actually
        # truncated frame.
        df = DataFrame(123, index=range(10, 15), columns=range(30))

        # show_dimensions=True: footer always present
        with option_context(
            "display.max_rows",
            10,
            "display.max_columns",
            40,
            "display.width",
            500,
            "display.expand_frame_repr",
            "info",
            "display.show_dimensions",
            True,
        ):
            assert "5 rows" in str(df)
            assert "5 rows" in df._repr_html_()
        # show_dimensions=False: footer never present
        with option_context(
            "display.max_rows",
            10,
            "display.max_columns",
            40,
            "display.width",
            500,
            "display.expand_frame_repr",
            "info",
            "display.show_dimensions",
            False,
        ):
            assert "5 rows" not in str(df)
            assert "5 rows" not in df._repr_html_()
        # "truncate" + frame actually truncated: footer present
        with option_context(
            "display.max_rows",
            2,
            "display.max_columns",
            2,
            "display.width",
            500,
            "display.expand_frame_repr",
            "info",
            "display.show_dimensions",
            "truncate",
        ):
            assert "5 rows" in str(df)
            assert "5 rows" in df._repr_html_()
        # "truncate" but nothing truncated: footer absent
        with option_context(
            "display.max_rows",
            10,
            "display.max_columns",
            40,
            "display.width",
            500,
            "display.expand_frame_repr",
            "info",
            "display.show_dimensions",
            "truncate",
        ):
            assert "5 rows" not in str(df)
            assert "5 rows" not in df._repr_html_()
+
    def test_info_repr(self):
        # GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
        # the terminal size to ensure that we try to print something "too big"
        term_width, term_height = get_terminal_size()

        max_rows = 60
        max_cols = 20 + (max(term_width, 80) - 80) // 4
        # Long
        # one row past max_rows: vertically truncated, or info view when
        # display.large_repr is "info"
        h, w = max_rows + 1, max_cols - 1
        df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
        assert has_vertically_truncated_repr(df)
        with option_context("display.large_repr", "info"):
            assert has_info_repr(df)

        # Wide
        # one column past max_cols: horizontally truncated, or info view
        h, w = max_rows - 1, max_cols + 1
        df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
        assert has_horizontally_truncated_repr(df)
        with option_context(
            "display.large_repr", "info", "display.max_columns", max_cols
        ):
            assert has_info_repr(df)
+
    def test_info_repr_max_cols(self):
        # GH #6939
        # max_info_columns controls whether the info view is the short
        # (non-verbose) or the per-column (verbose) variant.
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
        with option_context(
            "display.large_repr",
            "info",
            "display.max_columns",
            1,
            "display.max_info_columns",
            4,
        ):
            # 5 columns > max_info_columns=4 -> non-verbose info
            assert has_non_verbose_info_repr(df)

        with option_context(
            "display.large_repr",
            "info",
            "display.max_columns",
            1,
            "display.max_info_columns",
            5,
        ):
            # 5 columns <= max_info_columns=5 -> verbose info
            assert not has_non_verbose_info_repr(df)

        # FIXME: don't leave commented-out
        # test verbose overrides
        # set_option('display.max_info_columns', 4) # exceeded
+
+ def test_pprint_pathological_object(self):
+ """
+ If the test fails, it at least won't hang.
+ """
+
+ class A:
+ def __getitem__(self, key):
+ return 3 # obviously simplified
+
+ df = DataFrame([A()])
+ repr(df) # just don't die
+
    def test_float_trim_zeros(self):
        # Large floats keep their exponent ("+10" / "+010") in the frame
        # repr; the header line (first iteration) is skipped via `skip`.
        vals = [
            2.08430917305e10,
            3.52205017305e10,
            2.30674817305e10,
            2.03954217305e10,
            5.59897817305e10,
        ]
        skip = True
        for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
            if line.startswith("dtype:"):
                continue
            if _three_digit_exp():
                assert ("+010" in line) or skip
            else:
                assert ("+10" in line) or skip
            skip = False
+
    @pytest.mark.parametrize(
        "data, expected",
        [
            (["3.50"], "0    3.50\ndtype: object"),
            ([1.20, "1.00"], "0     1.2\n1    1.00\ndtype: object"),
            ([np.nan], "0   NaN\ndtype: float64"),
            ([None], "0    None\ndtype: object"),
            (["3.50", np.nan], "0    3.50\n1     NaN\ndtype: object"),
            ([3.50, np.nan], "0    3.5\n1    NaN\ndtype: float64"),
            ([3.50, np.nan, "3.50"], "0     3.5\n1     NaN\n2    3.50\ndtype: object"),
            ([3.50, None, "3.50"], "0     3.5\n1    None\n2    3.50\ndtype: object"),
        ],
    )
    def test_repr_str_float_truncation(self, data, expected, using_infer_string):
        # GH#38708
        # String floats ("3.50") must keep their trailing zero while real
        # floats are trimmed (3.5).
        series = Series(data, dtype=object if "3.50" in data else None)
        result = repr(series)
        assert result == expected
+
    @pytest.mark.parametrize(
        "float_format,expected",
        [
            ("{:,.0f}".format, "0   1,000\n1    test\ndtype: object"),
            ("{:.4f}".format, "0   1000.0000\n1        test\ndtype: object"),
        ],
    )
    def test_repr_float_format_in_object_col(self, float_format, expected):
        # GH#40024
        # display.float_format applies to float values inside an
        # object-dtype Series, leaving the strings untouched.
        df = Series([1000.0, "test"])
        with option_context("display.float_format", float_format):
            result = repr(df)

        assert result == expected
+
    def test_period(self):
        # GH 12615
        # Period columns with uniform and mixed frequencies format to
        # fixed-width, right-aligned text.
        df = DataFrame(
            {
                "A": pd.period_range("2013-01", periods=4, freq="M"),
                "B": [
                    pd.Period("2011-01", freq="M"),
                    pd.Period("2011-02-01", freq="D"),
                    pd.Period("2011-03-01 09:00", freq="h"),
                    pd.Period("2011-04", freq="M"),
                ],
                "C": list("abcd"),
            }
        )
        exp = (
            "         A                 B  C\n"
            "0  2013-01           2011-01  a\n"
            "1  2013-02        2011-02-01  b\n"
            "2  2013-03  2011-03-01 09:00  c\n"
            "3  2013-04           2011-04  d"
        )
        assert str(df) == exp
+
    @pytest.mark.parametrize(
        "length, max_rows, min_rows, expected",
        [
            (10, 10, 10, 10),
            (10, 10, None, 10),
            (10, 8, None, 8),
            (20, 30, 10, 30),  # max_rows > len(frame), hence max_rows
            (50, 30, 10, 10),  # max_rows < len(frame), hence min_rows
            (100, 60, 10, 10),  # same
            (60, 60, 10, 60),  # edge case
            (61, 60, 10, 10),  # edge case
        ],
    )
    def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
        """Check that display logic is correct.

        GH #37359

        See description here:
        https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
        """
        formatter = fmt.DataFrameFormatter(
            DataFrame(np.random.default_rng(2).random((length, 3))),
            max_rows=max_rows,
            min_rows=min_rows,
        )
        result = formatter.max_rows_fitted
        assert result == expected
+
+
def gen_series_formatting():
    # Build the dict of Series fixtures shared by several formatting tests:
    # two long uniform-width Series and one short Series of ascending /
    # descending string widths.
    short = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
    fixtures = {
        "onel": Series(["a"] * 100),
        "twol": Series(["ab"] * 100),
        "asc": short,
        "desc": short[::-1],
    }
    return fixtures
+
+
class TestSeriesFormatting:
    def test_freq_name_separation(self):
        # The repr footer shows frequency and name together on one line.
        ser = Series(
            np.random.default_rng(2).standard_normal(10),
            index=date_range("1/1/2000", periods=10),
            name=0,
        )

        assert "Freq: D, Name: 0" in repr(ser)
+
+ def test_unicode_name_in_footer(self):
+ s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
+ sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
+ sf._get_footer() # should not raise exception
+
    @pytest.mark.xfail(
        using_pyarrow_string_dtype(), reason="Fixup when arrow is default"
    )
    def test_east_asian_unicode_series(self):
        # not aligned properly because of east asian width
        # Series counterpart of the frame tests above: first with the
        # east_asian_width option off (default), then with it enabled.

        # unicode index
        s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
        expected = "".join(
            [
                "あ         a\n",
                "いい       bb\n",
                "ううう    CCC\n",
                "ええええ      D\ndtype: object",
            ]
        )
        assert repr(s) == expected

        # unicode values
        s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
        expected = "".join(
            [
                "a         あ\n",
                "bb       いい\n",
                "c       ううう\n",
                "ddd    ええええ\n",
                "dtype: object",
            ]
        )

        assert repr(s) == expected

        # both
        s = Series(
            ["あ", "いい", "ううう", "ええええ"],
            index=["ああ", "いいいい", "う", "えええ"],
        )
        expected = "".join(
            [
                "ああ         あ\n",
                "いいいい      いい\n",
                "う        ううう\n",
                "えええ     ええええ\n",
                "dtype: object",
            ]
        )

        assert repr(s) == expected

        # unicode footer
        s = Series(
            ["あ", "いい", "ううう", "ええええ"],
            index=["ああ", "いいいい", "う", "えええ"],
            name="おおおおおおお",
        )
        expected = (
            "ああ         あ\nいいいい      いい\nう        ううう\n"
            "えええ     ええええ\nName: おおおおおおお, dtype: object"
        )
        assert repr(s) == expected

        # MultiIndex
        idx = MultiIndex.from_tuples(
            [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
        )
        s = Series([1, 22, 3333, 44444], index=idx)
        expected = (
            "あ    いい          1\n"
            "う    え          22\n"
            "おおお  かかかか    3333\n"
            "き    くく      44444\ndtype: int64"
        )
        assert repr(s) == expected

        # object dtype, shorter than unicode repr
        s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
        expected = (
            "1          1\nAB        22\nNaN     3333\nあああ    44444\ndtype: int64"
        )
        assert repr(s) == expected

        # object dtype, longer than unicode repr
        s = Series(
            [1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
        )
        expected = (
            "1                          1\n"
            "AB                        22\n"
            "2011-01-01 00:00:00     3333\n"
            "あああ                 44444\ndtype: int64"
        )
        assert repr(s) == expected

        # truncate
        with option_context("display.max_rows", 3):
            s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")

            expected = (
                "0       あ\n     ... \n"
                "3    ええええ\n"
                "Name: おおおおおおお, Length: 4, dtype: object"
            )
            assert repr(s) == expected

            s.index = ["ああ", "いいいい", "う", "えええ"]
            expected = (
                "ああ        あ\n       ... \n"
                "えええ    ええええ\n"
                "Name: おおおおおおお, Length: 4, dtype: object"
            )
            assert repr(s) == expected

        # Enable Unicode option -----------------------------------------
        with option_context("display.unicode.east_asian_width", True):
            # unicode index
            s = Series(
                ["a", "bb", "CCC", "D"],
                index=["あ", "いい", "ううう", "ええええ"],
            )
            expected = (
                "あ            a\nいい         bb\nううう      CCC\n"
                "ええええ      D\ndtype: object"
            )
            assert repr(s) == expected

            # unicode values
            s = Series(
                ["あ", "いい", "ううう", "ええええ"],
                index=["a", "bb", "c", "ddd"],
            )
            expected = (
                "a            あ\nbb         いい\nc        ううう\n"
                "ddd    ええええ\ndtype: object"
            )
            assert repr(s) == expected
            # both
            s = Series(
                ["あ", "いい", "ううう", "ええええ"],
                index=["ああ", "いいいい", "う", "えええ"],
            )
            expected = (
                "ああ              あ\n"
                "いいいい        いい\n"
                "う            ううう\n"
                "えええ      ええええ\ndtype: object"
            )
            assert repr(s) == expected

            # unicode footer
            s = Series(
                ["あ", "いい", "ううう", "ええええ"],
                index=["ああ", "いいいい", "う", "えええ"],
                name="おおおおおおお",
            )
            expected = (
                "ああ              あ\n"
                "いいいい        いい\n"
                "う            ううう\n"
                "えええ      ええええ\n"
                "Name: おおおおおおお, dtype: object"
            )
            assert repr(s) == expected

            # MultiIndex
            idx = MultiIndex.from_tuples(
                [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
            )
            s = Series([1, 22, 3333, 44444], index=idx)
            expected = (
                "あ      いい            1\n"
                "う      え             22\n"
                "おおお  かかかか     3333\n"
                "き      くく        44444\n"
                "dtype: int64"
            )
            assert repr(s) == expected

            # object dtype, shorter than unicode repr
            s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
            expected = (
                "1             1\nAB           22\nNaN        3333\n"
                "あああ    44444\ndtype: int64"
            )
            assert repr(s) == expected

            # object dtype, longer than unicode repr
            s = Series(
                [1, 22, 3333, 44444],
                index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
            )
            expected = (
                "1                          1\n"
                "AB                        22\n"
                "2011-01-01 00:00:00     3333\n"
                "あああ                 44444\ndtype: int64"
            )
            assert repr(s) == expected

            # truncate
            with option_context("display.max_rows", 3):
                s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
                expected = (
                    "0          あ\n       ...   \n"
                    "3    ええええ\n"
                    "Name: おおおおおおお, Length: 4, dtype: object"
                )
                assert repr(s) == expected

                s.index = ["ああ", "いいいい", "う", "えええ"]
                expected = (
                    "ああ            あ\n"
                    "          ...     \n"
                    "えええ    ええええ\n"
                    "Name: おおおおおおお, Length: 4, dtype: object"
                )
                assert repr(s) == expected

            # ambiguous unicode
            s = Series(
                ["¡¡", "い¡¡", "ううう", "ええええ"],
                index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"],
            )
            expected = (
                "ああ              ¡¡\n"
                "¡¡¡¡いい        い¡¡\n"
                "¡¡            ううう\n"
                "えええ      ええええ\ndtype: object"
            )
            assert repr(s) == expected
+
    def test_float_trim_zeros(self):
        # Large floats keep their exponent ("+10" / "+010") on every repr
        # line of the Series body; the dtype footer is skipped.
        vals = [
            2.08430917305e10,
            3.52205017305e10,
            2.30674817305e10,
            2.03954217305e10,
            5.59897817305e10,
        ]
        for line in repr(Series(vals)).split("\n"):
            if line.startswith("dtype:"):
                continue
            if _three_digit_exp():
                assert "+010" in line
            else:
                assert "+10" in line
+
    @pytest.mark.parametrize(
        "start_date",
        [
            "2017-01-01 23:59:59.999999999",
            "2017-01-01 23:59:59.99999999",
            "2017-01-01 23:59:59.9999999",
            "2017-01-01 23:59:59.999999",
            "2017-01-01 23:59:59.99999",
            "2017-01-01 23:59:59.9999",
        ],
    )
    def test_datetimeindex_highprecision(self, start_date):
        # GH19030
        # Check that high-precision time values for the end of day are
        # included in repr for DatetimeIndex
        # ... both as Series values:
        s1 = Series(date_range(start=start_date, freq="D", periods=5))
        result = str(s1)
        assert start_date in result

        # ... and as the Series index:
        dti = date_range(start=start_date, freq="D", periods=5)
        s2 = Series(3, index=dti)
        result = str(s2.index)
        assert start_date in result
+
+ def test_mixed_datetime64(self):
+ df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
+ df["B"] = pd.to_datetime(df.B)
+
+ result = repr(df.loc[0])
+ assert "2012-01-01" in result
+
    def test_period(self):
        # GH 12615
        # PeriodIndex-backed, period-valued and mixed-frequency Series all
        # format to these exact strings.
        index = pd.period_range("2013-01", periods=6, freq="M")
        s = Series(np.arange(6, dtype="int64"), index=index)
        exp = (
            "2013-01    0\n"
            "2013-02    1\n"
            "2013-03    2\n"
            "2013-04    3\n"
            "2013-05    4\n"
            "2013-06    5\n"
            "Freq: M, dtype: int64"
        )
        assert str(s) == exp

        s = Series(index)
        exp = (
            "0    2013-01\n"
            "1    2013-02\n"
            "2    2013-03\n"
            "3    2013-04\n"
            "4    2013-05\n"
            "5    2013-06\n"
            "dtype: period[M]"
        )
        assert str(s) == exp

        # periods with mixed freq
        s = Series(
            [
                pd.Period("2011-01", freq="M"),
                pd.Period("2011-02-01", freq="D"),
                pd.Period("2011-03-01 09:00", freq="h"),
            ]
        )
        exp = (
            "0             2011-01\n1          2011-02-01\n"
            "2    2011-03-01 09:00\ndtype: object"
        )
        assert str(s) == exp
+
+ def test_max_multi_index_display(self):
+ # GH 7101
+
+ # doc example (indexing.rst)
+
+ # multi-index
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ tuples = list(zip(*arrays))
+ index = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ s = Series(np.random.default_rng(2).standard_normal(8), index=index)
+
+ with option_context("display.max_rows", 10):
+ assert len(str(s).split("\n")) == 10
+ with option_context("display.max_rows", 3):
+ assert len(str(s).split("\n")) == 5
+ with option_context("display.max_rows", 2):
+ assert len(str(s).split("\n")) == 5
+ with option_context("display.max_rows", 1):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 0):
+ assert len(str(s).split("\n")) == 10
+
+ # index
+ s = Series(np.random.default_rng(2).standard_normal(8), None)
+
+ with option_context("display.max_rows", 10):
+ assert len(str(s).split("\n")) == 9
+ with option_context("display.max_rows", 3):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 2):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 1):
+ assert len(str(s).split("\n")) == 3
+ with option_context("display.max_rows", 0):
+ assert len(str(s).split("\n")) == 9
+
    # Make sure #8532 is fixed
    def test_consistent_format(self):
        # All values are shown with the same precision (4 decimals, driven
        # by the single 0.9999 entry), including across the truncation gap.
        s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
        with option_context("display.max_rows", 10, "display.show_dimensions", False):
            res = repr(s)
        exp = (
            "0      1.0000\n1      1.0000\n2      1.0000\n3      "
            "1.0000\n4      1.0000\n        ...  \n125    "
            "1.0000\n126    1.0000\n127    0.9999\n128    "
            "1.0000\n129    1.0000\ndtype: float64"
        )
        assert res == exp
+
+ def chck_ncols(self, s):
+ lines = [
+ line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
+ ][:-1]
+ ncolsizes = len({len(line.strip()) for line in lines})
+ assert ncolsizes == 1
+
    @pytest.mark.xfail(
        using_pyarrow_string_dtype(), reason="change when arrow is default"
    )
    def test_format_explicit(self):
        # Exact truncated reprs for the shared fixtures: uniform-width
        # Series use a short ".." ellipsis, ragged ones the wide " ... ".
        test_sers = gen_series_formatting()
        with option_context("display.max_rows", 4, "display.show_dimensions", False):
            res = repr(test_sers["onel"])
            exp = "0     a\n1     a\n     ..\n98    a\n99    a\ndtype: object"
            assert exp == res
            res = repr(test_sers["twol"])
            exp = "0     ab\n1     ab\n      ..\n98    ab\n99    ab\ndtype: object"
            assert exp == res
            res = repr(test_sers["asc"])
            exp = (
                "0         a\n1        ab\n      ...  \n4     abcde\n5    "
                "abcdef\ndtype: object"
            )
            assert exp == res
            res = repr(test_sers["desc"])
            exp = (
                "5    abcdef\n4     abcde\n      ...  \n1        ab\n0         "
                "a\ndtype: object"
            )
            assert exp == res
+
    def test_ncols(self):
        # Column widths are consistent for every shared Series fixture.
        test_sers = gen_series_formatting()
        for s in test_sers.values():
            self.chck_ncols(s)
+
+ def test_max_rows_eq_one(self):
+ s = Series(range(10), dtype="int64")
+ with option_context("display.max_rows", 1):
+ strrepr = repr(s).split("\n")
+ exp1 = ["0", "0"]
+ res1 = strrepr[0].split()
+ assert exp1 == res1
+ exp2 = [".."]
+ res2 = strrepr[1].split()
+ assert exp2 == res2
+
+ def test_truncate_ndots(self):
+ def getndots(s):
+ return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
+
+ s = Series([0, 2, 3, 6])
+ with option_context("display.max_rows", 2):
+ strrepr = repr(s).replace("\n", "")
+ assert getndots(strrepr) == 2
+
+ s = Series([0, 100, 200, 400])
+ with option_context("display.max_rows", 2):
+ strrepr = repr(s).replace("\n", "")
+ assert getndots(strrepr) == 3
+
    def test_show_dimensions(self):
        # gh-7117
        # The "Length" footer appears only when the Series is truncated or
        # show_dimensions is explicitly enabled.
        s = Series(range(5))

        assert "Length" not in repr(s)

        with option_context("display.max_rows", 4):
            assert "Length" in repr(s)

        with option_context("display.show_dimensions", True):
            assert "Length" in repr(s)

        # show_dimensions=False suppresses the footer even when truncated
        with option_context("display.max_rows", 4, "display.show_dimensions", False):
            assert "Length" not in repr(s)
+
    def test_repr_min_rows(self):
        # display.min_rows governs how many rows survive truncation.
        # NOTE(review): substring expectations such as "5 5" appear
        # whitespace-collapsed in this vendored copy (real repr pads the
        # value column with multiple spaces) — confirm against upstream.
        s = Series(range(20))

        # default setting no truncation even if above min_rows
        assert ".." not in repr(s)

        s = Series(range(61))

        # default of max_rows 60 triggers truncation if above
        assert ".." in repr(s)

        with option_context("display.max_rows", 10, "display.min_rows", 4):
            # truncated after first two rows
            assert ".." in repr(s)
            assert "2 " not in repr(s)

        with option_context("display.max_rows", 12, "display.min_rows", None):
            # when set to None, follow value of max_rows
            assert "5 5" in repr(s)

        with option_context("display.max_rows", 10, "display.min_rows", 12):
            # when set value higher as max_rows, use the minimum
            assert "5 5" not in repr(s)

        with option_context("display.max_rows", None, "display.min_rows", 12):
            # max_rows of None -> never truncate
            assert ".." not in repr(s)
+
+
class TestGenericArrayFormatter:
    """Tests for fmt._GenericArrayFormatter, the fallback array formatter.

    NOTE(review): the right-justified expected literals below look
    whitespace-collapsed in this vendored copy — verify padding against
    upstream pandas.
    """

    def test_1d_array(self):
        # _GenericArrayFormatter is used on types for which there isn't a dedicated
        # formatter. np.bool_ is one of those types.
        obj = fmt._GenericArrayFormatter(np.array([True, False]))
        res = obj.get_result()
        assert len(res) == 2
        # Results should be right-justified.
        assert res[0] == " True"
        assert res[1] == " False"

    def test_2d_array(self):
        # Each 1-D sub-array renders as a single bracketed cell.
        obj = fmt._GenericArrayFormatter(np.array([[True, False], [False, True]]))
        res = obj.get_result()
        assert len(res) == 2
        assert res[0] == " [True, False]"
        assert res[1] == " [False, True]"

    def test_3d_array(self):
        obj = fmt._GenericArrayFormatter(
            np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
        )
        res = obj.get_result()
        assert len(res) == 2
        assert res[0] == " [[True, True], [False, False]]"
        assert res[1] == " [[False, True], [True, False]]"

    def test_2d_extension_type(self):
        # GH 33770

        # Define a stub extension type with just enough code to run Series.__repr__()
        class DtypeStub(pd.api.extensions.ExtensionDtype):
            @property
            def type(self):
                return np.ndarray

            @property
            def name(self):
                return "DtypeStub"

        class ExtTypeStub(pd.api.extensions.ExtensionArray):
            def __len__(self) -> int:
                return 2

            def __getitem__(self, ix):
                # Two fixed 2-element "rows"; enough for repr to iterate.
                return [ix == 1, ix == 0]

            @property
            def dtype(self):
                return DtypeStub()

        series = Series(ExtTypeStub(), copy=False)
        res = repr(series)  # This line crashed before #33770 was fixed.
        expected = "\n".join(
            ["0 [False True]", "1 [True False]", "dtype: DtypeStub"]
        )
        assert res == expected
+
+
+def _three_digit_exp():
+ return f"{1.7e8:.4g}" == "1.7e+008"
+
+
class TestFloatArrayFormatter:
    """Tests for fmt.FloatArrayFormatter precision and exponent handling.

    NOTE(review): the padded expected literals below may have collapsed
    whitespace in this vendored copy — verify against upstream pandas.
    """

    def test_misc(self):
        # Empty input yields an empty result list.
        obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
        result = obj.get_result()
        assert len(result) == 0

    def test_format(self):
        obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
        result = obj.get_result()
        assert result[0] == " 12.0"
        assert result[1] == " 0.0"

    def test_output_display_precision_trailing_zeroes(self):
        # Issue #20359: trimming zeros while there is no decimal point

        # Happens when display precision is set to zero
        with option_context("display.precision", 0):
            s = Series([840.0, 4200.0])
            expected_output = "0 840\n1 4200\ndtype: float64"
            assert str(s) == expected_output

    @pytest.mark.parametrize(
        "value,expected",
        [
            ([9.4444], " 0\n0 9"),
            ([0.49], " 0\n0 5e-01"),
            ([10.9999], " 0\n0 11"),
            ([9.5444, 9.6], " 0\n0 10\n1 10"),
            ([0.46, 0.78, -9.9999], " 0\n0 5e-01\n1 8e-01\n2 -1e+01"),
        ],
    )
    def test_set_option_precision(self, value, expected):
        # Issue #30122
        # Precision was incorrectly shown

        with option_context("display.precision", 0):
            df_value = DataFrame(value)
            assert str(df_value) == expected

    def test_output_significant_digits(self):
        # Issue #9764

        # In case default display precision changes:
        with option_context("display.precision", 6):
            # DataFrame example from issue #9764
            d = DataFrame(
                {
                    "col1": [
                        9.999e-8,
                        1e-7,
                        1.0001e-7,
                        2e-7,
                        4.999e-7,
                        5e-7,
                        5.0001e-7,
                        6e-7,
                        9.999e-7,
                        1e-6,
                        1.0001e-6,
                        2e-6,
                        4.999e-6,
                        5e-6,
                        5.0001e-6,
                        6e-6,
                    ]
                }
            )

            # Keyed by (start, stop) row slices; the chosen slice decides
            # whether scientific or fixed notation is used for the column.
            expected_output = {
                (0, 6): " col1\n"
                "0 9.999000e-08\n"
                "1 1.000000e-07\n"
                "2 1.000100e-07\n"
                "3 2.000000e-07\n"
                "4 4.999000e-07\n"
                "5 5.000000e-07",
                (1, 6): " col1\n"
                "1 1.000000e-07\n"
                "2 1.000100e-07\n"
                "3 2.000000e-07\n"
                "4 4.999000e-07\n"
                "5 5.000000e-07",
                (1, 8): " col1\n"
                "1 1.000000e-07\n"
                "2 1.000100e-07\n"
                "3 2.000000e-07\n"
                "4 4.999000e-07\n"
                "5 5.000000e-07\n"
                "6 5.000100e-07\n"
                "7 6.000000e-07",
                (8, 16): " col1\n"
                "8 9.999000e-07\n"
                "9 1.000000e-06\n"
                "10 1.000100e-06\n"
                "11 2.000000e-06\n"
                "12 4.999000e-06\n"
                "13 5.000000e-06\n"
                "14 5.000100e-06\n"
                "15 6.000000e-06",
                (9, 16): " col1\n"
                "9 0.000001\n"
                "10 0.000001\n"
                "11 0.000002\n"
                "12 0.000005\n"
                "13 0.000005\n"
                "14 0.000005\n"
                "15 0.000006",
            }

            for (start, stop), v in expected_output.items():
                assert str(d[start:stop]) == v

    def test_too_long(self):
        # GH 10451
        with option_context("display.precision", 4):
            # need both a number > 1e6 and something that normally formats to
            # having length > display.precision + 6
            df = DataFrame({"x": [12345.6789]})
            assert str(df) == " x\n0 12345.6789"
            df = DataFrame({"x": [2e6]})
            assert str(df) == " x\n0 2000000.0"
            df = DataFrame({"x": [12345.6789, 2e6]})
            assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
+
+
+class TestTimedelta64Formatter:
+ def test_days(self):
+ x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+ assert result[1].strip() == "1 days"
+
+ result = fmt._Timedelta64Formatter(x[1:2]).get_result()
+ assert result[0].strip() == "1 days"
+
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+ assert result[1].strip() == "1 days"
+
+ result = fmt._Timedelta64Formatter(x[1:2]).get_result()
+ assert result[0].strip() == "1 days"
+
+ def test_days_neg(self):
+ x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(-x).get_result()
+ assert result[0].strip() == "0 days"
+ assert result[1].strip() == "-1 days"
+
+ def test_subdays(self):
+ y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")._values
+ result = fmt._Timedelta64Formatter(y).get_result()
+ assert result[0].strip() == "0 days 00:00:00"
+ assert result[1].strip() == "0 days 00:00:01"
+
+ def test_subdays_neg(self):
+ y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")._values
+ result = fmt._Timedelta64Formatter(-y).get_result()
+ assert result[0].strip() == "0 days 00:00:00"
+ assert result[1].strip() == "-1 days +23:59:59"
+
+ def test_zero(self):
+ x = pd.to_timedelta(list(range(1)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+
+ x = pd.to_timedelta(list(range(1)), unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+
+
class TestDatetime64Formatter:
    """Tests for fmt._Datetime64Formatter / _Datetime64TZFormatter output."""

    def test_mixed(self):
        # Any time-of-day component forces full datetime rendering.
        x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), NaT])._values
        result = fmt._Datetime64Formatter(x).get_result()
        assert result[0].strip() == "2013-01-01 00:00:00"
        assert result[1].strip() == "2013-01-01 12:00:00"

    def test_dates(self):
        # Midnight-only values render as bare dates.
        x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), NaT])._values
        result = fmt._Datetime64Formatter(x).get_result()
        assert result[0].strip() == "2013-01-01"
        assert result[1].strip() == "2013-01-02"

    def test_date_nanos(self):
        # Nanosecond-resolution timestamps keep all nine fractional digits.
        x = Series([Timestamp(200)])._values
        result = fmt._Datetime64Formatter(x).get_result()
        assert result[0].strip() == "1970-01-01 00:00:00.000000200"

    def test_dates_display(self):
        # 10170
        # make sure that we are consistently display date formatting
        x = Series(date_range("20130101 09:00:00", periods=5, freq="D"))
        x.iloc[1] = np.nan
        result = fmt._Datetime64Formatter(x._values).get_result()
        assert result[0].strip() == "2013-01-01 09:00:00"
        assert result[1].strip() == "NaT"
        assert result[4].strip() == "2013-01-05 09:00:00"

        x = Series(date_range("20130101 09:00:00", periods=5, freq="s"))
        x.iloc[1] = np.nan
        result = fmt._Datetime64Formatter(x._values).get_result()
        assert result[0].strip() == "2013-01-01 09:00:00"
        assert result[1].strip() == "NaT"
        assert result[4].strip() == "2013-01-01 09:00:04"

        # Fractional seconds widen uniformly with the frequency resolution.
        x = Series(date_range("20130101 09:00:00", periods=5, freq="ms"))
        x.iloc[1] = np.nan
        result = fmt._Datetime64Formatter(x._values).get_result()
        assert result[0].strip() == "2013-01-01 09:00:00.000"
        assert result[1].strip() == "NaT"
        assert result[4].strip() == "2013-01-01 09:00:00.004"

        x = Series(date_range("20130101 09:00:00", periods=5, freq="us"))
        x.iloc[1] = np.nan
        result = fmt._Datetime64Formatter(x._values).get_result()
        assert result[0].strip() == "2013-01-01 09:00:00.000000"
        assert result[1].strip() == "NaT"
        assert result[4].strip() == "2013-01-01 09:00:00.000004"

        x = Series(date_range("20130101 09:00:00", periods=5, freq="ns"))
        x.iloc[1] = np.nan
        result = fmt._Datetime64Formatter(x._values).get_result()
        assert result[0].strip() == "2013-01-01 09:00:00.000000000"
        assert result[1].strip() == "NaT"
        assert result[4].strip() == "2013-01-01 09:00:00.000000004"

    def test_datetime64formatter_yearmonth(self):
        # A user-supplied formatter callable overrides default rendering.
        x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])._values

        def format_func(x):
            return x.strftime("%Y-%m")

        formatter = fmt._Datetime64Formatter(x, formatter=format_func)
        result = formatter.get_result()
        assert result == ["2016-01", "2016-02"]

    def test_datetime64formatter_hoursecond(self):
        x = Series(
            pd.to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")
        )._values

        def format_func(x):
            return x.strftime("%H:%M")

        formatter = fmt._Datetime64Formatter(x, formatter=format_func)
        result = formatter.get_result()
        assert result == ["10:10", "12:12"]

    def test_datetime64formatter_tz_ms(self):
        # tz-aware ms-resolution values include the UTC offset suffix.
        x = (
            Series(
                np.array(["2999-01-01", "2999-01-02", "NaT"], dtype="datetime64[ms]")
            )
            .dt.tz_localize("US/Pacific")
            ._values
        )
        result = fmt._Datetime64TZFormatter(x).get_result()
        assert result[0].strip() == "2999-01-01 00:00:00-08:00"
        assert result[1].strip() == "2999-01-02 00:00:00-08:00"
+
+
class TestFormatPercentiles:
    """Tests for fmt.format_percentiles label generation (describe() headers)."""

    @pytest.mark.parametrize(
        "percentiles, expected",
        [
            (
                [0.01999, 0.02001, 0.5, 0.666666, 0.9999],
                ["1.999%", "2.001%", "50%", "66.667%", "99.99%"],
            ),
            (
                [0, 0.5, 0.02001, 0.5, 0.666666, 0.9999],
                ["0%", "50%", "2.0%", "50%", "66.67%", "99.99%"],
            ),
            ([0.281, 0.29, 0.57, 0.58], ["28.1%", "29%", "57%", "58%"]),
            ([0.28, 0.29, 0.57, 0.58], ["28%", "29%", "57%", "58%"]),
            (
                [0.9, 0.99, 0.999, 0.9999, 0.99999],
                ["90%", "99%", "99.9%", "99.99%", "99.999%"],
            ),
        ],
    )
    def test_format_percentiles(self, percentiles, expected):
        # Precision expands just enough to keep all labels distinct.
        result = fmt.format_percentiles(percentiles)
        assert result == expected

    @pytest.mark.parametrize(
        "percentiles",
        [
            ([0.1, np.nan, 0.5]),
            ([-0.001, 0.1, 0.5]),
            ([2, 0.1, 0.5]),
            ([0.1, 0.5, "a"]),
        ],
    )
    def test_error_format_percentiles(self, percentiles):
        # NaN, out-of-range, and non-numeric entries all raise ValueError.
        msg = r"percentiles should all be in the interval \[0,1\]"
        with pytest.raises(ValueError, match=msg):
            fmt.format_percentiles(percentiles)

    def test_format_percentiles_integer_idx(self):
        # Issue #26660
        result = fmt.format_percentiles(np.linspace(0, 1, 10 + 1))
        expected = [
            "0%",
            "10%",
            "20%",
            "30%",
            "40%",
            "50%",
            "60%",
            "70%",
            "80%",
            "90%",
            "100%",
        ]
        assert result == expected
+
+
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
@pytest.mark.parametrize(
    "encoding, data",
    [(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")],
)
def test_filepath_or_buffer_arg(
    method,
    filepath_or_buffer,
    assert_filepath_or_buffer_equals,
    encoding,
    data,
    filepath_or_buffer_id,
):
    # Writing through buf= must honor encoding for path targets, reject it
    # for non-path buffers, and reject unknown codec names.
    df = DataFrame([data])
    if method in ["to_latex"]:  # uses styler implementation
        pytest.importorskip("jinja2")

    if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
        # encoding only makes sense when pandas itself opens the file.
        with pytest.raises(
            ValueError, match="buf is not a file name and encoding is specified."
        ):
            getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
    elif encoding == "foo":
        with pytest.raises(LookupError, match="unknown encoding"):
            getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
    else:
        # Round-trip: file contents must equal the in-memory render.
        expected = getattr(df, method)()
        getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
        assert_filepath_or_buffer_equals(expected)
+
+
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
    # buf that is neither a path nor file-like must raise TypeError.
    if method in ["to_latex"]:  # uses styler implementation
        pytest.importorskip("jinja2")
    msg = "buf is not a file name and it has no write method"
    with pytest.raises(TypeError, match=msg):
        getattr(float_frame, method)(buf=object())
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..8512f41396906de1f59bbb23d4b535f82c546132
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py
@@ -0,0 +1,90 @@
+import numpy as np
+
+import pandas._config.config as cf
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+)
+
+
class TestTableSchemaRepr:
    """IPython display integration for the table-schema (dataresource) repr.

    The ``ip`` fixture supplies an IPython instance; ``cf`` is
    pandas._config.config.
    """

    def test_publishes(self, ip):
        # Enabling display.html.table_schema adds the dataresource mimetype
        # to the formatter output for both Series and DataFrame.
        ipython = ip.instance(config=ip.config)
        df = DataFrame({"A": [1, 2]})
        objects = [df["A"], df]  # dataframe / series
        expected_keys = [
            {"text/plain", "application/vnd.dataresource+json"},
            {"text/plain", "text/html", "application/vnd.dataresource+json"},
        ]

        opt = cf.option_context("display.html.table_schema", True)
        last_obj = None
        for obj, expected in zip(objects, expected_keys):
            last_obj = obj
            with opt:
                formatted = ipython.display_formatter.format(obj)
            assert set(formatted[0].keys()) == expected

        # styler.render.repr="latex" adds text/latex on top of the rest.
        with_latex = cf.option_context("styler.render.repr", "latex")

        with opt, with_latex:
            formatted = ipython.display_formatter.format(last_obj)

        expected = {
            "text/plain",
            "text/html",
            "text/latex",
            "application/vnd.dataresource+json",
        }
        assert set(formatted[0].keys()) == expected

    def test_publishes_not_implemented(self, ip):
        # column MultiIndex
        # GH#15996: table schema is not implemented for MultiIndex columns,
        # so the dataresource mimetype must be absent.
        midx = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]])
        df = DataFrame(
            np.random.default_rng(2).standard_normal((5, len(midx))), columns=midx
        )

        opt = cf.option_context("display.html.table_schema", True)

        with opt:
            formatted = ip.instance(config=ip.config).display_formatter.format(df)

        expected = {"text/plain", "text/html"}
        assert set(formatted[0].keys()) == expected

    def test_config_on(self):
        # Option on -> _repr_data_resource_ produces a payload.
        df = DataFrame({"A": [1, 2]})
        with cf.option_context("display.html.table_schema", True):
            result = df._repr_data_resource_()

        assert result is not None

    def test_config_default_off(self):
        # Option off (the default) -> no payload.
        df = DataFrame({"A": [1, 2]})
        with cf.option_context("display.html.table_schema", False):
            result = df._repr_data_resource_()

        assert result is None

    def test_enable_data_resource_formatter(self, ip):
        # GH#10491: toggling the option enables/disables (never removes)
        # the registered mimetype formatter.
        formatters = ip.instance(config=ip.config).display_formatter.formatters
        mimetype = "application/vnd.dataresource+json"

        with cf.option_context("display.html.table_schema", True):
            assert "application/vnd.dataresource+json" in formatters
            assert formatters[mimetype].enabled

        # still there, just disabled
        assert "application/vnd.dataresource+json" in formatters
        assert not formatters[mimetype].enabled

        # able to re-set
        with cf.option_context("display.html.table_schema", True):
            assert "application/vnd.dataresource+json" in formatters
            assert formatters[mimetype].enabled
            # smoke test that it works
            ip.instance(config=ip.config).display_formatter.format(cf)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py
new file mode 100644
index 0000000000000000000000000000000000000000..acf2bc72c687d44dd1769468d21fba1bb04443b0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py
@@ -0,0 +1,129 @@
+# Note! This file is aimed specifically at pandas.io.formats.printing utility
+# functions, not the general printing of pandas objects.
+import string
+
+import pandas._config.config as cf
+
+from pandas.io.formats import printing
+
+
def test_adjoin():
    # adjoin(2, ...) joins the three columns row-wise with padding.
    # NOTE(review): the expected literal shows single spaces between cells,
    # but adjoin with space=2 emits at least two — the spacing here looks
    # whitespace-collapsed; verify against upstream pandas.
    data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]]
    expected = "a dd ggg\nb ee hhh\nc ff iii"

    adjoined = printing.adjoin(2, *data)

    assert adjoined == expected
+
+
class TestPPrintThing:
    """Tests for printing.pprint_thing scalar and sequence rendering."""

    def test_repr_binary_type(self):
        # Round-trip ascii letters through the configured display encoding;
        # quoted output must equal repr(), unquoted must equal the string.
        letters = string.ascii_letters
        try:
            raw = bytes(letters, encoding=cf.get_option("display.encoding"))
        except TypeError:
            raw = bytes(letters)
        decoded = str(raw.decode("utf-8"))
        assert printing.pprint_thing(decoded, quote_strings=True) == repr(decoded)
        assert printing.pprint_thing(decoded, quote_strings=False) == decoded

    def test_repr_obeys_max_seq_limit(self):
        # Rendered length of a 1000-item list shrinks with max_seq_items.
        checks = [
            (2000, lambda out: len(out) > 1000),
            (5, lambda out: len(out) < 100),
            (1, lambda out: len(out) < 9),
        ]
        for limit, passes in checks:
            with cf.option_context("display.max_seq_items", limit):
                assert passes(printing.pprint_thing(list(range(1000))))

    def test_repr_set(self):
        # A one-element set renders with brace notation.
        assert printing.pprint_thing({1}) == "{1}"
+
+
class TestFormatBase:
    """Tests for printing adjustment classes, chiefly East-Asian text width.

    NOTE(review): several expected literals below rely on exact space counts
    and on the distinction between fullwidth and halfwidth katakana, both of
    which may have been normalized by the extraction that produced this
    vendored copy — verify against upstream pandas before editing.
    """

    def test_adjoin(self):
        data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]]
        expected = "a dd ggg\nb ee hhh\nc ff iii"

        adjoined = printing.adjoin(2, *data)

        assert adjoined == expected

    def test_adjoin_unicode(self):
        # Default adjustment counts code points; the EastAsian adjustment
        # counts double-width characters as width 2.
        data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "hhh", "いいい"]]
        expected = "あ dd ggg\nb ええ hhh\nc ff いいい"
        adjoined = printing.adjoin(2, *data)
        assert adjoined == expected

        adj = printing._EastAsianTextAdjustment()

        expected = """あ dd ggg
b ええ hhh
c ff いいい"""

        adjoined = adj.adjoin(2, *data)
        assert adjoined == expected
        cols = adjoined.split("\n")
        assert adj.len(cols[0]) == 13
        assert adj.len(cols[1]) == 13
        assert adj.len(cols[2]) == 16

        expected = """あ dd ggg
b ええ hhh
c ff いいい"""

        adjoined = adj.adjoin(7, *data)
        assert adjoined == expected
        cols = adjoined.split("\n")
        assert adj.len(cols[0]) == 23
        assert adj.len(cols[1]) == 23
        assert adj.len(cols[2]) == 26

    def test_justify(self):
        adj = printing._EastAsianTextAdjustment()

        def just(x, *args, **kwargs):
            # wrapper to test single str
            return adj.justify([x], *args, **kwargs)[0]

        assert just("abc", 5, mode="left") == "abc "
        assert just("abc", 5, mode="center") == " abc "
        assert just("abc", 5, mode="right") == " abc"
        assert just("abc", 5, mode="left") == "abc "
        assert just("abc", 5, mode="center") == " abc "
        assert just("abc", 5, mode="right") == " abc"

        # Already at/over the display width: no padding is added.
        assert just("パンダ", 5, mode="left") == "パンダ"
        assert just("パンダ", 5, mode="center") == "パンダ"
        assert just("パンダ", 5, mode="right") == "パンダ"

        assert just("パンダ", 10, mode="left") == "パンダ "
        assert just("パンダ", 10, mode="center") == " パンダ "
        assert just("パンダ", 10, mode="right") == " パンダ"

    def test_east_asian_len(self):
        adj = printing._EastAsianTextAdjustment()

        assert adj.len("abc") == 3
        assert adj.len("abc") == 3

        # The 6-vs-5 pair presumably contrasts fullwidth with halfwidth
        # katakana forms — the two literals look identical here; confirm
        # the halfwidth characters survived extraction.
        assert adj.len("パンダ") == 6
        assert adj.len("パンダ") == 5
        assert adj.len("パンダpanda") == 11
        assert adj.len("パンダpanda") == 10

    def test_ambiguous_width(self):
        # "¡" has East-Asian-ambiguous width: 1 by default, 2 when
        # display.unicode.ambiguous_as_wide is enabled.
        adj = printing._EastAsianTextAdjustment()
        assert adj.len("¡¡ab") == 4

        with cf.option_context("display.unicode.ambiguous_as_wide", True):
            adj = printing._EastAsianTextAdjustment()
            assert adj.len("¡¡ab") == 6

        data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "¡¡ab", "いいい"]]
        expected = "あ dd ggg \nb ええ ¡¡ab\nc ff いいい"
        adjoined = adj.adjoin(2, *data)
        assert adjoined == expected
+ assert adjoined == expected
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py
new file mode 100644
index 0000000000000000000000000000000000000000..790ba92f70c40095af3f40396135be2842b33229
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py
@@ -0,0 +1,1177 @@
+from datetime import datetime
+from io import StringIO
+import itertools
+import re
+import textwrap
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ get_option,
+ option_context,
+)
+import pandas._testing as tm
+
+import pandas.io.formats.format as fmt
+
# Long filler paragraph; presumably used by wide-cell / truncation tests
# later in this module (usage is outside this excerpt).
lorem_ipsum = (
    "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod "
    "tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim "
    "veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex "
    "ea commodo consequat. Duis aute irure dolor in reprehenderit in "
    "voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur "
    "sint occaecat cupidatat non proident, sunt in culpa qui officia "
    "deserunt mollit anim id est laborum."
)
+
+
def expected_html(datapath, name):
    """
    Read HTML file from formats data directory.

    Parameters
    ----------
    datapath : pytest fixture
        The datapath fixture injected into a test by pytest.
    name : str
        The name of the HTML file without the suffix.

    Returns
    -------
    str : contents of HTML file.
    """
    filepath = datapath("io", "formats", "data", "html", f"{name}.html")
    with open(filepath, encoding="utf-8") as fh:
        # Trailing whitespace/newlines are not significant for comparisons.
        return fh.read().rstrip()
+
+
@pytest.fixture(params=["mixed", "empty"])
def biggie_df_fixture(request):
    """Fixture for a big mixed Dataframe and an empty Dataframe"""
    if request.param == "mixed":
        df = DataFrame(
            {
                "A": np.random.default_rng(2).standard_normal(200),
                "B": Index([f"{i}?!" for i in range(200)]),
            },
            index=np.arange(200),
        )
        # First 21 rows NaN'd out to exercise NA rendering.
        df.loc[:20, "A"] = np.nan
        df.loc[:20, "B"] = np.nan
        return df
    elif request.param == "empty":
        # 200 rows, zero columns.
        df = DataFrame(index=np.arange(200))
        return df
+
+
@pytest.fixture(params=fmt.VALID_JUSTIFY_PARAMETERS)
def justify(request):
    """Parametrize over every justify value accepted by to_html."""
    return request.param
+
+
@pytest.mark.parametrize("col_space", [30, 50])
def test_to_html_with_col_space(col_space):
    df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
    # check that col_space affects HTML generation
    # and be very brittle about it.
    result = df.to_html(col_space=col_space)
    # Header cells: lines containing a "<th" tag. The vendored copy had lost
    # the "<th[>" prefix of this pattern to markup stripping, leaving
    # r"\s]" which never matches to_html output, so hdrs was always empty
    # and the len() assertion below could never pass.
    hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)]
    assert len(hdrs) > 0
    for h in hdrs:
        assert "min-width" in h
        assert str(col_space) in h
+
+
def test_to_html_with_column_specific_col_space_raises():
    # col_space given as a list must match the column count exactly, and a
    # dict may only name existing columns.
    df = DataFrame(
        np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
    )

    msg = (
        "Col_space length\\(\\d+\\) should match "
        "DataFrame number of columns\\(\\d+\\)"
    )
    with pytest.raises(ValueError, match=msg):
        df.to_html(col_space=[30, 40])

    with pytest.raises(ValueError, match=msg):
        df.to_html(col_space=[30, 40, 50, 60])

    # "d" is not a column of df.
    msg = "unknown column"
    with pytest.raises(ValueError, match=msg):
        df.to_html(col_space={"a": "foo", "b": 23, "d": 34})
+
+
def test_to_html_with_column_specific_col_space():
    df = DataFrame(
        np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
    )

    # Restored the r"<th[>\s]" header-cell pattern and the "</th>" closing
    # tags that markup stripping removed from the vendored assertions
    # (without them the regex never matched and hdrs stayed empty).
    result = df.to_html(col_space={"a": "2em", "b": 23})
    hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
    assert 'min-width: 2em;">a</th>' in hdrs[1]
    assert 'min-width: 23px;">b</th>' in hdrs[2]
    assert "<th>c</th>" in hdrs[3]

    # List form applies per column, numbers becoming pixel widths.
    result = df.to_html(col_space=["1em", 2, 3])
    hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
    assert 'min-width: 1em;">a</th>' in hdrs[1]
    assert 'min-width: 2px;">b</th>' in hdrs[2]
    assert 'min-width: 3px;">c</th>' in hdrs[3]
+
+
def test_to_html_with_empty_string_label():
    # GH 3547: empty-string index labels must not be merged into a rowspan
    # as if they repeated the preceding label.
    frame = DataFrame(
        {"c1": ["a", "b"], "c2": ["a", ""], "data": [1, 2]}
    ).set_index(["c1", "c2"])
    assert "rowspan" not in frame.to_html()
+
+
@pytest.mark.parametrize(
    "df,expected",
    [
        # Greek sigma as a column name and as a cell value.
        (DataFrame({"\u03c3": np.arange(10.0)}), "unicode_1"),
        (DataFrame({"A": ["\u03c3"]}), "unicode_2"),
    ],
)
def test_to_html_unicode(df, expected, datapath):
    # Non-ascii labels/values must round-trip into the stored expected HTML.
    expected = expected_html(datapath, expected)
    result = df.to_html()
    assert result == expected
+
+
+def test_to_html_encoding(float_frame, tmp_path):
+ # GH 28663
+ path = tmp_path / "test.html"
+ float_frame.to_html(path, encoding="gbk")
+ with open(str(path), encoding="gbk") as f:
+ assert float_frame.to_html() == f.read()
+
+
def test_to_html_decimal(datapath):
    # GH 12031: decimal="," swaps the decimal separator in rendered floats.
    df = DataFrame({"A": [6.0, 3.1, 2.2]})
    result = df.to_html(decimal=",")
    expected = expected_html(datapath, "gh12031_expected_output")
    assert result == expected
+
+
@pytest.mark.parametrize(
    "kwargs,string,expected",
    [
        ({}, "<type 'str'>", "escaped"),
        ({"escape": False}, "<b>bold</b>", "escape_disabled"),
    ],
)
def test_to_html_escaped(kwargs, string, expected, datapath):
    # Restored from upstream pandas: markup stripping had removed the
    # angle-bracketed cell values above and collapsed the three label
    # definitions below into one garbled line — <, >, and & in labels and
    # values are exactly what this test exercises.
    a = "str<ing1 &amp;"
    b = "stri>ng2 &amp;"
    test_dict = {"co<l1": {a: string, b: string}, "co>l2": {a: string, b: string}}
    result = DataFrame(test_dict).to_html(**kwargs)
    expected = expected_html(datapath, expected)
    assert result == expected
+
+
@pytest.mark.parametrize("index_is_named", [True, False])
def test_to_html_multiindex_index_false(index_is_named, datapath):
    # GH 8452: index=False must fully suppress the index column even for
    # MultiIndex columns, named index or not.
    df = DataFrame(
        {"a": range(2), "b": range(3, 5), "c": range(5, 7), "d": range(3, 5)}
    )
    df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
    if index_is_named:
        df.index = Index(df.index.values, name="idx")
    result = df.to_html(index=False)
    expected = expected_html(datapath, "gh8452_expected_output")
    assert result == expected
+
+
@pytest.mark.parametrize(
    "multi_sparse,expected",
    [
        (False, "multiindex_sparsify_false_multi_sparse_1"),
        (False, "multiindex_sparsify_false_multi_sparse_2"),
        (True, "multiindex_sparsify_1"),
        (True, "multiindex_sparsify_2"),
    ],
)
def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
    # display.multi_sparse decides whether repeated outer-level labels are
    # blanked ("sparsified") in the rendered MultiIndex.
    index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], names=["foo", None])
    df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
    if expected.endswith("2"):
        # "_2" variants also put a MultiIndex on the columns.
        df.columns = index[::2]
    with option_context("display.multi_sparse", multi_sparse):
        result = df.to_html()
    expected = expected_html(datapath, expected)
    assert result == expected
+
+
@pytest.mark.parametrize(
    "max_rows,expected",
    [
        (60, "gh14882_expected_output_1"),
        # Test that ... appears in a middle level
        (56, "gh14882_expected_output_2"),
    ],
)
def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
    # GH 14882 - Issue on truncation with odd length DataFrame
    index = MultiIndex.from_product(
        [[100, 200, 300], [10, 20, 30], [1, 2, 3, 4, 5, 6, 7]], names=["a", "b", "c"]
    )
    df = DataFrame({"n": range(len(index))}, index=index)
    result = df.to_html(max_rows=max_rows)
    expected = expected_html(datapath, expected)
    assert result == expected
+
+
@pytest.mark.parametrize(
    "df,formatters,expected",
    [
        # "__index__" formatter applies to the index labels themselves.
        (
            DataFrame(
                [[0, 1], [2, 3], [4, 5], [6, 7]],
                columns=Index(["foo", None], dtype=object),
                index=np.arange(4),
            ),
            {"__index__": lambda x: "abcd"[x]},
            "index_formatter",
        ),
        (
            DataFrame({"months": [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
            {"months": lambda x: x.strftime("%Y-%m")},
            "datetime64_monthformatter",
        ),
        (
            DataFrame(
                {
                    "hod": pd.to_datetime(
                        ["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f"
                    )
                }
            ),
            {"hod": lambda x: x.strftime("%H:%M")},
            "datetime64_hourformatter",
        ),
        # One formatter per column, across a spread of dtypes.
        (
            DataFrame(
                {
                    "i": pd.Series([1, 2], dtype="int64"),
                    "f": pd.Series([1, 2], dtype="float64"),
                    "I": pd.Series([1, 2], dtype="Int64"),
                    "s": pd.Series([1, 2], dtype="string"),
                    "b": pd.Series([True, False], dtype="boolean"),
                    "c": pd.Series(["a", "b"], dtype=pd.CategoricalDtype(["a", "b"])),
                    "o": pd.Series([1, "2"], dtype=object),
                }
            ),
            [lambda x: "formatted"] * 7,
            "various_dtypes_formatted",
        ),
    ],
)
def test_to_html_formatters(df, formatters, expected, datapath):
    # Per-column/per-index formatter callables control cell text.
    expected = expected_html(datapath, expected)
    result = df.to_html(formatters=formatters)
    assert result == expected
+
+
def test_to_html_regression_GH6098():
    # Smoke test (GH 6098): _repr_html_ on a pivot table with non-ascii
    # labels must not raise.
    frame = DataFrame(
        {
            "clé1": ["a", "a", "b", "b", "a"],
            "clé2": ["1er", "2ème", "1er", "2ème", "1er"],
            # Two independently re-seeded generators, matching the original
            # (both columns draw the same values).
            "données1": np.random.default_rng(2).standard_normal(5),
            "données2": np.random.default_rng(2).standard_normal(5),
        }
    )

    # it works
    frame.pivot_table(index=["clé1"], columns=["clé2"])._repr_html_()
+
+
def test_to_html_truncate(datapath):
    # Both row and column truncation markers must match the stored fixture.
    index = pd.date_range(start="20010101", freq="D", periods=20)
    df = DataFrame(index=index, columns=range(20))
    result = df.to_html(max_rows=8, max_cols=4)
    expected = expected_html(datapath, "truncate")
    assert result == expected
+
+
@pytest.mark.parametrize("size", [1, 5])
def test_html_invalid_formatters_arg_raises(size):
    # issue-28469: a formatters list shorter or longer than the column
    # count must raise with a message naming both lengths.
    df = DataFrame(columns=["a", "b", "c"])
    msg = "Formatters length({}) should match DataFrame number of columns(3)"
    with pytest.raises(ValueError, match=re.escape(msg.format(size))):
        df.to_html(formatters=["{}".format] * size)
+
+
def test_to_html_truncate_formatter(datapath):
    # issue-25955: formatters must still line up with columns when
    # max_cols truncates the output.
    data = [
        {"A": 1, "B": 2, "C": 3, "D": 4},
        {"A": 5, "B": 6, "C": 7, "D": 8},
        {"A": 9, "B": 10, "C": 11, "D": 12},
        {"A": 13, "B": 14, "C": 15, "D": 16},
    ]

    df = DataFrame(data)

    # Named helper instead of a lambda bound to the name "fmt", which
    # shadowed this module's "import pandas.io.formats.format as fmt" alias.
    def add_mod(x):
        return str(x) + "_mod"

    formatters = [add_mod, add_mod, None, None]
    result = df.to_html(formatters=formatters, max_cols=3)
    expected = expected_html(datapath, "truncate_formatter")
    assert result == expected
+
+
@pytest.mark.parametrize(
    "sparsify,expected",
    [(True, "truncate_multi_index"), (False, "truncate_multi_index_sparse_off")],
)
def test_to_html_truncate_multi_index(sparsify, expected, datapath):
    # Truncation of a frame whose rows AND columns carry the same
    # two-level MultiIndex, with and without label sparsification.
    arrays = [
        ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
        ["one", "two", "one", "two", "one", "two", "one", "two"],
    ]
    df = DataFrame(index=arrays, columns=arrays)
    result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
    expected = expected_html(datapath, expected)
    assert result == expected
+
+
@pytest.mark.parametrize(
    "option,result,expected",
    [
        # (display.html.border option value, render callable, border attr)
        (None, lambda df: df.to_html(), "1"),
        (None, lambda df: df.to_html(border=2), "2"),
        (2, lambda df: df.to_html(), "2"),
        (2, lambda df: df._repr_html_(), "2"),
    ],
)
def test_to_html_border(option, result, expected):
    # The border= argument wins; otherwise display.html.border applies.
    df = DataFrame({"A": [1, 2]})
    if option is None:
        result = result(df)
    else:
        with option_context("display.html.border", option):
            result = result(df)
    expected = f'border="{expected}"'
    assert expected in result
+
+
@pytest.mark.parametrize("biggie_df_fixture", ["mixed"], indirect=True)
def test_to_html(biggie_df_fixture):
    # TODO: split this test
    df = biggie_df_fixture
    s = df.to_html()

    # buf= must write the same text the no-buf call returns, and return None.
    buf = StringIO()
    retval = df.to_html(buf=buf)
    assert retval is None
    assert buf.getvalue() == s

    assert isinstance(s, str)

    # Smoke-test column selection, col_space, formatters, float_format.
    df.to_html(columns=["B", "A"], col_space=17)
    df.to_html(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})

    df.to_html(columns=["B", "A"], float_format=str)
    df.to_html(columns=["B", "A"], col_space=12, float_format=str)
+
+
+@pytest.mark.parametrize("biggie_df_fixture", ["empty"], indirect=True)
+def test_to_html_empty_dataframe(biggie_df_fixture):
+ df = biggie_df_fixture
+ df.to_html()
+
+
+def test_to_html_filename(biggie_df_fixture, tmpdir):
+ df = biggie_df_fixture
+ expected = df.to_html()
+ path = tmpdir.join("test.html")
+ df.to_html(path)
+ result = path.read()
+ assert result == expected
+
+
+def test_to_html_with_no_bold():
+ df = DataFrame({"x": np.random.default_rng(2).standard_normal(5)})
+ html = df.to_html(bold_rows=False)
+ result = html[html.find("")]
+ assert "B" not in result
+
+
+@pytest.mark.parametrize(
+ "columns,justify,expected",
+ [
+ (
+ MultiIndex.from_arrays(
+ [np.arange(2).repeat(2), np.mod(range(4), 2)],
+ names=["CL0", "CL1"],
+ ),
+ "left",
+ "multiindex_1",
+ ),
+ (
+ MultiIndex.from_arrays([np.arange(4), np.mod(range(4), 2)]),
+ "right",
+ "multiindex_2",
+ ),
+ ],
+)
+def test_to_html_multiindex(columns, justify, expected, datapath):
+ df = DataFrame([list("abcd"), list("efgh")], columns=columns)
+ result = df.to_html(justify=justify)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+def test_to_html_justify(justify, datapath):
+ df = DataFrame(
+ {"A": [6, 30000, 2], "B": [1, 2, 70000], "C": [223442, 0, 1]},
+ columns=["A", "B", "C"],
+ )
+ result = df.to_html(justify=justify)
+ expected = expected_html(datapath, "justify").format(justify=justify)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "justify", ["super-right", "small-left", "noinherit", "tiny", "pandas"]
+)
+def test_to_html_invalid_justify(justify):
+ # GH 17527
+ df = DataFrame()
+ msg = "Invalid value for justify parameter"
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(justify=justify)
+
+
+class TestHTMLIndex:
+ @pytest.fixture
+ def df(self):
+ index = ["foo", "bar", "baz"]
+ df = DataFrame(
+ {"A": [1, 2, 3], "B": [1.2, 3.4, 5.6], "C": ["one", "two", np.nan]},
+ columns=["A", "B", "C"],
+ index=index,
+ )
+ return df
+
+ @pytest.fixture
+ def expected_without_index(self, datapath):
+ return expected_html(datapath, "index_2")
+
+ def test_to_html_flat_index_without_name(
+ self, datapath, df, expected_without_index
+ ):
+ expected_with_index = expected_html(datapath, "index_1")
+ assert df.to_html() == expected_with_index
+
+ result = df.to_html(index=False)
+ for i in df.index:
+ assert i not in result
+ assert result == expected_without_index
+
+ def test_to_html_flat_index_with_name(self, datapath, df, expected_without_index):
+ df.index = Index(["foo", "bar", "baz"], name="idx")
+ expected_with_index = expected_html(datapath, "index_3")
+ assert df.to_html() == expected_with_index
+ assert df.to_html(index=False) == expected_without_index
+
+ def test_to_html_multiindex_without_names(
+ self, datapath, df, expected_without_index
+ ):
+ tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
+ df.index = MultiIndex.from_tuples(tuples)
+
+ expected_with_index = expected_html(datapath, "index_4")
+ assert df.to_html() == expected_with_index
+
+ result = df.to_html(index=False)
+ for i in ["foo", "bar", "car", "bike"]:
+ assert i not in result
+ # must be the same result as normal index
+ assert result == expected_without_index
+
+ def test_to_html_multiindex_with_names(self, datapath, df, expected_without_index):
+ tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
+ df.index = MultiIndex.from_tuples(tuples, names=["idx1", "idx2"])
+ expected_with_index = expected_html(datapath, "index_5")
+ assert df.to_html() == expected_with_index
+ assert df.to_html(index=False) == expected_without_index
+
+
+@pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
+def test_to_html_with_classes(classes, datapath):
+ df = DataFrame()
+ expected = expected_html(datapath, "with_classes")
+ result = df.to_html(classes=classes)
+ assert result == expected
+
+
+def test_to_html_no_index_max_rows(datapath):
+ # GH 14998
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ result = df.to_html(index=False, max_rows=1)
+ expected = expected_html(datapath, "gh14998_expected_output")
+ assert result == expected
+
+
+def test_to_html_multiindex_max_cols(datapath):
+ # GH 6131
+ index = MultiIndex(
+ levels=[["ba", "bb", "bc"], ["ca", "cb", "cc"]],
+ codes=[[0, 1, 2], [0, 1, 2]],
+ names=["b", "c"],
+ )
+ columns = MultiIndex(
+ levels=[["d"], ["aa", "ab", "ac"]],
+ codes=[[0, 0, 0], [0, 1, 2]],
+ names=[None, "a"],
+ )
+ data = np.array(
+ [[1.0, np.nan, np.nan], [np.nan, 2.0, np.nan], [np.nan, np.nan, 3.0]]
+ )
+ df = DataFrame(data, index, columns)
+ result = df.to_html(max_cols=2)
+ expected = expected_html(datapath, "gh6131_expected_output")
+ assert result == expected
+
+
+def test_to_html_multi_indexes_index_false(datapath):
+ # GH 22579
+ df = DataFrame(
+ {"a": range(10), "b": range(10, 20), "c": range(10, 20), "d": range(10, 20)}
+ )
+ df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ df.index = MultiIndex.from_product([["a", "b"], ["c", "d", "e", "f", "g"]])
+ result = df.to_html(index=False)
+ expected = expected_html(datapath, "gh22579_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize("index_names", [True, False])
+@pytest.mark.parametrize("header", [True, False])
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize(
+ "column_index, column_type",
+ [
+ (Index([0, 1]), "unnamed_standard"),
+ (Index([0, 1], name="columns.name"), "named_standard"),
+ (MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
+ (
+ MultiIndex.from_product(
+ [["a"], ["b", "c"]], names=["columns.name.0", "columns.name.1"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "row_index, row_type",
+ [
+ (Index([0, 1]), "unnamed_standard"),
+ (Index([0, 1], name="index.name"), "named_standard"),
+ (MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
+ (
+ MultiIndex.from_product(
+ [["a"], ["b", "c"]], names=["index.name.0", "index.name.1"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+def test_to_html_basic_alignment(
+ datapath, row_index, row_type, column_index, column_type, index, header, index_names
+):
+ # GH 22747, GH 22579
+ df = DataFrame(np.zeros((2, 2), dtype=int), index=row_index, columns=column_index)
+ result = df.to_html(index=index, header=header, index_names=index_names)
+
+ if not index:
+ row_type = "none"
+ elif not index_names and row_type.startswith("named"):
+ row_type = "un" + row_type
+
+ if not header:
+ column_type = "none"
+ elif not index_names and column_type.startswith("named"):
+ column_type = "un" + column_type
+
+ filename = "index_" + row_type + "_columns_" + column_type
+ expected = expected_html(datapath, filename)
+ assert result == expected
+
+
+@pytest.mark.parametrize("index_names", [True, False])
+@pytest.mark.parametrize("header", [True, False])
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize(
+ "column_index, column_type",
+ [
+ (Index(np.arange(8)), "unnamed_standard"),
+ (Index(np.arange(8), name="columns.name"), "named_standard"),
+ (
+ MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
+ "unnamed_multi",
+ ),
+ (
+ MultiIndex.from_product(
+ [["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "row_index, row_type",
+ [
+ (Index(np.arange(8)), "unnamed_standard"),
+ (Index(np.arange(8), name="index.name"), "named_standard"),
+ (
+ MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
+ "unnamed_multi",
+ ),
+ (
+ MultiIndex.from_product(
+ [["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+def test_to_html_alignment_with_truncation(
+ datapath, row_index, row_type, column_index, column_type, index, header, index_names
+):
+ # GH 22747, GH 22579
+ df = DataFrame(np.arange(64).reshape(8, 8), index=row_index, columns=column_index)
+ result = df.to_html(
+ max_rows=4, max_cols=4, index=index, header=header, index_names=index_names
+ )
+
+ if not index:
+ row_type = "none"
+ elif not index_names and row_type.startswith("named"):
+ row_type = "un" + row_type
+
+ if not header:
+ column_type = "none"
+ elif not index_names and column_type.startswith("named"):
+ column_type = "un" + column_type
+
+ filename = "trunc_df_index_" + row_type + "_columns_" + column_type
+ expected = expected_html(datapath, filename)
+ assert result == expected
+
+
+@pytest.mark.parametrize("index", [False, 0])
+def test_to_html_truncation_index_false_max_rows(datapath, index):
+ # GH 15019
+ data = [
+ [1.764052, 0.400157],
+ [0.978738, 2.240893],
+ [1.867558, -0.977278],
+ [0.950088, -0.151357],
+ [-0.103219, 0.410599],
+ ]
+ df = DataFrame(data)
+ result = df.to_html(max_rows=4, index=index)
+ expected = expected_html(datapath, "gh15019_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize("index", [False, 0])
+@pytest.mark.parametrize(
+ "col_index_named, expected_output",
+ [(False, "gh22783_expected_output"), (True, "gh22783_named_columns_index")],
+)
+def test_to_html_truncation_index_false_max_cols(
+ datapath, index, col_index_named, expected_output
+):
+ # GH 22783
+ data = [
+ [1.764052, 0.400157, 0.978738, 2.240893, 1.867558],
+ [-0.977278, 0.950088, -0.151357, -0.103219, 0.410599],
+ ]
+ df = DataFrame(data)
+ if col_index_named:
+ df.columns.rename("columns.name", inplace=True)
+ result = df.to_html(max_cols=4, index=index)
+ expected = expected_html(datapath, expected_output)
+ assert result == expected
+
+
+@pytest.mark.parametrize("notebook", [True, False])
+def test_to_html_notebook_has_style(notebook):
+ df = DataFrame({"A": [1, 2, 3]})
+ result = df.to_html(notebook=notebook)
+
+ if notebook:
+ assert "tbody tr th:only-of-type" in result
+ assert "vertical-align: middle;" in result
+ assert "thead th" in result
+ else:
+ assert "tbody tr th:only-of-type" not in result
+ assert "vertical-align: middle;" not in result
+ assert "thead th" not in result
+
+
+def test_to_html_with_index_names_false():
+ # GH 16493
+ df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
+ result = df.to_html(index_names=False)
+ assert "myindexname" not in result
+
+
+def test_to_html_with_id():
+ # GH 8496
+ df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
+ result = df.to_html(index_names=False, table_id="TEST_ID")
+ assert ' id="TEST_ID"' in result
+
+
+@pytest.mark.parametrize(
+ "value,float_format,expected",
+ [
+ (0.19999, "%.3f", "gh21625_expected_output"),
+ (100.0, "%.0f", "gh22270_expected_output"),
+ ],
+)
+def test_to_html_float_format_no_fixed_width(value, float_format, expected, datapath):
+ # GH 21625, GH 22270
+ df = DataFrame({"x": [value]})
+ expected = expected_html(datapath, expected)
+ result = df.to_html(float_format=float_format)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "render_links,expected",
+ [(True, "render_links_true"), (False, "render_links_false")],
+)
+def test_to_html_render_links(render_links, expected, datapath):
+ # GH 2679
+ data = [
+ [0, "https://pandas.pydata.org/?q1=a&q2=b", "pydata.org"],
+ [0, "www.pydata.org", "pydata.org"],
+ ]
+ df = DataFrame(data, columns=Index(["foo", "bar", None], dtype=object))
+
+ result = df.to_html(render_links=render_links)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "method,expected",
+ [
+ ("to_html", lambda x: lorem_ipsum),
+ ("_repr_html_", lambda x: lorem_ipsum[: x - 4] + "..."), # regression case
+ ],
+)
+@pytest.mark.parametrize("max_colwidth", [10, 20, 50, 100])
+def test_ignore_display_max_colwidth(method, expected, max_colwidth):
+ # see gh-17004
+ df = DataFrame([lorem_ipsum])
+ with option_context("display.max_colwidth", max_colwidth):
+ result = getattr(df, method)()
+ expected = expected(max_colwidth)
+ assert expected in result
+
+
+@pytest.mark.parametrize("classes", [True, 0])
+def test_to_html_invalid_classes_type(classes):
+ # GH 25608
+ df = DataFrame()
+ msg = "classes must be a string, list, or tuple"
+
+ with pytest.raises(TypeError, match=msg):
+ df.to_html(classes=classes)
+
+
+def test_to_html_round_column_headers():
+ # GH 17280
+ df = DataFrame([1], columns=[0.55555])
+ with option_context("display.precision", 3):
+ html = df.to_html(notebook=False)
+ notebook = df.to_html(notebook=True)
+ assert "0.55555" in html
+ assert "0.556" in notebook
+
+
+@pytest.mark.parametrize("unit", ["100px", "10%", "5em", 150])
+def test_to_html_with_col_space_units(unit):
+ # GH 25941
+ df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
+ result = df.to_html(col_space=unit)
+ result = result.split("tbody")[0]
+ hdrs = [x for x in result.split("\n") if re.search(r"\s]", x)]
+ if isinstance(unit, int):
+ unit = str(unit) + "px"
+ for h in hdrs:
+ expected = f' '
+ assert expected in h
+
+
+class TestReprHTML:
+ def test_html_repr_min_rows_default(self, datapath):
+ # gh-27991
+
+ # default setting no truncation even if above min_rows
+ df = DataFrame({"a": range(20)})
+ result = df._repr_html_()
+ expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation")
+ assert result == expected
+
+ # default of max_rows 60 triggers truncation if above
+ df = DataFrame({"a": range(61)})
+ result = df._repr_html_()
+ expected = expected_html(datapath, "html_repr_min_rows_default_truncated")
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "max_rows,min_rows,expected",
+ [
+ # truncated after first two rows
+ (10, 4, "html_repr_max_rows_10_min_rows_4"),
+ # when set to None, follow value of max_rows
+ (12, None, "html_repr_max_rows_12_min_rows_None"),
+ # when set value higher as max_rows, use the minimum
+ (10, 12, "html_repr_max_rows_10_min_rows_12"),
+ # max_rows of None -> never truncate
+ (None, 12, "html_repr_max_rows_None_min_rows_12"),
+ ],
+ )
+ def test_html_repr_min_rows(self, datapath, max_rows, min_rows, expected):
+ # gh-27991
+
+ df = DataFrame({"a": range(61)})
+ expected = expected_html(datapath, expected)
+ with option_context("display.max_rows", max_rows, "display.min_rows", min_rows):
+ result = df._repr_html_()
+ assert result == expected
+
+ def test_repr_html_ipython_config(self, ip):
+ code = textwrap.dedent(
+ """\
+ from pandas import DataFrame
+ df = DataFrame({"A": [1, 2]})
+ df._repr_html_()
+
+ cfg = get_ipython().config
+ cfg['IPKernelApp']['parent_appname']
+ df._repr_html_()
+ """
+ )
+ result = ip.run_cell(code, silent=True)
+ assert not result.error_in_exec
+
+ def test_info_repr_html(self):
+ max_rows = 60
+ max_cols = 20
+ # Long
+ h, w = max_rows + 1, max_cols - 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert r"<class" not in df._repr_html_()
+ with option_context("display.large_repr", "info"):
+ assert r"<class" in df._repr_html_()
+
+ # Wide
+ h, w = max_rows - 1, max_cols + 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert "{40 + h}" in reg_repr
+
+ h = max_rows + 1
+ df = DataFrame(
+ {
+ "idx": np.linspace(-10, 10, h),
+ "A": np.arange(1, 1 + h),
+ "B": np.arange(41, 41 + h),
+ }
+ ).set_index("idx")
+ long_repr = df._repr_html_()
+ assert ".." in long_repr
+ assert "31 " not in long_repr
+ assert f"{h} rows " in long_repr
+ assert "2 columns" in long_repr
+
+ def test_repr_html_long_multiindex(self):
+ max_rows = 60
+ max_L1 = max_rows // 2
+
+ tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
+ idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((max_L1 * 2, 2)),
+ index=idx,
+ columns=["A", "B"],
+ )
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ reg_repr = df._repr_html_()
+ assert "..." not in reg_repr
+
+ tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
+ idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal(((max_L1 + 1) * 2, 2)),
+ index=idx,
+ columns=["A", "B"],
+ )
+ long_repr = df._repr_html_()
+ assert "..." in long_repr
+
+ def test_repr_html_long_and_wide(self):
+ max_cols = 20
+ max_rows = 60
+
+ h, w = max_rows - 1, max_cols - 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ assert "..." not in df._repr_html_()
+
+ h, w = max_rows + 1, max_cols + 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ assert "..." in df._repr_html_()
+
+
+def test_to_html_multilevel(multiindex_year_month_day_dataframe_random_data):
+ ymd = multiindex_year_month_day_dataframe_random_data
+
+ ymd.columns.name = "foo"
+ ymd.to_html()
+ ymd.T.to_html()
+
+
+@pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+def test_to_html_na_rep_and_float_format(na_rep, datapath):
+ # https://github.com/pandas-dev/pandas/issues/13828
+ df = DataFrame(
+ [
+ ["A", 1.2225],
+ ["A", None],
+ ],
+ columns=["Group", "Data"],
+ )
+ result = df.to_html(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = expected_html(datapath, "gh13828_expected_output")
+ expected = expected.format(na_rep=na_rep)
+ assert result == expected
+
+
+def test_to_html_na_rep_non_scalar_data(datapath):
+ # GH47103
+ df = DataFrame([{"a": 1, "b": [1, 2, 3]}])
+ result = df.to_html(na_rep="-")
+ expected = expected_html(datapath, "gh47103_expected_output")
+ assert result == expected
+
+
+def test_to_html_float_format_object_col(datapath):
+ # GH#40024
+ df = DataFrame(data={"x": [1000.0, "test"]})
+ result = df.to_html(float_format=lambda x: f"{x:,.0f}")
+ expected = expected_html(datapath, "gh40024_expected_output")
+ assert result == expected
+
+
+def test_to_html_multiindex_col_with_colspace():
+ # GH#53885
+ df = DataFrame([[1, 2]])
+ df.columns = MultiIndex.from_tuples([(1, 1), (2, 1)])
+ result = df.to_html(col_space=100)
+ expected = (
+ '\n'
+ " \n"
+ " \n"
+ ' \n'
+ ' 1 \n'
+ ' 2 \n'
+ " \n"
+ " \n"
+ ' \n'
+ ' 1 \n'
+ ' 1 \n'
+ " \n"
+ " \n"
+ " \n"
+ " \n"
+ " 0 \n"
+ " 1 \n"
+ " 2 \n"
+ " \n"
+ " \n"
+ "
"
+ )
+ assert result == expected
+
+
+def test_to_html_tuple_col_with_colspace():
+ # GH#53885
+ df = DataFrame({("a", "b"): [1], "b": [2]})
+ result = df.to_html(col_space=100)
+ expected = (
+ '\n'
+ " \n"
+ ' \n'
+ ' \n'
+ ' (a, b) \n'
+ ' b \n'
+ " \n"
+ " \n"
+ " \n"
+ " \n"
+ " 0 \n"
+ " 1 \n"
+ " 2 \n"
+ " \n"
+ " \n"
+ "
"
+ )
+ assert result == expected
+
+
+def test_to_html_empty_complex_array():
+ # GH#54167
+ df = DataFrame({"x": np.array([], dtype="complex")})
+ result = df.to_html(col_space=100)
+ expected = (
+ '\n'
+ " \n"
+ ' \n'
+ ' \n'
+ ' x \n'
+ " \n"
+ " \n"
+ " \n"
+ " \n"
+ "
"
+ )
+ assert result == expected
+
+
+def test_to_html_pos_args_deprecation():
+ # GH-54229
+ df = DataFrame({"a": [1, 2, 3]})
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_html except for the "
+ r"argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_html(None, None)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fd96dff27d06dc3056b56c5f7e8eb054e98bd8f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py
@@ -0,0 +1,1425 @@
+import codecs
+from datetime import datetime
+from textwrap import dedent
+
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Series,
+)
+import pandas._testing as tm
+
+pytest.importorskip("jinja2")
+
+
+def _dedent(string):
+ """Dedent without new line in the beginning.
+
+ Built-in textwrap.dedent would keep new line character in the beginning
+ of multi-line string starting from the new line.
+ This version drops the leading new line character.
+ """
+ return dedent(string).lstrip()
+
+
+@pytest.fixture
+def df_short():
+ """Short dataframe for testing table/tabular/longtable LaTeX env."""
+ return DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+
+
+class TestToLatex:
+ def test_to_latex_to_file(self, float_frame):
+ with tm.ensure_clean("test.tex") as path:
+ float_frame.to_latex(path)
+ with open(path, encoding="utf-8") as f:
+ assert float_frame.to_latex() == f.read()
+
+ def test_to_latex_to_file_utf8_with_encoding(self):
+ # test with utf-8 and encoding option (GH 7061)
+ df = DataFrame([["au\xdfgangen"]])
+ with tm.ensure_clean("test.tex") as path:
+ df.to_latex(path, encoding="utf-8")
+ with codecs.open(path, "r", encoding="utf-8") as f:
+ assert df.to_latex() == f.read()
+
+ def test_to_latex_to_file_utf8_without_encoding(self):
+ # test with utf-8 without encoding option
+ df = DataFrame([["au\xdfgangen"]])
+ with tm.ensure_clean("test.tex") as path:
+ df.to_latex(path)
+ with codecs.open(path, "r", encoding="utf-8") as f:
+ assert df.to_latex() == f.read()
+
+ def test_to_latex_tabular_with_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_tabular_without_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ a & b \\
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "bad_column_format",
+ [5, 1.2, ["l", "r"], ("r", "c"), {"r", "c", "l"}, {"a": "r", "b": "l"}],
+ )
+ def test_to_latex_bad_column_format(self, bad_column_format):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ msg = r"`column_format` must be str or unicode"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(column_format=bad_column_format)
+
+ def test_to_latex_column_format_just_works(self, float_frame):
+ # GH Bug #9402
+ float_frame.to_latex(column_format="lcr")
+
+ def test_to_latex_column_format(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(column_format="lcr")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lcr}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_object_col(self):
+ # GH#40024
+ ser = Series([1000.0, "test"])
+ result = ser.to_latex(float_format="{:,.0f}".format)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & 1,000 \\
+ 1 & test \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_empty_tabular(self):
+ df = DataFrame()
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{l}
+ \toprule
+ \midrule
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_series(self):
+ s = Series(["a", "b", "c"])
+ result = s.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & a \\
+ 1 & b \\
+ 2 & c \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_midrule_location(self):
+ # GH 18326
+ df = DataFrame({"a": [1, 2]})
+ df.index.name = "foo"
+ result = df.to_latex(index_names=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & a \\
+ \midrule
+ 0 & 1 \\
+ 1 & 2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_pos_args_deprecation(self):
+ # GH-54229
+ df = DataFrame(
+ {
+ "name": ["Raphael", "Donatello"],
+ "age": [26, 45],
+ "height": [181.23, 177.65],
+ }
+ )
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_latex except for "
+ r"the argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_latex(None, None)
+
+
+class TestToLatexLongtable:
+ def test_to_latex_empty_longtable(self):
+ df = DataFrame()
+ result = df.to_latex(longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{l}
+ \toprule
+ \midrule
+ \endfirsthead
+ \toprule
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{0}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_with_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_without_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False, longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{rl}
+ \toprule
+ a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{2}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 1 & b1 \\
+ 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "df, expected_number",
+ [
+ (DataFrame({"a": [1, 2]}), 1),
+ (DataFrame({"a": [1, 2], "b": [3, 4]}), 2),
+ (DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}), 3),
+ ],
+ )
+ def test_to_latex_longtable_continued_on_next_page(self, df, expected_number):
+ result = df.to_latex(index=False, longtable=True)
+ assert rf"\multicolumn{{{expected_number}}}" in result
+
+
+class TestToLatexHeader:
+ def test_to_latex_no_header_with_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_no_header_without_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False, header=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_with_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["AA", "BB"])
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & AA & BB \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_without_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["AA", "BB"], index=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ AA & BB \\
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "header, num_aliases",
+ [
+ (["A"], 1),
+ (("B",), 1),
+ (("Col1", "Col2", "Col3"), 3),
+ (("Col1", "Col2", "Col3", "Col4"), 4),
+ ],
+ )
+ def test_to_latex_number_of_items_in_header_missmatch_raises(
+ self,
+ header,
+ num_aliases,
+ ):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ msg = f"Writing 2 cols but got {num_aliases} aliases"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(header=header)
+
+ def test_to_latex_decimal(self):
+ # GH 12031
+ df = DataFrame({"a": [1.0, 2.1], "b": ["b1", "b2"]})
+ result = df.to_latex(decimal=",")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1,000000 & b1 \\
+ 1 & 2,100000 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexBold:
+ def test_to_latex_bold_rows(self):
+ # GH 16707
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(bold_rows=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \textbf{0} & 1 & b1 \\
+ \textbf{1} & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_no_bold_rows(self):
+ # GH 16707
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(bold_rows=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexCaptionLabel:
+ @pytest.fixture
+ def caption_table(self):
+ """Caption for table/tabular LaTeX environment."""
+ return "a table in a \\texttt{table/tabular} environment"
+
+ @pytest.fixture
+ def short_caption(self):
+ """Short caption for testing \\caption[short_caption]{full_caption}."""
+ return "a table"
+
+ @pytest.fixture
+ def label_table(self):
+ """Label for table/tabular LaTeX environment."""
+ return "tab:table_tabular"
+
+ @pytest.fixture
+ def caption_longtable(self):
+ """Caption for longtable LaTeX environment."""
+ return "a table in a \\texttt{longtable} environment"
+
+ @pytest.fixture
+ def label_longtable(self):
+ """Label for longtable LaTeX environment."""
+ return "tab:longtable"
+
+ def test_to_latex_caption_only(self, df_short, caption_table):
+ # GH 25436
+ result = df_short.to_latex(caption=caption_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{a table in a \texttt{table/tabular} environment}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_label_only(self, df_short, label_table):
+ # GH 25436
+ result = df_short.to_latex(label=label_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_label(self, df_short, caption_table, label_table):
+ # GH 25436
+ result = df_short.to_latex(caption=caption_table, label=label_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{a table in a \texttt{table/tabular} environment}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_shortcaption(
+ self,
+ df_short,
+ caption_table,
+ short_caption,
+ ):
+ result = df_short.to_latex(caption=(caption_table, short_caption))
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption[a table]{a table in a \texttt{table/tabular} environment}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_shortcaption_list_is_ok(self, df_short):
+ caption = ("Long-long-caption", "Short")
+ result_tuple = df_short.to_latex(caption=caption)
+ result_list = df_short.to_latex(caption=list(caption))
+ assert result_tuple == result_list
+
+ def test_to_latex_caption_shortcaption_and_label(
+ self,
+ df_short,
+ caption_table,
+ short_caption,
+ label_table,
+ ):
+ # test when the short_caption is provided alongside caption and label
+ result = df_short.to_latex(
+ caption=(caption_table, short_caption),
+ label=label_table,
+ )
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption[a table]{a table in a \texttt{table/tabular} environment}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "bad_caption",
+ [
+ ("full_caption", "short_caption", "extra_string"),
+ ("full_caption", "short_caption", 1),
+ ("full_caption", "short_caption", None),
+ ("full_caption",),
+ (None,),
+ ],
+ )
+ def test_to_latex_bad_caption_raises(self, bad_caption):
+ # test that wrong number of params is raised
+ # Tuples of any length other than 2 must raise ValueError.
+ df = DataFrame({"a": [1]})
+ msg = "`caption` must be either a string or 2-tuple of strings"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(caption=bad_caption)
+
+ def test_to_latex_two_chars_caption(self, df_short):
+ # test that two chars caption is handled correctly
+ # it must not be unpacked into long_caption, short_caption.
+ result = df_short.to_latex(caption="xy")
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{xy}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_only(self, df_short, caption_longtable):
+ # GH 25436
+ # test when no caption and no label is provided
+ # is performed by test_to_latex_longtable()
+ # In longtable mode the caption is repeated (empty form) after \endfirsthead.
+ result = df_short.to_latex(longtable=True, caption=caption_longtable)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_label_only(self, df_short, label_longtable):
+ # GH 25436
+ # A label without a caption still emits \label on its own line.
+ result = df_short.to_latex(longtable=True, label=label_longtable)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \label{tab:longtable} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_and_label(
+ self,
+ df_short,
+ caption_longtable,
+ label_longtable,
+ ):
+ # GH 25436
+ # Caption and label share one line in longtable mode.
+ result = df_short.to_latex(
+ longtable=True,
+ caption=caption_longtable,
+ label=label_longtable,
+ )
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_shortcaption_and_label(
+ self,
+ df_short,
+ caption_longtable,
+ short_caption,
+ label_longtable,
+ ):
+ # test when the caption, the short_caption and the label are provided
+ # NOTE: the expected string is kept at column 0 (not indented for _dedent)
+ # because its lines would otherwise exceed the line-length limit.
+ result = df_short.to_latex(
+ longtable=True,
+ caption=(caption_longtable, short_caption),
+ label=label_longtable,
+ )
+ expected = _dedent(
+ r"""
+\begin{longtable}{lrl}
+\caption[a table]{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
+\toprule
+ & a & b \\
+\midrule
+\endfirsthead
+\caption[]{a table in a \texttt{longtable} environment} \\
+\toprule
+ & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{Continued on next page} \\
+\midrule
+\endfoot
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ )
+ assert result == expected
+
+
+class TestToLatexEscape:
+ # Tests for escaping of LaTeX special characters in DataFrame.to_latex.
+ @pytest.fixture
+ def df_with_symbols(self):
+ """Dataframe with special characters for testing chars escaping."""
+ a = "a"
+ b = "b"
+ yield DataFrame({"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}})
+
+ def test_to_latex_escape_false(self, df_with_symbols):
+ # escape=False passes $ and ^ through untouched.
+ result = df_with_symbols.to_latex(escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & co$e^x$ & co^l1 \\
+ \midrule
+ a & a & a \\
+ b & b & b \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_escape_default(self, df_with_symbols):
+ # gh50871: in v2.0 escape is False by default (styler.format.escape=None)
+ default = df_with_symbols.to_latex()
+ specified_true = df_with_symbols.to_latex(escape=True)
+ assert default != specified_true
+
+ def test_to_latex_special_escape(self):
+ # Backslash, caret and tilde need word-form escapes, not just a backslash.
+ df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"])
+ result = df.to_latex(escape=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & a\textbackslash b\textbackslash c \\
+ 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\
+ 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_escape_special_chars(self):
+ # Each of the ten LaTeX special characters gets its documented escape.
+ special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"]
+ df = DataFrame(data=special_characters)
+ result = df.to_latex(escape=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & \& \\
+ 1 & \% \\
+ 2 & \$ \\
+ 3 & \# \\
+ 4 & \_ \\
+ 5 & \{ \\
+ 6 & \} \\
+ 7 & \textasciitilde \\
+ 8 & \textasciicircum \\
+ 9 & \textbackslash \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_special_chars_without_escape(self):
+ # GH 7124
+ # User-supplied headers with math markup survive when escape=False.
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["$A$", "$B$"], escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & $A$ & $B$ \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexPosition:
+ # Tests for the `position` argument (LaTeX float placement specifier).
+ def test_to_latex_position(self):
+ # position is appended in brackets to \begin{table}.
+ the_position = "h"
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(position=the_position)
+ expected = _dedent(
+ r"""
+ \begin{table}[h]
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_position(self):
+ # In longtable mode the position goes before the column spec.
+ the_position = "t"
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(longtable=True, position=the_position)
+ expected = _dedent(
+ r"""
+ \begin{longtable}[t]{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexFormatters:
+ # Tests for `formatters`, `float_format` and `na_rep` in to_latex.
+ def test_to_latex_with_formatters(self):
+ # Per-column formatters, plus the special "__index__" key for the index.
+ df = DataFrame(
+ {
+ "datetime64": [
+ datetime(2016, 1, 1),
+ datetime(2016, 2, 5),
+ datetime(2016, 3, 3),
+ ],
+ "float": [1.0, 2.0, 3.0],
+ "int": [1, 2, 3],
+ "object": [(1, 2), True, False],
+ }
+ )
+
+ formatters = {
+ "datetime64": lambda x: x.strftime("%Y-%m"),
+ "float": lambda x: f"[{x: 4.1f}]",
+ "int": lambda x: f"0x{x:x}",
+ "object": lambda x: f"-{x!s}-",
+ "__index__": lambda x: f"index: {x}",
+ }
+ result = df.to_latex(formatters=dict(formatters))
+
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrl}
+ \toprule
+ & datetime64 & float & int & object \\
+ \midrule
+ index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
+ index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
+ index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_no_fixed_width_3decimals(self):
+ # GH 21625
+ df = DataFrame({"x": [0.19999]})
+ result = df.to_latex(float_format="%.3f")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & x \\
+ \midrule
+ 0 & 0.200 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_no_fixed_width_integer(self):
+ # GH 22270
+ df = DataFrame({"x": [100.0]})
+ result = df.to_latex(float_format="%.0f")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & x \\
+ \midrule
+ 0 & 100 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+ def test_to_latex_na_rep_and_float_format(self, na_rep):
+ # na_rep replaces the missing value while float_format styles the rest.
+ df = DataFrame(
+ [
+ ["A", 1.2225],
+ ["A", None],
+ ],
+ columns=["Group", "Data"],
+ )
+ result = df.to_latex(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = _dedent(
+ rf"""
+ \begin{{tabular}}{{llr}}
+ \toprule
+ & Group & Data \\
+ \midrule
+ 0 & A & 1.22 \\
+ 1 & A & {na_rep} \\
+ \bottomrule
+ \end{{tabular}}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexMultiindex:
+ # Tests for MultiIndex rows/columns: \multirow, \multicolumn and \cline.
+ @pytest.fixture
+ def multiindex_frame(self):
+ """Multiindex dataframe for testing multirow LaTeX macros."""
+ yield DataFrame.from_dict(
+ {
+ ("c1", 0): Series({x: x for x in range(4)}),
+ ("c1", 1): Series({x: x + 4 for x in range(4)}),
+ ("c2", 0): Series({x: x for x in range(4)}),
+ ("c2", 1): Series({x: x + 4 for x in range(4)}),
+ ("c3", 0): Series({x: x for x in range(4)}),
+ }
+ ).T
+
+ @pytest.fixture
+ def multicolumn_frame(self):
+ """Multicolumn dataframe for testing multicolumn LaTeX macros."""
+ yield DataFrame(
+ {
+ ("c1", 0): {x: x for x in range(5)},
+ ("c1", 1): {x: x + 5 for x in range(5)},
+ ("c2", 0): {x: x for x in range(5)},
+ ("c2", 1): {x: x + 5 for x in range(5)},
+ ("c3", 0): {x: x for x in range(5)},
+ }
+ )
+
+ def test_to_latex_multindex_header(self):
+ # GH 16718
+ df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]})
+ df = df.set_index(["a", "b"])
+ observed = df.to_latex(header=["r1", "r2"], multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrr}
+ \toprule
+ & & r1 & r2 \\
+ a & b & & \\
+ \midrule
+ 0 & 1 & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert observed == expected
+
+ def test_to_latex_multiindex_empty_name(self):
+ # GH 18669
+ mi = pd.MultiIndex.from_product([[1, 2]], names=[""])
+ df = DataFrame(-1, index=mi, columns=range(4))
+ observed = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrr}
+ \toprule
+ & 0 & 1 & 2 & 3 \\
+ & & & & \\
+ \midrule
+ 1 & -1 & -1 & -1 & -1 \\
+ 2 & -1 & -1 & -1 & -1 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert observed == expected
+
+ def test_to_latex_multiindex_column_tabular(self):
+ # Two-level column MultiIndex renders one header row per level.
+ df = DataFrame({("x", "y"): ["a"]})
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & x \\
+ & y \\
+ \midrule
+ 0 & a \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_small_tabular(self):
+ df = DataFrame({("x", "y"): ["a"]}).T
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & & 0 \\
+ \midrule
+ x & y & a \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_tabular(self, multiindex_frame):
+ # With multirow=False repeated outer labels are simply blanked.
+ result = multiindex_frame.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrr}
+ \toprule
+ & & 0 & 1 & 2 & 3 \\
+ \midrule
+ c1 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c2 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c3 & 0 & 0 & 1 & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_tabular(self, multiindex_frame):
+ # GH 14184
+ df = multiindex_frame.T
+ df.columns.names = ["a", "b"]
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ a & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\
+ b & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 4 & 0 & 4 & 0 \\
+ 1 & 1 & 5 & 1 & 5 & 1 \\
+ 2 & 2 & 6 & 2 & 6 & 2 \\
+ 3 & 3 & 7 & 3 & 7 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_index_has_name_tabular(self):
+ # GH 10660
+ df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
+ result = df.set_index(["a", "b"]).to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & c \\
+ a & b & \\
+ \midrule
+ 0 & a & 1 \\
+ & b & 2 \\
+ 1 & a & 3 \\
+ & b & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_groupby_tabular(self):
+ # GH 10660
+ df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
+ result = (
+ df.groupby("a")
+ .describe()
+ .to_latex(float_format="{:.1f}".format, escape=True)
+ )
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrrrrr}
+ \toprule
+ & \multicolumn{8}{r}{c} \\
+ & count & mean & std & min & 25\% & 50\% & 75\% & max \\
+ a & & & & & & & & \\
+ \midrule
+ 0 & 2.0 & 1.5 & 0.7 & 1.0 & 1.2 & 1.5 & 1.8 & 2.0 \\
+ 1 & 2.0 & 3.5 & 0.7 & 3.0 & 3.2 & 3.5 & 3.8 & 4.0 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_dupe_level(self):
+ # see gh-14484
+ #
+ # If an index is repeated in subsequent rows, it should be
+ # replaced with a blank in the created table. This should
+ # ONLY happen if all higher order indices (to the left) are
+ # equal too. In this test, 'c' has to be printed both times
+ # because the higher order index 'A' != 'B'.
+ df = DataFrame(
+ index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"]
+ )
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & & col \\
+ \midrule
+ A & c & NaN \\
+ B & c & NaN \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_default(self, multicolumn_frame):
+ # multicolumn defaults to True with right-aligned ("r") grouping.
+ result = multicolumn_frame.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\
+ & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_false(self, multicolumn_frame):
+ # multicolumn=False repeats blank cells instead of \multicolumn spans.
+ result = multicolumn_frame.to_latex(multicolumn=False, multicolumn_format="l")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ & c1 & & c2 & & c3 \\
+ & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multirow_true(self, multicolumn_frame):
+ # multirow=True emits \multirow for repeated outer index labels
+ # and \cline separators between groups.
+ result = multicolumn_frame.T.to_latex(multirow=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrrr}
+ \toprule
+ & & 0 & 1 & 2 & 3 & 4 \\
+ \midrule
+ \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+ \cline{1-7}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumnrow_with_multicol_format(self, multicolumn_frame):
+ # Combined multirow + multicolumn with a centered multicolumn format.
+ multicolumn_frame.index = multicolumn_frame.T.index
+ result = multicolumn_frame.T.to_latex(
+ multirow=True,
+ multicolumn=True,
+ multicolumn_format="c",
+ )
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrrr}
+ \toprule
+ & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
+ & & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+ \cline{1-7}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize("name0", [None, "named0"])
+ @pytest.mark.parametrize("name1", [None, "named1"])
+ @pytest.mark.parametrize("axes", [[0], [1], [0, 1]])
+ def test_to_latex_multiindex_names(self, name0, name1, axes):
+ # GH 18667
+ # Index/column level names appear in the header only for the axes
+ # they were assigned to; the expected string is built accordingly.
+ names = [name0, name1]
+ mi = pd.MultiIndex.from_product([[1, 2], [3, 4]])
+ df = DataFrame(-1, index=mi.copy(), columns=mi.copy())
+ for idx in axes:
+ df.axes[idx].names = names
+
+ idx_names = tuple(n or "" for n in names)
+ idx_names_row = (
+ f"{idx_names[0]} & {idx_names[1]} & & & & \\\\\n"
+ if (0 in axes and any(names))
+ else ""
+ )
+ col_names = [n if (bool(n) and 1 in axes) else "" for n in names]
+ observed = df.to_latex(multirow=False)
+ # pylint: disable-next=consider-using-f-string
+ expected = r"""\begin{tabular}{llrrrr}
+\toprule
+ & %s & \multicolumn{2}{r}{1} & \multicolumn{2}{r}{2} \\
+ & %s & 3 & 4 & 3 & 4 \\
+%s\midrule
+1 & 3 & -1 & -1 & -1 & -1 \\
+ & 4 & -1 & -1 & -1 & -1 \\
+2 & 3 & -1 & -1 & -1 & -1 \\
+ & 4 & -1 & -1 & -1 & -1 \\
+\bottomrule
+\end{tabular}
+""" % tuple(
+ list(col_names) + [idx_names_row]
+ )
+ assert observed == expected
+
+ @pytest.mark.parametrize("one_row", [True, False])
+ def test_to_latex_multiindex_nans(self, one_row):
+ # GH 14249
+ # A NaN in an index level is rendered as "NaN", not blanked.
+ df = DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]})
+ if one_row:
+ df = df.iloc[[0]]
+ observed = df.set_index(["a", "b"]).to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & c \\
+ a & b & \\
+ \midrule
+ NaN & 2 & 4 \\
+ """
+ )
+ if not one_row:
+ expected += r"""1.000000 & 3 & 5 \\
+"""
+ expected += r"""\bottomrule
+\end{tabular}
+"""
+ assert observed == expected
+
+ def test_to_latex_non_string_index(self):
+ # GH 19981
+ df = DataFrame([[1, 2, 3]] * 2).set_index([0, 1])
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & 2 \\
+ 0 & 1 & \\
+ \midrule
+ 1 & 2 & 3 \\
+ & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_multirow(self):
+ # GH 16719
+ mi = pd.MultiIndex.from_product(
+ [[0.0, 1.0], [3.0, 2.0, 1.0], ["0", "1"]], names=["i", "val0", "val1"]
+ )
+ df = DataFrame(index=mi)
+ result = df.to_latex(multirow=True, escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ i & val0 & val1 \\
+ \midrule
+ \multirow[t]{6}{*}{0.000000} & \multirow[t]{2}{*}{3.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{2.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{1.000000} & 0 \\
+ & & 1 \\
+ \cline{1-3} \cline{2-3}
+ \multirow[t]{6}{*}{1.000000} & \multirow[t]{2}{*}{3.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{2.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{1.000000} & 0 \\
+ & & 1 \\
+ \cline{1-3} \cline{2-3}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py
new file mode 100644
index 0000000000000000000000000000000000000000..85eca834ff0d43ca30eb4043ed9f97fd3807899b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py
@@ -0,0 +1,106 @@
+from io import (
+ BytesIO,
+ StringIO,
+)
+
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+pytest.importorskip("tabulate")
+
+
+def test_simple():
+ # Default markdown pipe-table output for a single-column frame.
+ buf = StringIO()
+ df = pd.DataFrame([1, 2, 3])
+ df.to_markdown(buf=buf)
+ result = buf.getvalue()
+ assert (
+ result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
+ )
+
+
+def test_empty_frame():
+ # An empty frame still renders a header row and separator line.
+ buf = StringIO()
+ df = pd.DataFrame({"id": [], "first_name": [], "last_name": []}).set_index("id")
+ df.to_markdown(buf=buf)
+ result = buf.getvalue()
+ assert result == (
+ "| id | first_name | last_name |\n"
+ "|------|--------------|-------------|"
+ )
+
+
+def test_other_tablefmt():
+ # tablefmt is forwarded to tabulate (here the "jira" format).
+ buf = StringIO()
+ df = pd.DataFrame([1, 2, 3])
+ df.to_markdown(buf=buf, tablefmt="jira")
+ result = buf.getvalue()
+ assert result == "|| || 0 ||\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
+
+
+def test_other_headers():
+ # Custom headers replace the column labels in the rendered table.
+ buf = StringIO()
+ df = pd.DataFrame([1, 2, 3])
+ df.to_markdown(buf=buf, headers=["foo", "bar"])
+ result = buf.getvalue()
+ assert result == (
+ "| foo | bar |\n|------:|------:|\n| 0 "
+ "| 1 |\n| 1 | 2 |\n| 2 | 3 |"
+ )
+
+
+def test_series():
+ # Series.to_markdown uses the series name as the column header.
+ buf = StringIO()
+ s = pd.Series([1, 2, 3], name="foo")
+ s.to_markdown(buf=buf)
+ result = buf.getvalue()
+ assert result == (
+ "| | foo |\n|---:|------:|\n| 0 | 1 "
+ "|\n| 1 | 2 |\n| 2 | 3 |"
+ )
+
+
+def test_no_buf():
+ # Without a buffer, to_markdown returns the rendered string.
+ df = pd.DataFrame([1, 2, 3])
+ result = df.to_markdown()
+ assert (
+ result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
+ )
+
+
+@pytest.mark.parametrize("index", [True, False])
+def test_index(index):
+ # GH 32667
+ # index=False drops the leading index column from the table.
+ df = pd.DataFrame([1, 2, 3])
+
+ result = df.to_markdown(index=index)
+
+ if index:
+ expected = (
+ "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
+ )
+ else:
+ expected = "| 0 |\n|----:|\n| 1 |\n| 2 |\n| 3 |"
+ assert result == expected
+
+
+def test_showindex_disallowed_in_kwargs():
+ # GH 32667; disallowing showindex in kwargs enforced in 2.0
+ df = pd.DataFrame([1, 2, 3])
+ with pytest.raises(ValueError, match="Pass 'index' instead of 'showindex"):
+ df.to_markdown(index=True, showindex=True)
+
+
+def test_markdown_pos_args_deprecatation():
+ # GH-54229
+ df = pd.DataFrame({"a": [1, 2, 3]})
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_markdown except for the "
+ r"argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ buffer = BytesIO()
+ df.to_markdown(buffer, "grid")
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e5a5005cb0761c104ed6de26cd5a6ef730c08d5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py
@@ -0,0 +1,1216 @@
+from datetime import (
+ datetime,
+ timedelta,
+)
+from io import StringIO
+import re
+import sys
+from textwrap import dedent
+
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas import (
+ CategoricalIndex,
+ DataFrame,
+ Index,
+ NaT,
+ Series,
+ Timestamp,
+ concat,
+ date_range,
+ get_option,
+ option_context,
+ read_csv,
+ timedelta_range,
+ to_datetime,
+)
+import pandas._testing as tm
+
+
+def _three_digit_exp():
+ # True when the platform formats float exponents with three digits
+ # (e.g. "1.7e+008" on some Windows builds) instead of two.
+ return f"{1.7e8:.4g}" == "1.7e+008"
+
+
+class TestDataFrameToStringFormatters:
+ # Tests for the `formatters` argument of DataFrame.to_string.
+ def test_to_string_masked_ea_with_formatter(self):
+ # GH#39336
+ df = DataFrame(
+ {
+ "a": Series([0.123456789, 1.123456789], dtype="Float64"),
+ "b": Series([1, 2], dtype="Int64"),
+ }
+ )
+ result = df.to_string(formatters=["{:.2f}".format, "{:.2f}".format])
+ expected = dedent(
+ """\
+ a b
+ 0 0.12 1.00
+ 1 1.12 2.00"""
+ )
+ assert result == expected
+
+ def test_to_string_with_formatters(self):
+ # Formatters may be given as a dict keyed by column or a positional list;
+ # both spellings must produce identical output.
+ df = DataFrame(
+ {
+ "int": [1, 2, 3],
+ "float": [1.0, 2.0, 3.0],
+ "object": [(1, 2), True, False],
+ },
+ columns=["int", "float", "object"],
+ )
+
+ formatters = [
+ ("int", lambda x: f"0x{x:x}"),
+ ("float", lambda x: f"[{x: 4.1f}]"),
+ ("object", lambda x: f"-{x!s}-"),
+ ]
+ result = df.to_string(formatters=dict(formatters))
+ result2 = df.to_string(formatters=list(zip(*formatters))[1])
+ assert result == (
+ " int float object\n"
+ "0 0x1 [ 1.0] -(1, 2)-\n"
+ "1 0x2 [ 2.0] -True-\n"
+ "2 0x3 [ 3.0] -False-"
+ )
+ assert result == result2
+
+ def test_to_string_with_datetime64_monthformatter(self):
+ months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
+ x = DataFrame({"months": months})
+
+ def format_func(x):
+ return x.strftime("%Y-%m")
+
+ result = x.to_string(formatters={"months": format_func})
+ expected = dedent(
+ """\
+ months
+ 0 2016-01
+ 1 2016-02"""
+ )
+ assert result.strip() == expected
+
+ def test_to_string_with_datetime64_hourformatter(self):
+ x = DataFrame(
+ {"hod": to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")}
+ )
+
+ def format_func(x):
+ return x.strftime("%H:%M")
+
+ result = x.to_string(formatters={"hod": format_func})
+ expected = dedent(
+ """\
+ hod
+ 0 10:10
+ 1 12:12"""
+ )
+ assert result.strip() == expected
+
+ def test_to_string_with_formatters_unicode(self):
+ # Non-ASCII column names must work as formatter keys.
+ df = DataFrame({"c/\u03c3": [1, 2, 3]})
+ result = df.to_string(formatters={"c/\u03c3": str})
+ expected = dedent(
+ """\
+ c/\u03c3
+ 0 1
+ 1 2
+ 2 3"""
+ )
+ assert result == expected
+
+ def test_to_string_index_formatter(self):
+ # The special "__index__" key formats the index labels.
+ df = DataFrame([range(5), range(5, 10), range(10, 15)])
+
+ rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
+
+ xp = dedent(
+ """\
+ 0 1 2 3 4
+ a 0 1 2 3 4
+ b 5 6 7 8 9
+ c 10 11 12 13 14\
+ """
+ )
+ assert rs == xp
+
+ def test_no_extra_space(self):
+ # GH#52690: Check that no extra space is given
+ col1 = "TEST"
+ col2 = "PANDAS"
+ col3 = "to_string"
+ expected = f"{col1:<6s} {col2:<7s} {col3:<10s}"
+ df = DataFrame([{"col1": "TEST", "col2": "PANDAS", "col3": "to_string"}])
+ d = {"col1": "{:<6s}".format, "col2": "{:<7s}".format, "col3": "{:<10s}".format}
+ result = df.to_string(index=False, header=False, formatters=d)
+ assert result == expected
+
+
+class TestDataFrameToStringColSpace:
+ # Tests for the `col_space` argument of DataFrame.to_string.
+ def test_to_string_with_column_specific_col_space_raises(self):
+ # A list col_space must match the column count; a dict must only
+ # reference existing columns.
+ df = DataFrame(
+ np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
+ )
+
+ msg = (
+ "Col_space length\\(\\d+\\) should match "
+ "DataFrame number of columns\\(\\d+\\)"
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_string(col_space=[30, 40])
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_string(col_space=[30, 40, 50, 60])
+
+ msg = "unknown column"
+ with pytest.raises(ValueError, match=msg):
+ df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
+
+ def test_to_string_with_column_specific_col_space(self):
+ df = DataFrame(
+ np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
+ )
+
+ result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
+ # 3 separating space + each col_space for (id, a, b, c)
+ assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
+
+ result = df.to_string(col_space=[10, 11, 12])
+ assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
+
+ def test_to_string_with_col_space(self):
+ # Wider col_space yields strictly wider rows.
+ df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
+ c10 = len(df.to_string(col_space=10).split("\n")[1])
+ c20 = len(df.to_string(col_space=20).split("\n")[1])
+ c30 = len(df.to_string(col_space=30).split("\n")[1])
+ assert c10 < c20 < c30
+
+ # GH#8230
+ # col_space wasn't being applied with header=False
+ with_header = df.to_string(col_space=20)
+ with_header_row1 = with_header.splitlines()[1]
+ no_header = df.to_string(col_space=20, header=False)
+ assert len(with_header_row1) == len(no_header)
+
+ def test_to_string_repr_tuples(self):
+ # Smoke test: tuple-valued cells must not raise with col_space set.
+ buf = StringIO()
+
+ df = DataFrame({"tups": list(zip(range(10), range(10)))})
+ repr(df)
+ df.to_string(col_space=10, buf=buf)
+
+
+class TestDataFrameToStringHeader:
+ # Tests for the `header` argument of DataFrame.to_string.
+ def test_to_string_header_false(self):
+ # GH#49230
+ # The index name line is kept even when the column header is hidden.
+ df = DataFrame([1, 2])
+ df.index.name = "a"
+ s = df.to_string(header=False)
+ expected = "a \n0 1\n1 2"
+ assert s == expected
+
+ df = DataFrame([[1, 2], [3, 4]])
+ df.index.name = "a"
+ s = df.to_string(header=False)
+ expected = "a \n0 1 2\n1 3 4"
+ assert s == expected
+
+ def test_to_string_multindex_header(self):
+ # GH#16718
+ df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
+ res = df.to_string(header=["r1", "r2"])
+ exp = " r1 r2\na b \n0 1 2 3"
+ assert res == exp
+
+ def test_to_string_no_header(self):
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(header=False)
+ expected = "0 1 4\n1 2 5\n2 3 6"
+
+ assert df_s == expected
+
+ def test_to_string_specified_header(self):
+ # A header list substitutes the column labels; a wrong-length list raises.
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(header=["X", "Y"])
+ expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
+
+ assert df_s == expected
+
+ msg = "Writing 2 cols but got 1 aliases"
+ with pytest.raises(ValueError, match=msg):
+ df.to_string(header=["X"])
+
+
+class TestDataFrameToStringLineWidth:
+ # Tests for line wrapping via the `line_width` argument; wrapped output
+ # uses a trailing backslash continuation on each truncated line.
+ def test_to_string_line_width(self):
+ df = DataFrame(123, index=range(10, 15), columns=range(30))
+ lines = df.to_string(line_width=80)
+ assert max(len(line) for line in lines.split("\n")) == 80
+
+ def test_to_string_line_width_no_index(self):
+ # GH#13998, GH#22505
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, index=False)
+ expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, index=False)
+ expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
+
+ df_s = df.to_string(line_width=1, index=False)
+ expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
+
+ assert df_s == expected
+
+ def test_to_string_line_width_no_header(self):
+ # GH#53054
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, header=False)
+ expected = "0 1 \\\n1 2 \n2 3 \n\n0 4 \n1 5 \n2 6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, header=False)
+ expected = "0 11 \\\n1 22 \n2 33 \n\n0 4 \n1 5 \n2 6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
+
+ df_s = df.to_string(line_width=1, header=False)
+ expected = "0 11 \\\n1 22 \n2 -33 \n\n0 4 \n1 5 \n2 -6 "
+
+ assert df_s == expected
+
+ def test_to_string_line_width_with_both_index_and_header(self):
+ # GH#53054
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1)
+ expected = (
+ " x \\\n0 1 \n1 2 \n2 3 \n\n y \n0 4 \n1 5 \n2 6 "
+ )
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1)
+ expected = (
+ " x \\\n0 11 \n1 22 \n2 33 \n\n y \n0 4 \n1 5 \n2 6 "
+ )
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
+
+ df_s = df.to_string(line_width=1)
+ expected = (
+ " x \\\n0 11 \n1 22 \n2 -33 \n\n y \n0 4 \n1 5 \n2 -6 "
+ )
+
+ assert df_s == expected
+
+ def test_to_string_line_width_no_index_no_header(self):
+ # GH#53054
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, index=False, header=False)
+ expected = "1 \\\n2 \n3 \n\n4 \n5 \n6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, index=False, header=False)
+ expected = "11 \\\n22 \n33 \n\n4 \n5 \n6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
+
+ df_s = df.to_string(line_width=1, index=False, header=False)
+ expected = " 11 \\\n 22 \n-33 \n\n 4 \n 5 \n-6 "
+
+ assert df_s == expected
+
+
+class TestToStringNumericFormatting:
+ def test_to_string_float_format_no_fixed_width(self):
+ # GH#21625
+ df = DataFrame({"x": [0.19999]})
+ expected = " x\n0 0.200"
+ assert df.to_string(float_format="%.3f") == expected
+
+ # GH#22270
+ df = DataFrame({"x": [100.0]})
+ expected = " x\n0 100"
+ assert df.to_string(float_format="%.0f") == expected
+
+ def test_to_string_small_float_values(self):
+ # Very small magnitudes switch the column to scientific notation;
+ # the exponent width is platform-dependent (see _three_digit_exp).
+ df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
+
+ result = df.to_string()
+ # sadness per above
+ if _three_digit_exp():
+ expected = (
+ " a\n"
+ "0 1.500000e+000\n"
+ "1 1.000000e-017\n"
+ "2 -5.500000e-007"
+ )
+ else:
+ expected = (
+ " a\n"
+ "0 1.500000e+00\n"
+ "1 1.000000e-17\n"
+ "2 -5.500000e-07"
+ )
+ assert result == expected
+
+ # but not all exactly zero
+ df = df * 0
+ result = df.to_string()
+ expected = " 0\n0 0\n1 0\n2 -0"
+ # TODO: assert that these match??
+ # NOTE(review): `expected` above is computed but never asserted against
+ # `result`; adding the assert could change the test outcome — confirm
+ # intended behavior upstream before enabling it.
+
+ def test_to_string_complex_float_formatting(self):
+ # GH #25514, 25745
+ with option_context("display.precision", 5):
+ df = DataFrame(
+ {
+ "x": [
+ (0.4467846931321966 + 0.0715185102060818j),
+ (0.2739442392974528 + 0.23515228785438969j),
+ (0.26974928742135185 + 0.3250604054898979j),
+ (-1j),
+ ]
+ }
+ )
+ result = df.to_string()
+ expected = (
+ " x\n0 0.44678+0.07152j\n"
+ "1 0.27394+0.23515j\n"
+ "2 0.26975+0.32506j\n"
+ "3 -0.00000-1.00000j"
+ )
+ assert result == expected
+
+ def test_to_string_format_inf(self):
+ # GH#24861
+ df = DataFrame(
+ {
+ "A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
+ "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
+ }
+ )
+ result = df.to_string()
+
+ expected = (
+ " A B\n"
+ "0 -inf -inf\n"
+ "1 inf inf\n"
+ "2 -1.0000 foo\n"
+ "3 -2.1234 foooo\n"
+ "4 3.0000 fooooo\n"
+ "5 4.0000 bar"
+ )
+ assert result == expected
+
+ df = DataFrame(
+ {
+ "A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
+ "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
+ }
+ )
+ result = df.to_string()
+
+ expected = (
+ " A B\n"
+ "0 -inf -inf\n"
+ "1 inf inf\n"
+ "2 -1.0 foo\n"
+ "3 -2.0 foooo\n"
+ "4 3.0 fooooo\n"
+ "5 4.0 bar"
+ )
+ assert result == expected
+
+ def test_to_string_int_formatting(self):
+ df = DataFrame({"x": [-15, 20, 25, -35]})
+ assert issubclass(df["x"].dtype.type, np.integer)
+
+ output = df.to_string()
+ expected = " x\n0 -15\n1 20\n2 25\n3 -35"
+ assert output == expected
+
+ def test_to_string_float_formatting(self):
+ with option_context(
+ "display.precision",
+ 5,
+ "display.notebook_repr_html",
+ False,
+ ):
+ df = DataFrame(
+ {"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
+ )
+
+ df_s = df.to_string()
+
+ if _three_digit_exp():
+ expected = (
+ " x\n0 0.00000e+000\n1 2.50000e-001\n"
+ "2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
+ "5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
+ "8 -1.00000e+006"
+ )
+ else:
+ expected = (
+ " x\n0 0.00000e+00\n1 2.50000e-01\n"
+ "2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
+ "5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
+ "8 -1.00000e+06"
+ )
+ assert df_s == expected
+
+ df = DataFrame({"x": [3234, 0.253]})
+ df_s = df.to_string()
+
+ expected = " x\n0 3234.000\n1 0.253"
+ assert df_s == expected
+
+ assert get_option("display.precision") == 6
+
+ df = DataFrame({"x": [1e9, 0.2512]})
+ df_s = df.to_string()
+
+ if _three_digit_exp():
+ expected = " x\n0 1.000000e+009\n1 2.512000e-001"
+ else:
+ expected = " x\n0 1.000000e+09\n1 2.512000e-01"
+ assert df_s == expected
+
+
+class TestDataFrameToString:
+ def test_to_string_decimal(self):
+ # GH#23614
+ df = DataFrame({"A": [6.0, 3.1, 2.2]})
+ expected = " A\n0 6,0\n1 3,1\n2 2,2"
+ assert df.to_string(decimal=",") == expected
+
+ def test_to_string_left_justify_cols(self):
+ df = DataFrame({"x": [3234, 0.253]})
+ df_s = df.to_string(justify="left")
+ expected = " x \n0 3234.000\n1 0.253"
+ assert df_s == expected
+
+ def test_to_string_format_na(self):
+ df = DataFrame(
+ {
+ "A": [np.nan, -1, -2.1234, 3, 4],
+ "B": [np.nan, "foo", "foooo", "fooooo", "bar"],
+ }
+ )
+ result = df.to_string()
+
+ expected = (
+ " A B\n"
+ "0 NaN NaN\n"
+ "1 -1.0000 foo\n"
+ "2 -2.1234 foooo\n"
+ "3 3.0000 fooooo\n"
+ "4 4.0000 bar"
+ )
+ assert result == expected
+
+ df = DataFrame(
+ {
+ "A": [np.nan, -1.0, -2.0, 3.0, 4.0],
+ "B": [np.nan, "foo", "foooo", "fooooo", "bar"],
+ }
+ )
+ result = df.to_string()
+
+ expected = (
+ " A B\n"
+ "0 NaN NaN\n"
+ "1 -1.0 foo\n"
+ "2 -2.0 foooo\n"
+ "3 3.0 fooooo\n"
+ "4 4.0 bar"
+ )
+ assert result == expected
+
+ def test_to_string_with_dict_entries(self):
+ df = DataFrame({"A": [{"a": 1, "b": 2}]})
+
+ val = df.to_string()
+ assert "'a': 1" in val
+ assert "'b': 2" in val
+
+ def test_to_string_with_categorical_columns(self):
+ # GH#35439
+ data = [[4, 2], [3, 2], [4, 3]]
+ cols = ["aaaaaaaaa", "b"]
+ df = DataFrame(data, columns=cols)
+ df_cat_cols = DataFrame(data, columns=CategoricalIndex(cols))
+
+ assert df.to_string() == df_cat_cols.to_string()
+
+ def test_repr_embedded_ndarray(self):
+ arr = np.empty(10, dtype=[("err", object)])
+ for i in range(len(arr)):
+ arr["err"][i] = np.random.default_rng(2).standard_normal(i)
+
+ df = DataFrame(arr)
+ repr(df["err"])
+ repr(df)
+ df.to_string()
+
+ def test_to_string_truncate(self):
+ # GH 9784 - dont truncate when calling DataFrame.to_string
+ df = DataFrame(
+ [
+ {
+ "a": "foo",
+ "b": "bar",
+ "c": "let's make this a very VERY long line that is longer "
+ "than the default 50 character limit",
+ "d": 1,
+ },
+ {"a": "foo", "b": "bar", "c": "stuff", "d": 1},
+ ]
+ )
+ df.set_index(["a", "b", "c"])
+ assert df.to_string() == (
+ " a b "
+ " c d\n"
+ "0 foo bar let's make this a very VERY long line t"
+ "hat is longer than the default 50 character limit 1\n"
+ "1 foo bar "
+ " stuff 1"
+ )
+ with option_context("max_colwidth", 20):
+ # the display option has no effect on the to_string method
+ assert df.to_string() == (
+ " a b "
+ " c d\n"
+ "0 foo bar let's make this a very VERY long line t"
+ "hat is longer than the default 50 character limit 1\n"
+ "1 foo bar "
+ " stuff 1"
+ )
+ assert df.to_string(max_colwidth=20) == (
+ " a b c d\n"
+ "0 foo bar let's make this ... 1\n"
+ "1 foo bar stuff 1"
+ )
+
+ @pytest.mark.parametrize(
+ "input_array, expected",
+ [
+ ({"A": ["a"]}, "A\na"),
+ ({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"),
+ ({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"),
+ ],
+ )
+ def test_format_remove_leading_space_dataframe(self, input_array, expected):
+ # GH#24980
+ df = DataFrame(input_array).to_string(index=False)
+ assert df == expected
+
+ @pytest.mark.parametrize(
+ "data,expected",
+ [
+ (
+ {"col1": [1, 2], "col2": [3, 4]},
+ " col1 col2\n0 1 3\n1 2 4",
+ ),
+ (
+ {"col1": ["Abc", 0.756], "col2": [np.nan, 4.5435]},
+ " col1 col2\n0 Abc NaN\n1 0.756 4.5435",
+ ),
+ (
+ {"col1": [np.nan, "a"], "col2": [0.009, 3.543], "col3": ["Abc", 23]},
+ " col1 col2 col3\n0 NaN 0.009 Abc\n1 a 3.543 23",
+ ),
+ ],
+ )
+ def test_to_string_max_rows_zero(self, data, expected):
+ # GH#35394
+ result = DataFrame(data=data).to_string(max_rows=0)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "max_cols, max_rows, expected",
+ [
+ (
+ 10,
+ None,
+ " 0 1 2 3 4 ... 6 7 8 9 10\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0",
+ ),
+ (
+ None,
+ 2,
+ " 0 1 2 3 4 5 6 7 8 9 10\n"
+ " 0 0 0 0 0 0 0 0 0 0 0\n"
+ " .. .. .. .. .. .. .. .. .. .. ..\n"
+ " 0 0 0 0 0 0 0 0 0 0 0",
+ ),
+ (
+ 10,
+ 2,
+ " 0 1 2 3 4 ... 6 7 8 9 10\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
+ " .. .. .. .. .. ... .. .. .. .. ..\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0",
+ ),
+ (
+ 9,
+ 2,
+ " 0 1 2 3 ... 7 8 9 10\n"
+ " 0 0 0 0 ... 0 0 0 0\n"
+ " .. .. .. .. ... .. .. .. ..\n"
+ " 0 0 0 0 ... 0 0 0 0",
+ ),
+ (
+ 1,
+ 1,
+ " 0 ...\n 0 ...\n.. ...",
+ ),
+ ],
+ )
+ def test_truncation_no_index(self, max_cols, max_rows, expected):
+ df = DataFrame([[0] * 11] * 4)
+ assert (
+ df.to_string(index=False, max_cols=max_cols, max_rows=max_rows) == expected
+ )
+
+ def test_to_string_no_index(self):
+ # GH#16839, GH#13032
+ df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
+
+ df_s = df.to_string(index=False)
+ # Leading space is expected for positive numbers.
+ expected = " x y z\n11 33 AAA\n22 -44 "
+ assert df_s == expected
+
+ df_s = df[["y", "x", "z"]].to_string(index=False)
+ expected = " y x z\n 33 11 AAA\n-44 22 "
+ assert df_s == expected
+
+ def test_to_string_unicode_columns(self, float_frame):
+ df = DataFrame({"\u03c3": np.arange(10.0)})
+
+ buf = StringIO()
+ df.to_string(buf=buf)
+ buf.getvalue()
+
+ buf = StringIO()
+ df.info(buf=buf)
+ buf.getvalue()
+
+ result = float_frame.to_string()
+ assert isinstance(result, str)
+
+ @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+ def test_to_string_na_rep_and_float_format(self, na_rep):
+ # GH#13828
+ df = DataFrame([["A", 1.2225], ["A", None]], columns=["Group", "Data"])
+ result = df.to_string(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = dedent(
+ f"""\
+ Group Data
+ 0 A 1.22
+ 1 A {na_rep}"""
+ )
+ assert result == expected
+
+ def test_to_string_string_dtype(self):
+ # GH#50099
+ pytest.importorskip("pyarrow")
+ df = DataFrame(
+ {"x": ["foo", "bar", "baz"], "y": ["a", "b", "c"], "z": [1, 2, 3]}
+ )
+ df = df.astype(
+ {"x": "string[pyarrow]", "y": "string[python]", "z": "int64[pyarrow]"}
+ )
+ result = df.dtypes.to_string()
+ expected = dedent(
+ """\
+ x string[pyarrow]
+ y string[python]
+ z int64[pyarrow]"""
+ )
+ assert result == expected
+
+ def test_to_string_pos_args_deprecation(self):
+ # GH#54229
+ df = DataFrame({"a": [1, 2, 3]})
+ msg = (
+ "Starting with pandas version 3.0 all arguments of to_string "
+ "except for the "
+ "argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ buf = StringIO()
+ df.to_string(buf, None, None, True, True)
+
+ def test_to_string_utf8_columns(self):
+ n = "\u05d0".encode()
+ df = DataFrame([1, 2], columns=[n])
+
+ with option_context("display.max_rows", 1):
+ repr(df)
+
+ def test_to_string_unicode_two(self):
+ dm = DataFrame({"c/\u03c3": []})
+ buf = StringIO()
+ dm.to_string(buf)
+
+ def test_to_string_unicode_three(self):
+ dm = DataFrame(["\xc2"])
+ buf = StringIO()
+ dm.to_string(buf)
+
+ def test_to_string_with_float_index(self):
+ index = Index([1.5, 2, 3, 4, 5])
+ df = DataFrame(np.arange(5), index=index)
+
+ result = df.to_string()
+ expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
+ assert result == expected
+
+ def test_to_string(self):
+ # big mixed
+ biggie = DataFrame(
+ {
+ "A": np.random.default_rng(2).standard_normal(200),
+ "B": Index([f"{i}?!" for i in range(200)]),
+ },
+ )
+
+ biggie.loc[:20, "A"] = np.nan
+ biggie.loc[:20, "B"] = np.nan
+ s = biggie.to_string()
+
+ buf = StringIO()
+ retval = biggie.to_string(buf=buf)
+ assert retval is None
+ assert buf.getvalue() == s
+
+ assert isinstance(s, str)
+
+ # print in right order
+ result = biggie.to_string(
+ columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
+ )
+ lines = result.split("\n")
+ header = lines[0].strip().split()
+ joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
+ recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
+ tm.assert_series_equal(recons["B"], biggie["B"])
+ assert recons["A"].count() == biggie["A"].count()
+ assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
+
+ # FIXME: don't leave commented-out
+ # expected = ['B', 'A']
+ # assert header == expected
+
+ result = biggie.to_string(columns=["A"], col_space=17)
+ header = result.split("\n")[0].strip().split()
+ expected = ["A"]
+ assert header == expected
+
+ biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
+
+ biggie.to_string(columns=["B", "A"], float_format=str)
+ biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
+
+ frame = DataFrame(index=np.arange(200))
+ frame.to_string()
+
+ # TODO: split or simplify this test?
+ @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="fix when arrow is default")
+ def test_to_string_index_with_nan(self):
+ # GH#2850
+ df = DataFrame(
+ {
+ "id1": {0: "1a3", 1: "9h4"},
+ "id2": {0: np.nan, 1: "d67"},
+ "id3": {0: "78d", 1: "79d"},
+ "value": {0: 123, 1: 64},
+ }
+ )
+
+ # multi-index
+ y = df.set_index(["id1", "id2", "id3"])
+ result = y.to_string()
+ expected = (
+ " value\nid1 id2 id3 \n"
+ "1a3 NaN 78d 123\n9h4 d67 79d 64"
+ )
+ assert result == expected
+
+ # index
+ y = df.set_index("id2")
+ result = y.to_string()
+ expected = (
+ " id1 id3 value\nid2 \n"
+ "NaN 1a3 78d 123\nd67 9h4 79d 64"
+ )
+ assert result == expected
+
+ # with append (this failed in 0.12)
+ y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
+ result = y.to_string()
+ expected = (
+ " value\nid1 id2 id3 \n"
+ "1a3 NaN 78d 123\n9h4 d67 79d 64"
+ )
+ assert result == expected
+
+ # all-nan in mi
+ df2 = df.copy()
+ df2.loc[:, "id2"] = np.nan
+ y = df2.set_index("id2")
+ result = y.to_string()
+ expected = (
+ " id1 id3 value\nid2 \n"
+ "NaN 1a3 78d 123\nNaN 9h4 79d 64"
+ )
+ assert result == expected
+
+ # partial nan in mi
+ df2 = df.copy()
+ df2.loc[:, "id2"] = np.nan
+ y = df2.set_index(["id2", "id3"])
+ result = y.to_string()
+ expected = (
+ " id1 value\nid2 id3 \n"
+ "NaN 78d 1a3 123\n 79d 9h4 64"
+ )
+ assert result == expected
+
+ df = DataFrame(
+ {
+ "id1": {0: np.nan, 1: "9h4"},
+ "id2": {0: np.nan, 1: "d67"},
+ "id3": {0: np.nan, 1: "79d"},
+ "value": {0: 123, 1: 64},
+ }
+ )
+
+ y = df.set_index(["id1", "id2", "id3"])
+ result = y.to_string()
+ expected = (
+ " value\nid1 id2 id3 \n"
+ "NaN NaN NaN 123\n9h4 d67 79d 64"
+ )
+ assert result == expected
+
+ def test_to_string_nonunicode_nonascii_alignment(self):
+ df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
+ rep_str = df.to_string()
+ lines = rep_str.split("\n")
+ assert len(lines[1]) == len(lines[2])
+
+ def test_unicode_problem_decoding_as_ascii(self):
+ df = DataFrame({"c/\u03c3": Series({"test": np.nan})})
+ str(df.to_string())
+
+ def test_to_string_repr_unicode(self):
+ buf = StringIO()
+
+ unicode_values = ["\u03c3"] * 10
+ unicode_values = np.array(unicode_values, dtype=object)
+ df = DataFrame({"unicode": unicode_values})
+ df.to_string(col_space=10, buf=buf)
+
+ # it works!
+ repr(df)
+ # it works even if sys.stdin in None
+ _stdin = sys.stdin
+ try:
+ sys.stdin = None
+ repr(df)
+ finally:
+ sys.stdin = _stdin
+
+
+class TestSeriesToString:
+ def test_to_string_without_index(self):
+ # GH#11729 Test index=False option
+ ser = Series([1, 2, 3, 4])
+ result = ser.to_string(index=False)
+ expected = "\n".join(["1", "2", "3", "4"])
+ assert result == expected
+
+ def test_to_string_name(self):
+ ser = Series(range(100), dtype="int64")
+ ser.name = "myser"
+ res = ser.to_string(max_rows=2, name=True)
+ exp = "0 0\n ..\n99 99\nName: myser"
+ assert res == exp
+ res = ser.to_string(max_rows=2, name=False)
+ exp = "0 0\n ..\n99 99"
+ assert res == exp
+
+ def test_to_string_dtype(self):
+ ser = Series(range(100), dtype="int64")
+ res = ser.to_string(max_rows=2, dtype=True)
+ exp = "0 0\n ..\n99 99\ndtype: int64"
+ assert res == exp
+ res = ser.to_string(max_rows=2, dtype=False)
+ exp = "0 0\n ..\n99 99"
+ assert res == exp
+
+ def test_to_string_length(self):
+ ser = Series(range(100), dtype="int64")
+ res = ser.to_string(max_rows=2, length=True)
+ exp = "0 0\n ..\n99 99\nLength: 100"
+ assert res == exp
+
+ def test_to_string_na_rep(self):
+ ser = Series(index=range(100), dtype=np.float64)
+ res = ser.to_string(na_rep="foo", max_rows=2)
+ exp = "0 foo\n ..\n99 foo"
+ assert res == exp
+
+ def test_to_string_float_format(self):
+ ser = Series(range(10), dtype="float64")
+ res = ser.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
+ exp = "0 0.0\n ..\n9 9.0"
+ assert res == exp
+
+ def test_to_string_header(self):
+ ser = Series(range(10), dtype="int64")
+ ser.index.name = "foo"
+ res = ser.to_string(header=True, max_rows=2)
+ exp = "foo\n0 0\n ..\n9 9"
+ assert res == exp
+ res = ser.to_string(header=False, max_rows=2)
+ exp = "0 0\n ..\n9 9"
+ assert res == exp
+
+ def test_to_string_empty_col(self):
+ # GH#13653
+ ser = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
+ res = ser.to_string(index=False)
+ exp = " \n Hello\n World\n \n \nMooooo\n \n "
+ assert re.match(exp, res)
+
+ def test_to_string_timedelta64(self):
+ Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
+
+ ser = Series(date_range("2012-1-1", periods=3, freq="D"))
+
+ # GH#2146
+
+ # adding NaTs
+ y = ser - ser.shift(1)
+ result = y.to_string()
+ assert "1 days" in result
+ assert "00:00:00" not in result
+ assert "NaT" in result
+
+ # with frac seconds
+ o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
+ y = ser - o
+ result = y.to_string()
+ assert "-1 days +23:59:59.999850" in result
+
+ # rounding?
+ o = Series([datetime(2012, 1, 1, 1)] * 3)
+ y = ser - o
+ result = y.to_string()
+ assert "-1 days +23:00:00" in result
+ assert "1 days 23:00:00" in result
+
+ o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
+ y = ser - o
+ result = y.to_string()
+ assert "-1 days +22:59:00" in result
+ assert "1 days 22:59:00" in result
+
+ o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
+ y = ser - o
+ result = y.to_string()
+ assert "-1 days +22:58:59.999850" in result
+ assert "0 days 22:58:59.999850" in result
+
+ # neg time
+ td = timedelta(minutes=5, seconds=3)
+ s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
+ y = ser - s2
+ result = y.to_string()
+ assert "-1 days +23:54:57" in result
+
+ td = timedelta(microseconds=550)
+ s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
+ y = ser - td
+ result = y.to_string()
+ assert "2012-01-01 23:59:59.999450" in result
+
+ # no boxing of the actual elements
+ td = Series(timedelta_range("1 days", periods=3))
+ result = td.to_string()
+ assert result == "0 1 days\n1 2 days\n2 3 days"
+
+ def test_to_string(self):
+ ts = Series(
+ np.arange(10, dtype=np.float64),
+ index=date_range("2020-01-01", periods=10, freq="B"),
+ )
+ buf = StringIO()
+
+ s = ts.to_string()
+
+ retval = ts.to_string(buf=buf)
+ assert retval is None
+ assert buf.getvalue().strip() == s
+
+ # pass float_format
+ format = "%.4f".__mod__
+ result = ts.to_string(float_format=format)
+ result = [x.split()[1] for x in result.split("\n")[:-1]]
+ expected = [format(x) for x in ts]
+ assert result == expected
+
+ # empty string
+ result = ts[:0].to_string()
+ assert result == "Series([], Freq: B)"
+
+ result = ts[:0].to_string(length=0)
+ assert result == "Series([], Freq: B)"
+
+ # name and length
+ cp = ts.copy()
+ cp.name = "foo"
+ result = cp.to_string(length=True, name=True, dtype=True)
+ last_line = result.split("\n")[-1].strip()
+ assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
+
+ @pytest.mark.parametrize(
+ "input_array, expected",
+ [
+ ("a", "a"),
+ (["a", "b"], "a\nb"),
+ ([1, "a"], "1\na"),
+ (1, "1"),
+ ([0, -1], " 0\n-1"),
+ (1.0, "1.0"),
+ ([" a", " b"], " a\n b"),
+ ([".1", "1"], ".1\n 1"),
+ (["10", "-10"], " 10\n-10"),
+ ],
+ )
+ def test_format_remove_leading_space_series(self, input_array, expected):
+ # GH: 24980
+ ser = Series(input_array)
+ result = ser.to_string(index=False)
+ assert result == expected
+
+ def test_to_string_complex_number_trims_zeros(self):
+ ser = Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j])
+ result = ser.to_string()
+ expected = dedent(
+ """\
+ 0 1.00+1.00j
+ 1 1.00+1.00j
+ 2 1.05+1.00j"""
+ )
+ assert result == expected
+
+ def test_nullable_float_to_string(self, float_ea_dtype):
+ # https://github.com/pandas-dev/pandas/issues/36775
+ dtype = float_ea_dtype
+ ser = Series([0.0, 1.0, None], dtype=dtype)
+ result = ser.to_string()
+ expected = dedent(
+ """\
+ 0 0.0
+ 1 1.0
+ 2 """
+ )
+ assert result == expected
+
+ def test_nullable_int_to_string(self, any_int_ea_dtype):
+ # https://github.com/pandas-dev/pandas/issues/36775
+ dtype = any_int_ea_dtype
+ ser = Series([0, 1, None], dtype=dtype)
+ result = ser.to_string()
+ expected = dedent(
+ """\
+ 0 0
+ 1 1
+ 2 """
+ )
+ assert result == expected
+
+ def test_to_string_mixed(self):
+ ser = Series(["foo", np.nan, -1.23, 4.56])
+ result = ser.to_string()
+ expected = "".join(["0 foo\n", "1 NaN\n", "2 -1.23\n", "3 4.56"])
+ assert result == expected
+
+ # but don't count NAs as floats
+ ser = Series(["foo", np.nan, "bar", "baz"])
+ result = ser.to_string()
+ expected = "".join(["0 foo\n", "1 NaN\n", "2 bar\n", "3 baz"])
+ assert result == expected
+
+ ser = Series(["foo", 5, "bar", "baz"])
+ result = ser.to_string()
+ expected = "".join(["0 foo\n", "1 5\n", "2 bar\n", "3 baz"])
+ assert result == expected
+
+ def test_to_string_float_na_spacing(self):
+ ser = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
+ ser[::2] = np.nan
+
+ result = ser.to_string()
+ expected = (
+ "0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
+ )
+ assert result == expected
+
+ def test_to_string_with_datetimeindex(self):
+ index = date_range("20130102", periods=6)
+ ser = Series(1, index=index)
+ result = ser.to_string()
+ assert "2013-01-02" in result
+
+ # nat in index
+ s2 = Series(2, index=[Timestamp("20130111"), NaT])
+ ser = concat([s2, ser])
+ result = ser.to_string()
+ assert "NaT" in result
+
+ # nat in summary
+ result = str(s2.index)
+ assert "NaT" in result
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33e012eefcee4a6f4c25e8dce05a2bc91d731cb9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_common_basic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_common_basic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38139e09f9d10aac7a6c8dcf8cf7b0a8e711208e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_common_basic.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_decimal.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_decimal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c53de67ab7e3f0923ab770dea5ddc1266a1bc965
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_decimal.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_index.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_index.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4e320604626aad015b4a9a9c8128b8caf366fb1
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_index.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_iterator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_iterator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..674020b5dad4631f0c5b1c78ffb1719909d63474
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_iterator.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_verbose.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_verbose.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89b1fd42abb0757c061d79fc831084a5c8926747
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_verbose.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_data_list.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_data_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b0ff9e08d349e0c8012ebd743285b285d15a846
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_data_list.py
@@ -0,0 +1,91 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+import csv
+from io import StringIO
+
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.parsers import TextParser
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+
+
+@xfail_pyarrow
+def test_read_data_list(all_parsers):
+ parser = all_parsers
+ kwargs = {"index_col": 0}
+ data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
+
+ data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
+ expected = parser.read_csv(StringIO(data), **kwargs)
+
+ with TextParser(data_list, chunksize=2, **kwargs) as parser:
+ result = parser.read()
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_reader_list(all_parsers):
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ kwargs = {"index_col": 0}
+
+ lines = list(csv.reader(StringIO(data)))
+ with TextParser(lines, chunksize=2, **kwargs) as reader:
+ chunks = list(reader)
+
+ expected = parser.read_csv(StringIO(data), **kwargs)
+
+ tm.assert_frame_equal(chunks[0], expected[:2])
+ tm.assert_frame_equal(chunks[1], expected[2:4])
+ tm.assert_frame_equal(chunks[2], expected[4:])
+
+
+def test_reader_list_skiprows(all_parsers):
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+ parser = all_parsers
+ kwargs = {"index_col": 0}
+
+ lines = list(csv.reader(StringIO(data)))
+ with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
+ chunks = list(reader)
+
+ expected = parser.read_csv(StringIO(data), **kwargs)
+
+ tm.assert_frame_equal(chunks[0], expected[1:3])
+
+
+def test_read_csv_parse_simple_list(all_parsers):
+ parser = all_parsers
+ data = """foo
+bar baz
+qux foo
+foo
+bar"""
+
+ result = parser.read_csv(StringIO(data), header=None)
+ expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
+ tm.assert_frame_equal(result, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_float.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_float.py
new file mode 100644
index 0000000000000000000000000000000000000000..6069c239362976cc242548a0dc52236ddb7d37d8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_float.py
@@ -0,0 +1,79 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas.compat import is_platform_linux
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+@skip_pyarrow # ParserError: CSV parse error: Empty CSV file or block
+def test_float_parser(all_parsers):
+ # see gh-9565
+ parser = all_parsers
+ data = "45e-1,4.5,45.,inf,-inf"
+ result = parser.read_csv(StringIO(data), header=None)
+
+ expected = DataFrame([[float(s) for s in data.split(",")]])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_scientific_no_exponent(all_parsers_all_precisions):
+ # see gh-12215
+ df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
+ data = df.to_csv(index=False)
+ parser, precision = all_parsers_all_precisions
+
+ df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
+ tm.assert_frame_equal(df_roundtrip, df)
+
+
+@pytest.mark.parametrize(
+ "neg_exp",
+ [
+ -617,
+ -100000,
+ pytest.param(-99999999999999999, marks=pytest.mark.skip_ubsan),
+ ],
+)
+def test_very_negative_exponent(all_parsers_all_precisions, neg_exp):
+ # GH#38753
+ parser, precision = all_parsers_all_precisions
+
+ data = f"data\n10E{neg_exp}"
+ result = parser.read_csv(StringIO(data), float_precision=precision)
+ expected = DataFrame({"data": [0.0]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.skip_ubsan
+@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
+@pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999])
+def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request):
+ # GH#38753
+ parser, precision = all_parsers_all_precisions
+ data = f"data\n10E{exp}"
+ result = parser.read_csv(StringIO(data), float_precision=precision)
+ if precision == "round_trip":
+ if exp == 999999999999999999 and is_platform_linux():
+ mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")
+ request.applymarker(mark)
+
+ value = np.inf if exp > 0 else 0.0
+ expected = DataFrame({"data": [value]})
+ else:
+ expected = DataFrame({"data": [f"10E{exp}"]})
+
+ tm.assert_frame_equal(result, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_ints.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_ints.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3167346c64efdcbb76953a922c8cb22280278a3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_ints.py
@@ -0,0 +1,231 @@
+"""
+Tests that work on both the Python and C engines but do not have a
+specific classification into the other test modules.
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ Series,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+def test_int_conversion(all_parsers):
+ data = """A,B
+1.0,1
+2.0,2
+3.0,3
+"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data))
+
+ expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "data,kwargs,expected",
+ [
+ (
+ "A,B\nTrue,1\nFalse,2\nTrue,3",
+ {},
+ DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
+ ),
+ (
+ "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
+ {"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},
+ DataFrame(
+ [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
+ columns=["A", "B"],
+ ),
+ ),
+ (
+ "A,B\nTRUE,1\nFALSE,2\nTRUE,3",
+ {},
+ DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
+ ),
+ (
+ "A,B\nfoo,bar\nbar,foo",
+ {"true_values": ["foo"], "false_values": ["bar"]},
+ DataFrame([[True, False], [False, True]], columns=["A", "B"]),
+ ),
+ ],
+)
+def test_parse_bool(all_parsers, data, kwargs, expected):
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_parse_integers_above_fp_precision(all_parsers):
+ data = """Numbers
+17007000002000191
+17007000002000191
+17007000002000191
+17007000002000191
+17007000002000192
+17007000002000192
+17007000002000192
+17007000002000192
+17007000002000192
+17007000002000194"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data))
+ expected = DataFrame(
+ {
+ "Numbers": [
+ 17007000002000191,
+ 17007000002000191,
+ 17007000002000191,
+ 17007000002000191,
+ 17007000002000192,
+ 17007000002000192,
+ 17007000002000192,
+ 17007000002000192,
+ 17007000002000192,
+ 17007000002000194,
+ ]
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("sep", [" ", r"\s+"])
+def test_integer_overflow_bug(all_parsers, sep):
+ # see gh-2601
+ data = "65248E10 11\n55555E55 22\n"
+ parser = all_parsers
+ if parser.engine == "pyarrow" and sep != " ":
+ msg = "the 'pyarrow' engine does not support regex separators"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), header=None, sep=sep)
+ return
+
+ result = parser.read_csv(StringIO(data), header=None, sep=sep)
+ expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_int64_min_issues(all_parsers):
+ # see gh-2599
+ parser = all_parsers
+ data = "A,B\n0,0\n0,"
+ result = parser.read_csv(StringIO(data))
+
+ expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
+def test_int64_overflow(all_parsers, conv, request):
+ data = """ID
+00013007854817840016671868
+00013007854817840016749251
+00013007854817840016754630
+00013007854817840016781876
+00013007854817840017028824
+00013007854817840017963235
+00013007854817840018860166"""
+ parser = all_parsers
+
+ if conv is None:
+ # 13007854817840016671868 > UINT64_MAX, so this
+ # will overflow and return object as the dtype.
+ if parser.engine == "pyarrow":
+ mark = pytest.mark.xfail(reason="parses to float64")
+ request.applymarker(mark)
+
+ result = parser.read_csv(StringIO(data))
+ expected = DataFrame(
+ [
+ "00013007854817840016671868",
+ "00013007854817840016749251",
+ "00013007854817840016754630",
+ "00013007854817840016781876",
+ "00013007854817840017028824",
+ "00013007854817840017963235",
+ "00013007854817840018860166",
+ ],
+ columns=["ID"],
+ )
+ tm.assert_frame_equal(result, expected)
+ else:
+ # 13007854817840016671868 > UINT64_MAX, so attempts
+ # to cast to either int64 or uint64 will result in
+ # an OverflowError being raised.
+ msg = "|".join(
+ [
+ "Python int too large to convert to C long",
+ "long too big to convert",
+ "int too big to convert",
+ ]
+ )
+ err = OverflowError
+ if parser.engine == "pyarrow":
+ err = ValueError
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+
+ with pytest.raises(err, match=msg):
+ parser.read_csv(StringIO(data), converters={"ID": conv})
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+@pytest.mark.parametrize(
+ "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
+)
+def test_int64_uint64_range(all_parsers, val):
+ # These numbers fall right inside the int64-uint64
+ # range, so they should be parsed as string.
+ parser = all_parsers
+ result = parser.read_csv(StringIO(str(val)), header=None)
+
+ expected = DataFrame([val])
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+@pytest.mark.parametrize(
+ "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
+)
+def test_outside_int64_uint64_range(all_parsers, val):
+ # These numbers fall just outside the int64-uint64
+ # range, so they should be parsed as string.
+ parser = all_parsers
+ result = parser.read_csv(StringIO(str(val)), header=None)
+
+ expected = DataFrame([str(val)])
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # gets float64 dtype instead of object
+@pytest.mark.parametrize("exp_data", [[str(-1), str(2**63)], [str(2**63), str(-1)]])
+def test_numeric_range_too_wide(all_parsers, exp_data):
+ # No numerical dtype can hold both negative and uint64
+ # values, so they should be cast as string.
+ parser = all_parsers
+ data = "\n".join(exp_data)
+ expected = DataFrame(exp_data)
+
+ result = parser.read_csv(StringIO(data), header=None)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_integer_precision(all_parsers):
+ # Gh 7072
+ s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765
+5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(s), header=None)[4]
+ expected = Series([4321583677327450765, 4321113141090630389], name=4)
+ tm.assert_series_equal(result, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d5f870f07206f0380f87233828047cdc6dc5ea9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py
@@ -0,0 +1,319 @@
+from __future__ import annotations
+
+import os
+
+import pytest
+
+from pandas.compat._optional import VERSIONS
+
+from pandas import (
+ read_csv,
+ read_table,
+)
+import pandas._testing as tm
+
+
+class BaseParser:
+ engine: str | None = None
+ low_memory = True
+ float_precision_choices: list[str | None] = []
+
+ def update_kwargs(self, kwargs):
+ kwargs = kwargs.copy()
+ kwargs.update({"engine": self.engine, "low_memory": self.low_memory})
+
+ return kwargs
+
+ def read_csv(self, *args, **kwargs):
+ kwargs = self.update_kwargs(kwargs)
+ return read_csv(*args, **kwargs)
+
+ def read_csv_check_warnings(
+ self,
+ warn_type: type[Warning],
+ warn_msg: str,
+ *args,
+ raise_on_extra_warnings=True,
+ check_stacklevel: bool = True,
+ **kwargs,
+ ):
+ # We need to check the stacklevel here instead of in the tests
+ # since this is where read_csv is called and where the warning
+ # should point to.
+ kwargs = self.update_kwargs(kwargs)
+ with tm.assert_produces_warning(
+ warn_type,
+ match=warn_msg,
+ raise_on_extra_warnings=raise_on_extra_warnings,
+ check_stacklevel=check_stacklevel,
+ ):
+ return read_csv(*args, **kwargs)
+
+ def read_table(self, *args, **kwargs):
+ kwargs = self.update_kwargs(kwargs)
+ return read_table(*args, **kwargs)
+
+ def read_table_check_warnings(
+ self,
+ warn_type: type[Warning],
+ warn_msg: str,
+ *args,
+ raise_on_extra_warnings=True,
+ **kwargs,
+ ):
+ # We need to check the stacklevel here instead of in the tests
+ # since this is where read_table is called and where the warning
+ # should point to.
+ kwargs = self.update_kwargs(kwargs)
+ with tm.assert_produces_warning(
+ warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings
+ ):
+ return read_table(*args, **kwargs)
+
+
+class CParser(BaseParser):
+ engine = "c"
+ float_precision_choices = [None, "high", "round_trip"]
+
+
+class CParserHighMemory(CParser):
+ low_memory = False
+
+
+class CParserLowMemory(CParser):
+ low_memory = True
+
+
+class PythonParser(BaseParser):
+ engine = "python"
+ float_precision_choices = [None]
+
+
+class PyArrowParser(BaseParser):
+ engine = "pyarrow"
+ float_precision_choices = [None]
+
+
+@pytest.fixture
+def csv_dir_path(datapath):
+ """
+ The directory path to the data files needed for parser tests.
+ """
+ return datapath("io", "parser", "data")
+
+
+@pytest.fixture
+def csv1(datapath):
+ """
+ The path to the data file "test1.csv" needed for parser tests.
+ """
+ return os.path.join(datapath("io", "data", "csv"), "test1.csv")
+
+
+_cParserHighMemory = CParserHighMemory
+_cParserLowMemory = CParserLowMemory
+_pythonParser = PythonParser
+_pyarrowParser = PyArrowParser
+
+_py_parsers_only = [_pythonParser]
+_c_parsers_only = [_cParserHighMemory, _cParserLowMemory]
+_pyarrow_parsers_only = [pytest.param(_pyarrowParser, marks=pytest.mark.single_cpu)]
+
+_all_parsers = [*_c_parsers_only, *_py_parsers_only, *_pyarrow_parsers_only]
+
+_py_parser_ids = ["python"]
+_c_parser_ids = ["c_high", "c_low"]
+_pyarrow_parsers_ids = ["pyarrow"]
+
+_all_parser_ids = [*_c_parser_ids, *_py_parser_ids, *_pyarrow_parsers_ids]
+
+
+@pytest.fixture(params=_all_parsers, ids=_all_parser_ids)
+def all_parsers(request):
+ """
+    Fixture for all of the CSV parsers.
+ """
+ parser = request.param()
+ if parser.engine == "pyarrow":
+ pytest.importorskip("pyarrow", VERSIONS["pyarrow"])
+ # Try finding a way to disable threads all together
+ # for more stable CI runs
+ import pyarrow
+
+ pyarrow.set_cpu_count(1)
+ return parser
+
+
+@pytest.fixture(params=_c_parsers_only, ids=_c_parser_ids)
+def c_parser_only(request):
+ """
+    Fixture for all of the CSV parsers using the C engine.
+ """
+ return request.param()
+
+
+@pytest.fixture(params=_py_parsers_only, ids=_py_parser_ids)
+def python_parser_only(request):
+ """
+    Fixture for all of the CSV parsers using the Python engine.
+ """
+ return request.param()
+
+
+@pytest.fixture(params=_pyarrow_parsers_only, ids=_pyarrow_parsers_ids)
+def pyarrow_parser_only(request):
+ """
+    Fixture for all of the CSV parsers using the Pyarrow engine.
+ """
+ return request.param()
+
+
+def _get_all_parser_float_precision_combinations():
+ """
+ Return all allowable parser and float precision
+ combinations and corresponding ids.
+ """
+ params = []
+ ids = []
+ for parser, parser_id in zip(_all_parsers, _all_parser_ids):
+ if hasattr(parser, "values"):
+ # Wrapped in pytest.param, get the actual parser back
+ parser = parser.values[0]
+ for precision in parser.float_precision_choices:
+ # Re-wrap in pytest.param for pyarrow
+ mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else ()
+ param = pytest.param((parser(), precision), marks=mark)
+ params.append(param)
+ ids.append(f"{parser_id}-{precision}")
+
+ return {"params": params, "ids": ids}
+
+
+@pytest.fixture(
+ params=_get_all_parser_float_precision_combinations()["params"],
+ ids=_get_all_parser_float_precision_combinations()["ids"],
+)
+def all_parsers_all_precisions(request):
+ """
+ Fixture for all allowable combinations of parser
+ and float precision
+ """
+ return request.param
+
+
+_utf_values = [8, 16, 32]
+
+_encoding_seps = ["", "-", "_"]
+_encoding_prefixes = ["utf", "UTF"]
+
+_encoding_fmts = [
+ f"{prefix}{sep}{{0}}" for sep in _encoding_seps for prefix in _encoding_prefixes
+]
+
+
+@pytest.fixture(params=_utf_values)
+def utf_value(request):
+ """
+ Fixture for all possible integer values for a UTF encoding.
+ """
+ return request.param
+
+
+@pytest.fixture(params=_encoding_fmts)
+def encoding_fmt(request):
+ """
+ Fixture for all possible string formats of a UTF encoding.
+ """
+ return request.param
+
+
+@pytest.fixture(
+ params=[
+ ("-1,0", -1.0),
+ ("-1,2e0", -1.2),
+ ("-1e0", -1.0),
+ ("+1e0", 1.0),
+ ("+1e+0", 1.0),
+ ("+1e-1", 0.1),
+ ("+,1e1", 1.0),
+ ("+1,e0", 1.0),
+ ("-,1e1", -1.0),
+ ("-1,e0", -1.0),
+ ("0,1", 0.1),
+ ("1,", 1.0),
+ (",1", 0.1),
+ ("-,1", -0.1),
+ ("1_,", 1.0),
+ ("1_234,56", 1234.56),
+ ("1_234,56e0", 1234.56),
+ # negative cases; must not parse as float
+ ("_", "_"),
+ ("-_", "-_"),
+ ("-_1", "-_1"),
+ ("-_1e0", "-_1e0"),
+ ("_1", "_1"),
+ ("_1,", "_1,"),
+ ("_1,_", "_1,_"),
+ ("_1e0", "_1e0"),
+ ("1,2e_1", "1,2e_1"),
+ ("1,2e1_0", "1,2e1_0"),
+ ("1,_2", "1,_2"),
+ (",1__2", ",1__2"),
+ (",1e", ",1e"),
+ ("-,1e", "-,1e"),
+ ("1_000,000_000", "1_000,000_000"),
+ ("1,e1_2", "1,e1_2"),
+ ("e11,2", "e11,2"),
+ ("1e11,2", "1e11,2"),
+ ("1,2,2", "1,2,2"),
+ ("1,2_1", "1,2_1"),
+ ("1,2e-10e1", "1,2e-10e1"),
+ ("--1,2", "--1,2"),
+ ("1a_2,1", "1a_2,1"),
+ ("1,2E-1", 0.12),
+ ("1,2E1", 12.0),
+ ]
+)
+def numeric_decimal(request):
+ """
+ Fixture for all numeric formats which should get recognized. The first entry
+ represents the value to read while the second represents the expected result.
+ """
+ return request.param
+
+
+@pytest.fixture
+def pyarrow_xfail(request):
+ """
+ Fixture that xfails a test if the engine is pyarrow.
+
+    Use if failure is due to unsupported keywords or inconsistent results.
+ """
+ if "all_parsers" in request.fixturenames:
+ parser = request.getfixturevalue("all_parsers")
+ elif "all_parsers_all_precisions" in request.fixturenames:
+ # Return value is tuple of (engine, precision)
+ parser = request.getfixturevalue("all_parsers_all_precisions")[0]
+ else:
+ return
+ if parser.engine == "pyarrow":
+ mark = pytest.mark.xfail(reason="pyarrow doesn't support this.")
+ request.applymarker(mark)
+
+
+@pytest.fixture
+def pyarrow_skip(request):
+ """
+ Fixture that skips a test if the engine is pyarrow.
+
+    Use if failure is due to a parsing failure from pyarrow.csv.read_csv
+ """
+ if "all_parsers" in request.fixturenames:
+ parser = request.getfixturevalue("all_parsers")
+ elif "all_parsers_all_precisions" in request.fixturenames:
+ # Return value is tuple of (engine, precision)
+ parser = request.getfixturevalue("all_parsers_all_precisions")[0]
+ else:
+ return
+ if parser.engine == "pyarrow":
+ pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py
new file mode 100644
index 0000000000000000000000000000000000000000..27d7bc0bb6c07aeb74cc7324a101a3572c3161ee
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py
@@ -0,0 +1,643 @@
+"""
+Tests that apply specifically to the CParser. Unless specifically stated
+as a CParser-specific issue, the goal is to eventually move as many of
+these tests out of this module as soon as the Python parser can accept
+further arguments when parsing.
+"""
+from decimal import Decimal
+from io import (
+ BytesIO,
+ StringIO,
+ TextIOWrapper,
+)
+import mmap
+import os
+import tarfile
+
+import numpy as np
+import pytest
+
+from pandas.compat.numpy import np_version_gte1p24
+from pandas.errors import (
+ ParserError,
+ ParserWarning,
+)
+import pandas.util._test_decorators as td
+
+from pandas import (
+ DataFrame,
+ concat,
+)
+import pandas._testing as tm
+
+
+@pytest.mark.parametrize(
+ "malformed",
+ ["1\r1\r1\r 1\r 1\r", "1\r1\r1\r 1\r 1\r11\r", "1\r1\r1\r 1\r 1\r11\r1\r"],
+ ids=["words pointer", "stream pointer", "lines pointer"],
+)
+def test_buffer_overflow(c_parser_only, malformed):
+ # see gh-9205: test certain malformed input files that cause
+ # buffer overflows in tokenizer.c
+ msg = "Buffer overflow caught - possible malformed input file."
+ parser = c_parser_only
+
+ with pytest.raises(ParserError, match=msg):
+ parser.read_csv(StringIO(malformed))
+
+
+def test_delim_whitespace_custom_terminator(c_parser_only):
+ # See gh-12912
+ data = "a b c~1 2 3~4 5 6~7 8 9"
+ parser = c_parser_only
+
+ depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+ with tm.assert_produces_warning(
+ FutureWarning, match=depr_msg, check_stacklevel=False
+ ):
+ df = parser.read_csv(StringIO(data), lineterminator="~", delim_whitespace=True)
+ expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
+ tm.assert_frame_equal(df, expected)
+
+
+def test_dtype_and_names_error(c_parser_only):
+ # see gh-8833: passing both dtype and names
+ # resulting in an error reporting issue
+ parser = c_parser_only
+ data = """
+1.0 1
+2.0 2
+3.0 3
+"""
+ # base cases
+ result = parser.read_csv(StringIO(data), sep=r"\s+", header=None)
+ expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
+ tm.assert_frame_equal(result, expected)
+
+ result = parser.read_csv(StringIO(data), sep=r"\s+", header=None, names=["a", "b"])
+ expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+ # fallback casting
+ result = parser.read_csv(
+ StringIO(data), sep=r"\s+", header=None, names=["a", "b"], dtype={"a": np.int32}
+ )
+ expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=["a", "b"])
+ expected["a"] = expected["a"].astype(np.int32)
+ tm.assert_frame_equal(result, expected)
+
+ data = """
+1.0 1
+nan 2
+3.0 3
+"""
+ # fallback casting, but not castable
+ warning = RuntimeWarning if np_version_gte1p24 else None
+ with pytest.raises(ValueError, match="cannot safely convert"):
+ with tm.assert_produces_warning(warning, check_stacklevel=False):
+ parser.read_csv(
+ StringIO(data),
+ sep=r"\s+",
+ header=None,
+ names=["a", "b"],
+ dtype={"a": np.int32},
+ )
+
+
+@pytest.mark.parametrize(
+ "match,kwargs",
+ [
+ # For each of these cases, all of the dtypes are valid, just unsupported.
+ (
+ (
+ "the dtype datetime64 is not supported for parsing, "
+ "pass this column using parse_dates instead"
+ ),
+ {"dtype": {"A": "datetime64", "B": "float64"}},
+ ),
+ (
+ (
+ "the dtype datetime64 is not supported for parsing, "
+ "pass this column using parse_dates instead"
+ ),
+ {"dtype": {"A": "datetime64", "B": "float64"}, "parse_dates": ["B"]},
+ ),
+ (
+ "the dtype timedelta64 is not supported for parsing",
+ {"dtype": {"A": "timedelta64", "B": "float64"}},
+ ),
+ (
+ f"the dtype {tm.ENDIAN}U8 is not supported for parsing",
+ {"dtype": {"A": "U8"}},
+ ),
+ ],
+ ids=["dt64-0", "dt64-1", "td64", f"{tm.ENDIAN}U8"],
+)
+def test_unsupported_dtype(c_parser_only, match, kwargs):
+ parser = c_parser_only
+ df = DataFrame(
+ np.random.default_rng(2).random((5, 2)),
+ columns=list("AB"),
+ index=["1A", "1B", "1C", "1D", "1E"],
+ )
+
+ with tm.ensure_clean("__unsupported_dtype__.csv") as path:
+ df.to_csv(path)
+
+ with pytest.raises(TypeError, match=match):
+ parser.read_csv(path, index_col=0, **kwargs)
+
+
+@td.skip_if_32bit
+@pytest.mark.slow
+# test numbers between 1 and 2
+@pytest.mark.parametrize("num", np.linspace(1.0, 2.0, num=21))
+def test_precise_conversion(c_parser_only, num):
+ parser = c_parser_only
+
+ normal_errors = []
+ precise_errors = []
+
+ def error(val: float, actual_val: Decimal) -> Decimal:
+ return abs(Decimal(f"{val:.100}") - actual_val)
+
+ # 25 decimal digits of precision
+ text = f"a\n{num:.25}"
+
+ normal_val = float(
+ parser.read_csv(StringIO(text), float_precision="legacy")["a"][0]
+ )
+ precise_val = float(parser.read_csv(StringIO(text), float_precision="high")["a"][0])
+ roundtrip_val = float(
+ parser.read_csv(StringIO(text), float_precision="round_trip")["a"][0]
+ )
+ actual_val = Decimal(text[2:])
+
+ normal_errors.append(error(normal_val, actual_val))
+ precise_errors.append(error(precise_val, actual_val))
+
+ # round-trip should match float()
+ assert roundtrip_val == float(text[2:])
+
+ assert sum(precise_errors) <= sum(normal_errors)
+ assert max(precise_errors) <= max(normal_errors)
+
+
+def test_usecols_dtypes(c_parser_only):
+ parser = c_parser_only
+ data = """\
+1,2,3
+4,5,6
+7,8,9
+10,11,12"""
+
+ result = parser.read_csv(
+ StringIO(data),
+ usecols=(0, 1, 2),
+ names=("a", "b", "c"),
+ header=None,
+ converters={"a": str},
+ dtype={"b": int, "c": float},
+ )
+ result2 = parser.read_csv(
+ StringIO(data),
+ usecols=(0, 2),
+ names=("a", "b", "c"),
+ header=None,
+ converters={"a": str},
+ dtype={"b": int, "c": float},
+ )
+
+ assert (result.dtypes == [object, int, float]).all()
+ assert (result2.dtypes == [object, float]).all()
+
+
+def test_disable_bool_parsing(c_parser_only):
+ # see gh-2090
+
+ parser = c_parser_only
+ data = """A,B,C
+Yes,No,Yes
+No,Yes,Yes
+Yes,,Yes
+No,No,No"""
+
+ result = parser.read_csv(StringIO(data), dtype=object)
+ assert (result.dtypes == object).all()
+
+ result = parser.read_csv(StringIO(data), dtype=object, na_filter=False)
+ assert result["B"][2] == ""
+
+
+def test_custom_lineterminator(c_parser_only):
+ parser = c_parser_only
+ data = "a,b,c~1,2,3~4,5,6"
+
+ result = parser.read_csv(StringIO(data), lineterminator="~")
+ expected = parser.read_csv(StringIO(data.replace("~", "\n")))
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_parse_ragged_csv(c_parser_only):
+ parser = c_parser_only
+ data = """1,2,3
+1,2,3,4
+1,2,3,4,5
+1,2
+1,2,3,4"""
+
+ nice_data = """1,2,3,,
+1,2,3,4,
+1,2,3,4,5
+1,2,,,
+1,2,3,4,"""
+ result = parser.read_csv(
+ StringIO(data), header=None, names=["a", "b", "c", "d", "e"]
+ )
+
+ expected = parser.read_csv(
+ StringIO(nice_data), header=None, names=["a", "b", "c", "d", "e"]
+ )
+
+ tm.assert_frame_equal(result, expected)
+
+ # too many columns, cause segfault if not careful
+ data = "1,2\n3,4,5"
+
+ result = parser.read_csv(StringIO(data), header=None, names=range(50))
+ expected = parser.read_csv(StringIO(data), header=None, names=range(3)).reindex(
+ columns=range(50)
+ )
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_tokenize_CR_with_quoting(c_parser_only):
+ # see gh-3453
+ parser = c_parser_only
+ data = ' a,b,c\r"a,b","e,d","f,f"'
+
+ result = parser.read_csv(StringIO(data), header=None)
+ expected = parser.read_csv(StringIO(data.replace("\r", "\n")), header=None)
+ tm.assert_frame_equal(result, expected)
+
+ result = parser.read_csv(StringIO(data))
+ expected = parser.read_csv(StringIO(data.replace("\r", "\n")))
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("count", [3 * 2**n for n in range(6)])
+def test_grow_boundary_at_cap(c_parser_only, count):
+ # See gh-12494
+ #
+ # Cause of error was that the C parser
+ # was not increasing the buffer size when
+ # the desired space would fill the buffer
+ # to capacity, which would later cause a
+ # buffer overflow error when checking the
+ # EOF terminator of the CSV stream.
+ # 3 * 2^n commas was observed to break the parser
+ parser = c_parser_only
+
+ with StringIO("," * count) as s:
+ expected = DataFrame(columns=[f"Unnamed: {i}" for i in range(count + 1)])
+ df = parser.read_csv(s)
+ tm.assert_frame_equal(df, expected)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("encoding", [None, "utf-8"])
+def test_parse_trim_buffers(c_parser_only, encoding):
+    # This test is part of a bugfix for gh-13703. It attempts
+    # to stress the system memory allocator, to cause it to move the
+ # stream buffer and either let the OS reclaim the region, or let
+ # other memory requests of parser otherwise modify the contents
+ # of memory space, where it was formally located.
+ # This test is designed to cause a `segfault` with unpatched
+ # `tokenizer.c`. Sometimes the test fails on `segfault`, other
+ # times it fails due to memory corruption, which causes the
+ # loaded DataFrame to differ from the expected one.
+
+ # Also force 'utf-8' encoding, so that `_string_convert` would take
+ # a different execution branch.
+
+ parser = c_parser_only
+
+ # Generate a large mixed-type CSV file on-the-fly (one record is
+ # approx 1.5KiB).
+ record_ = (
+ """9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z"""
+ """ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,"""
+ """ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9"""
+ """99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,"""
+ """9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9."""
+ """99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999."""
+ """99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ"""
+ """ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ"""
+ """ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z"""
+ """ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,"""
+ """9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,"""
+ """999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,"""
+ """,,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999"""
+ """,9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9."""
+ """999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,"""
+ """,9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z"""
+ """ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ"""
+ """,999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99"""
+ """,,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-"""
+ """9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9"""
+ """.99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,"""
+ """,,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9."""
+ """99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ"""
+ """ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ"""
+ """-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ"""
+ """ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ"""
+ """,9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99"""
+ """,99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9"""
+ """.99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,"""
+ )
+
+ # Set the number of lines so that a call to `parser_trim_buffers`
+ # is triggered: after a couple of full chunks are consumed a
+ # relatively small 'residual' chunk would cause reallocation
+ # within the parser.
+ chunksize, n_lines = 128, 2 * 128 + 15
+ csv_data = "\n".join([record_] * n_lines) + "\n"
+
+ # We will use StringIO to load the CSV from this text buffer.
+ # pd.read_csv() will iterate over the file in chunks and will
+ # finally read a residual chunk of really small size.
+
+ # Generate the expected output: manually create the dataframe
+ # by splitting by comma and repeating the `n_lines` times.
+ row = tuple(val_ if val_ else np.nan for val_ in record_.split(","))
+ expected = DataFrame(
+ [row for _ in range(n_lines)], dtype=object, columns=None, index=None
+ )
+
+ # Iterate over the CSV file in chunks of `chunksize` lines
+ with parser.read_csv(
+ StringIO(csv_data),
+ header=None,
+ dtype=object,
+ chunksize=chunksize,
+ encoding=encoding,
+ ) as chunks_:
+ result = concat(chunks_, axis=0, ignore_index=True)
+
+ # Check for data corruption if there was no segfault
+ tm.assert_frame_equal(result, expected)
+
+
+def test_internal_null_byte(c_parser_only):
+ # see gh-14012
+ #
+ # The null byte ('\x00') should not be used as a
+ # true line terminator, escape character, or comment
+ # character, only as a placeholder to indicate that
+ # none was specified.
+ #
+ # This test should be moved to test_common.py ONLY when
+ # Python's csv class supports parsing '\x00'.
+ parser = c_parser_only
+
+ names = ["a", "b", "c"]
+ data = "1,2,3\n4,\x00,6\n7,8,9"
+ expected = DataFrame([[1, 2.0, 3], [4, np.nan, 6], [7, 8, 9]], columns=names)
+
+ result = parser.read_csv(StringIO(data), names=names)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_nrows_large(c_parser_only):
+ # gh-7626 - Read only nrows of data in for large inputs (>262144b)
+ parser = c_parser_only
+ header_narrow = "\t".join(["COL_HEADER_" + str(i) for i in range(10)]) + "\n"
+ data_narrow = "\t".join(["somedatasomedatasomedata1" for _ in range(10)]) + "\n"
+ header_wide = "\t".join(["COL_HEADER_" + str(i) for i in range(15)]) + "\n"
+ data_wide = "\t".join(["somedatasomedatasomedata2" for _ in range(15)]) + "\n"
+ test_input = header_narrow + data_narrow * 1050 + header_wide + data_wide * 2
+
+ df = parser.read_csv(StringIO(test_input), sep="\t", nrows=1010)
+
+ assert df.size == 1010 * 10
+
+
+def test_float_precision_round_trip_with_text(c_parser_only):
+ # see gh-15140
+ parser = c_parser_only
+ df = parser.read_csv(StringIO("a"), header=None, float_precision="round_trip")
+ tm.assert_frame_equal(df, DataFrame({0: ["a"]}))
+
+
+def test_large_difference_in_columns(c_parser_only):
+ # see gh-14125
+ parser = c_parser_only
+
+ count = 10000
+ large_row = ("X," * count)[:-1] + "\n"
+ normal_row = "XXXXXX XXXXXX,111111111111111\n"
+ test_input = (large_row + normal_row * 6)[:-1]
+
+ result = parser.read_csv(StringIO(test_input), header=None, usecols=[0])
+ rows = test_input.split("\n")
+
+ expected = DataFrame([row.split(",")[0] for row in rows])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_data_after_quote(c_parser_only):
+ # see gh-15910
+ parser = c_parser_only
+
+ data = 'a\n1\n"b"a'
+ result = parser.read_csv(StringIO(data))
+
+ expected = DataFrame({"a": ["1", "ba"]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_comment_whitespace_delimited(c_parser_only):
+ parser = c_parser_only
+ test_input = """\
+1 2
+2 2 3
+3 2 3 # 3 fields
+4 2 3# 3 fields
+5 2 # 2 fields
+6 2# 2 fields
+7 # 1 field, NaN
+8# 1 field, NaN
+9 2 3 # skipped line
+# comment"""
+ with tm.assert_produces_warning(
+ ParserWarning, match="Skipping line", check_stacklevel=False
+ ):
+ df = parser.read_csv(
+ StringIO(test_input),
+ comment="#",
+ header=None,
+ delimiter="\\s+",
+ skiprows=0,
+ on_bad_lines="warn",
+ )
+ expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]])
+ tm.assert_frame_equal(df, expected)
+
+
+def test_file_like_no_next(c_parser_only):
+ # gh-16530: the file-like need not have a "next" or "__next__"
+ # attribute despite having an "__iter__" attribute.
+ #
+ # NOTE: This is only true for the C engine, not Python engine.
+ class NoNextBuffer(StringIO):
+ def __next__(self):
+ raise AttributeError("No next method")
+
+ next = __next__
+
+ parser = c_parser_only
+ data = "a\n1"
+
+ expected = DataFrame({"a": [1]})
+ result = parser.read_csv(NoNextBuffer(data))
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_buffer_rd_bytes_bad_unicode(c_parser_only):
+ # see gh-22748
+ t = BytesIO(b"\xB0")
+ t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape")
+ msg = "'utf-8' codec can't encode character"
+ with pytest.raises(UnicodeError, match=msg):
+ c_parser_only.read_csv(t, encoding="UTF-8")
+
+
+@pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"])
+def test_read_tarfile(c_parser_only, csv_dir_path, tar_suffix):
+ # see gh-16530
+ #
+ # Unfortunately, Python's CSV library can't handle
+ # tarfile objects (expects string, not bytes when
+ # iterating through a file-like).
+ parser = c_parser_only
+ tar_path = os.path.join(csv_dir_path, "tar_csv" + tar_suffix)
+
+ with tarfile.open(tar_path, "r") as tar:
+ data_file = tar.extractfile("tar_data.csv")
+
+ out = parser.read_csv(data_file)
+ expected = DataFrame({"a": [1]})
+ tm.assert_frame_equal(out, expected)
+
+
+def test_chunk_whitespace_on_boundary(c_parser_only):
+ # see gh-9735: this issue is C parser-specific (bug when
+ # parsing whitespace and characters at chunk boundary)
+ #
+ # This test case has a field too large for the Python parser / CSV library.
+ parser = c_parser_only
+
+ chunk1 = "a" * (1024 * 256 - 2) + "\na"
+ chunk2 = "\n a"
+ result = parser.read_csv(StringIO(chunk1 + chunk2), header=None)
+
+ expected = DataFrame(["a" * (1024 * 256 - 2), "a", " a"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_file_handles_mmap(c_parser_only, csv1):
+ # gh-14418
+ #
+ # Don't close user provided file handles.
+ parser = c_parser_only
+
+ with open(csv1, encoding="utf-8") as f:
+ with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
+ parser.read_csv(m)
+ assert not m.closed
+
+
+def test_file_binary_mode(c_parser_only):
+ # see gh-23779
+ parser = c_parser_only
+ expected = DataFrame([[1, 2, 3], [4, 5, 6]])
+
+ with tm.ensure_clean() as path:
+ with open(path, "w", encoding="utf-8") as f:
+ f.write("1,2,3\n4,5,6")
+
+ with open(path, "rb") as f:
+ result = parser.read_csv(f, header=None)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_unix_style_breaks(c_parser_only):
+ # GH 11020
+ parser = c_parser_only
+ with tm.ensure_clean() as path:
+ with open(path, "w", newline="\n", encoding="utf-8") as f:
+ f.write("blah\n\ncol_1,col_2,col_3\n\n")
+ result = parser.read_csv(path, skiprows=2, encoding="utf-8", engine="c")
+ expected = DataFrame(columns=["col_1", "col_2", "col_3"])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
+@pytest.mark.parametrize(
+ "data,thousands,decimal",
+ [
+ (
+ """A|B|C
+1|2,334.01|5
+10|13|10.
+""",
+ ",",
+ ".",
+ ),
+ (
+ """A|B|C
+1|2.334,01|5
+10|13|10,
+""",
+ ".",
+ ",",
+ ),
+ ],
+)
+def test_1000_sep_with_decimal(
+ c_parser_only, data, thousands, decimal, float_precision
+):
+ parser = c_parser_only
+ expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
+
+ result = parser.read_csv(
+ StringIO(data),
+ sep="|",
+ thousands=thousands,
+ decimal=decimal,
+ float_precision=float_precision,
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_float_precision_options(c_parser_only):
+ # GH 17154, 36228
+ parser = c_parser_only
+ s = "foo\n243.164\n"
+ df = parser.read_csv(StringIO(s))
+ df2 = parser.read_csv(StringIO(s), float_precision="high")
+
+ tm.assert_frame_equal(df, df2)
+
+ df3 = parser.read_csv(StringIO(s), float_precision="legacy")
+
+ assert not df.iloc[0, 0] == df3.iloc[0, 0]
+
+ msg = "Unrecognized float_precision option: junk"
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(s), float_precision="junk")
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_converters.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_converters.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f3e45324dbd2113669bddacf824f308d7058123
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_converters.py
@@ -0,0 +1,263 @@
+"""
+Tests column conversion functionality during parsing
+for all of the parsers defined in parsers.py
+"""
+from io import StringIO
+
+from dateutil.parser import parse
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+)
+import pandas._testing as tm
+
+
+def test_converters_type_must_be_dict(all_parsers):
+ parser = all_parsers
+ data = """index,A,B,C,D
+foo,2,3,4,5
+"""
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), converters=0)
+ return
+ with pytest.raises(TypeError, match="Type converters.+"):
+ parser.read_csv(StringIO(data), converters=0)
+
+
+@pytest.mark.parametrize("column", [3, "D"])
+@pytest.mark.parametrize(
+ "converter", [parse, lambda x: int(x.split("/")[2])] # Produce integer.
+)
+def test_converters(all_parsers, column, converter):
+ parser = all_parsers
+ data = """A,B,C,D
+a,1,2,01/01/2009
+b,3,4,01/02/2009
+c,4,5,01/03/2009
+"""
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), converters={column: converter})
+ return
+
+ result = parser.read_csv(StringIO(data), converters={column: converter})
+
+ expected = parser.read_csv(StringIO(data))
+ expected["D"] = expected["D"].map(converter)
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_converters_no_implicit_conv(all_parsers):
+ # see gh-2184
+ parser = all_parsers
+ data = """000102,1.2,A\n001245,2,B"""
+
+ converters = {0: lambda x: x.strip()}
+
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), header=None, converters=converters)
+ return
+
+ result = parser.read_csv(StringIO(data), header=None, converters=converters)
+
+ # Column 0 should not be casted to numeric and should remain as object.
+ expected = DataFrame([["000102", 1.2, "A"], ["001245", 2, "B"]])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_converters_euro_decimal_format(all_parsers):
+ # see gh-583
+ converters = {}
+ parser = all_parsers
+
+ data = """Id;Number1;Number2;Text1;Text2;Number3
+1;1521,1541;187101,9543;ABC;poi;4,7387
+2;121,12;14897,76;DEF;uyt;0,3773
+3;878,158;108013,434;GHI;rez;2,7356"""
+ converters["Number1"] = converters["Number2"] = converters[
+ "Number3"
+ ] = lambda x: float(x.replace(",", "."))
+
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), sep=";", converters=converters)
+ return
+
+ result = parser.read_csv(StringIO(data), sep=";", converters=converters)
+ expected = DataFrame(
+ [
+ [1, 1521.1541, 187101.9543, "ABC", "poi", 4.7387],
+ [2, 121.12, 14897.76, "DEF", "uyt", 0.3773],
+ [3, 878.158, 108013.434, "GHI", "rez", 2.7356],
+ ],
+ columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_converters_corner_with_nans(all_parsers):
+ parser = all_parsers
+ data = """id,score,days
+1,2,12
+2,2-5,
+3,,14+
+4,6-12,2"""
+
+ # Example converters.
+ def convert_days(x):
+ x = x.strip()
+
+ if not x:
+ return np.nan
+
+ is_plus = x.endswith("+")
+
+ if is_plus:
+ x = int(x[:-1]) + 1
+ else:
+ x = int(x)
+
+ return x
+
+ def convert_days_sentinel(x):
+ x = x.strip()
+
+ if not x:
+ return np.nan
+
+ is_plus = x.endswith("+")
+
+ if is_plus:
+ x = int(x[:-1]) + 1
+ else:
+ x = int(x)
+
+ return x
+
+ def convert_score(x):
+ x = x.strip()
+
+ if not x:
+ return np.nan
+
+ if x.find("-") > 0:
+ val_min, val_max = map(int, x.split("-"))
+ val = 0.5 * (val_min + val_max)
+ else:
+ val = float(x)
+
+ return val
+
+ results = []
+
+ for day_converter in [convert_days, convert_days_sentinel]:
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(
+ StringIO(data),
+ converters={"score": convert_score, "days": day_converter},
+ na_values=["", None],
+ )
+ continue
+
+ result = parser.read_csv(
+ StringIO(data),
+ converters={"score": convert_score, "days": day_converter},
+ na_values=["", None],
+ )
+ assert pd.isna(result["days"][1])
+ results.append(result)
+
+ if parser.engine != "pyarrow":
+ tm.assert_frame_equal(results[0], results[1])
+
+
+@pytest.mark.parametrize("conv_f", [lambda x: x, str])
+def test_converter_index_col_bug(all_parsers, conv_f):
+ # see gh-1835 , GH#40589
+ parser = all_parsers
+ data = "A;B\n1;2\n3;4"
+
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(
+ StringIO(data), sep=";", index_col="A", converters={"A": conv_f}
+ )
+ return
+
+ rs = parser.read_csv(
+ StringIO(data), sep=";", index_col="A", converters={"A": conv_f}
+ )
+
+ xp = DataFrame({"B": [2, 4]}, index=Index(["1", "3"], name="A", dtype="object"))
+ tm.assert_frame_equal(rs, xp)
+
+
+def test_converter_identity_object(all_parsers):
+ # GH#40589
+ parser = all_parsers
+ data = "A,B\n1,2\n3,4"
+
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), converters={"A": lambda x: x})
+ return
+
+ rs = parser.read_csv(StringIO(data), converters={"A": lambda x: x})
+
+ xp = DataFrame({"A": ["1", "3"], "B": [2, 4]})
+ tm.assert_frame_equal(rs, xp)
+
+
+def test_converter_multi_index(all_parsers):
+ # GH 42446
+ parser = all_parsers
+ data = "A,B,B\nX,Y,Z\n1,2,3"
+
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(
+ StringIO(data),
+ header=list(range(2)),
+ converters={
+ ("A", "X"): np.int32,
+ ("B", "Y"): np.int32,
+ ("B", "Z"): np.float32,
+ },
+ )
+ return
+
+ result = parser.read_csv(
+ StringIO(data),
+ header=list(range(2)),
+ converters={
+ ("A", "X"): np.int32,
+ ("B", "Y"): np.int32,
+ ("B", "Z"): np.float32,
+ },
+ )
+
+ expected = DataFrame(
+ {
+ ("A", "X"): np.int32([1]),
+ ("B", "Y"): np.int32([2]),
+ ("B", "Z"): np.float32([3]),
+ }
+ )
+
+ tm.assert_frame_equal(result, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_encoding.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbd3917ba9c044962c262cac705e43b6d597599c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_encoding.py
@@ -0,0 +1,337 @@
+"""
+Tests encoding functionality during parsing
+for all of the parsers defined in parsers.py
+"""
+from io import (
+ BytesIO,
+ TextIOWrapper,
+)
+import os
+import tempfile
+import uuid
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ read_csv,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+def test_bytes_io_input(all_parsers):
+ encoding = "cp1255"
+ parser = all_parsers
+
+ data = BytesIO("שלום:1234\n562:123".encode(encoding))
+ result = parser.read_csv(data, sep=":", encoding=encoding)
+
+ expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_read_csv_unicode(all_parsers):
+ parser = all_parsers
+ data = BytesIO("\u0141aski, Jan;1".encode())
+
+ result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)
+ expected = DataFrame([["\u0141aski, Jan", 1]])
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow
+@pytest.mark.parametrize("sep", [",", "\t"])
+@pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"])
+def test_utf16_bom_skiprows(all_parsers, sep, encoding):
+ # see gh-2298
+ parser = all_parsers
+ data = """skip this
+skip this too
+A,B,C
+1,2,3
+4,5,6""".replace(
+ ",", sep
+ )
+ path = f"__{uuid.uuid4()}__.csv"
+ kwargs = {"sep": sep, "skiprows": 2}
+ utf8 = "utf-8"
+
+ with tm.ensure_clean(path) as path:
+ bytes_data = data.encode(encoding)
+
+ with open(path, "wb") as f:
+ f.write(bytes_data)
+
+ with TextIOWrapper(BytesIO(data.encode(utf8)), encoding=utf8) as bytes_buffer:
+ result = parser.read_csv(path, encoding=encoding, **kwargs)
+ expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_utf16_example(all_parsers, csv_dir_path):
+ path = os.path.join(csv_dir_path, "utf16_ex.txt")
+ parser = all_parsers
+ result = parser.read_csv(path, encoding="utf-16", sep="\t")
+ assert len(result) == 50
+
+
+def test_unicode_encoding(all_parsers, csv_dir_path):
+ path = os.path.join(csv_dir_path, "unicode_series.csv")
+ parser = all_parsers
+
+ result = parser.read_csv(path, header=None, encoding="latin-1")
+ result = result.set_index(0)
+ got = result[1][1632]
+
+ expected = "\xc1 k\xf6ldum klaka (Cold Fever) (1994)"
+ assert got == expected
+
+
+@pytest.mark.parametrize(
+ "data,kwargs,expected",
+ [
+ # Basic test
+ ("a\n1", {}, DataFrame({"a": [1]})),
+ # "Regular" quoting
+ ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})),
+ # Test in a data row instead of header
+ ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})),
+ # Test in empty data row with skipping
+ ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})),
+ # Test in empty data row without skipping
+ (
+ "\n1",
+ {"names": ["a"], "skip_blank_lines": False},
+ DataFrame({"a": [np.nan, 1]}),
+ ),
+ ],
+)
+def test_utf8_bom(all_parsers, data, kwargs, expected, request):
+ # see gh-4793
+ parser = all_parsers
+ bom = "\ufeff"
+ utf8 = "utf-8"
+
+ def _encode_data_with_bom(_data):
+ bom_data = (bom + _data).encode(utf8)
+ return BytesIO(bom_data)
+
+ if (
+ parser.engine == "pyarrow"
+ and data == "\n1"
+ and kwargs.get("skip_blank_lines", True)
+ ):
+ # CSV parse error: Empty CSV file or block: cannot infer number of columns
+ pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+
+ result = parser.read_csv(_encode_data_with_bom(data), encoding=utf8, **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt):
+ # see gh-13549
+ expected = DataFrame({"mb_num": [4.8], "multibyte": ["test"]})
+ parser = all_parsers
+
+ encoding = encoding_fmt.format(utf_value)
+ data = "mb_num,multibyte\n4.8,test".encode(encoding)
+
+ result = parser.read_csv(BytesIO(data), encoding=encoding)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "file_path,encoding",
+ [
+ (("io", "data", "csv", "test1.csv"), "utf-8"),
+ (("io", "parser", "data", "unicode_series.csv"), "latin-1"),
+ (("io", "parser", "data", "sauron.SHIFT_JIS.csv"), "shiftjis"),
+ ],
+)
+def test_binary_mode_file_buffers(all_parsers, file_path, encoding, datapath):
+ # gh-23779: Python csv engine shouldn't error on files opened in binary.
+ # gh-31575: Python csv engine shouldn't error on files opened in raw binary.
+ parser = all_parsers
+
+ fpath = datapath(*file_path)
+ expected = parser.read_csv(fpath, encoding=encoding)
+
+ with open(fpath, encoding=encoding) as fa:
+ result = parser.read_csv(fa)
+ assert not fa.closed
+ tm.assert_frame_equal(expected, result)
+
+ with open(fpath, mode="rb") as fb:
+ result = parser.read_csv(fb, encoding=encoding)
+ assert not fb.closed
+ tm.assert_frame_equal(expected, result)
+
+ with open(fpath, mode="rb", buffering=0) as fb:
+ result = parser.read_csv(fb, encoding=encoding)
+ assert not fb.closed
+ tm.assert_frame_equal(expected, result)
+
+
+@pytest.mark.parametrize("pass_encoding", [True, False])
+def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding):
+ # see gh-24130
+ parser = all_parsers
+ encoding = encoding_fmt.format(utf_value)
+
+ if parser.engine == "pyarrow" and pass_encoding is True and utf_value in [16, 32]:
+ # FIXME: this is bad!
+ pytest.skip("These cases freeze")
+
+ expected = DataFrame({"foo": ["bar"]})
+
+ with tm.ensure_clean(mode="w+", encoding=encoding, return_filelike=True) as f:
+ f.write("foo\nbar")
+ f.seek(0)
+
+ result = parser.read_csv(f, encoding=encoding if pass_encoding else None)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_encoding_named_temp_file(all_parsers):
+ # see gh-31819
+ parser = all_parsers
+ encoding = "shift-jis"
+
+ title = "てすと"
+ data = "こむ"
+
+ expected = DataFrame({title: [data]})
+
+ with tempfile.NamedTemporaryFile() as f:
+ f.write(f"{title}\n{data}".encode(encoding))
+
+ f.seek(0)
+
+ result = parser.read_csv(f, encoding=encoding)
+ tm.assert_frame_equal(result, expected)
+ assert not f.closed
+
+
+@pytest.mark.parametrize(
+ "encoding", ["utf-8", "utf-16", "utf-16-be", "utf-16-le", "utf-32"]
+)
+def test_parse_encoded_special_characters(encoding):
+ # GH16218 Verify parsing of data with encoded special characters
+ # Data contains a Unicode 'FULLWIDTH COLON' (U+FF1A) at position (0,"a")
+ data = "a\tb\n:foo\t0\nbar\t1\nbaz\t2" # noqa: RUF001
+ encoded_data = BytesIO(data.encode(encoding))
+ result = read_csv(encoded_data, delimiter="\t", encoding=encoding)
+
+ expected = DataFrame(
+ data=[[":foo", 0], ["bar", 1], ["baz", 2]], # noqa: RUF001
+ columns=["a", "b"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("encoding", ["utf-8", None, "utf-16", "cp1255", "latin-1"])
+def test_encoding_memory_map(all_parsers, encoding):
+ # GH40986
+ parser = all_parsers
+ expected = DataFrame(
+ {
+ "name": ["Raphael", "Donatello", "Miguel Angel", "Leonardo"],
+ "mask": ["red", "purple", "orange", "blue"],
+ "weapon": ["sai", "bo staff", "nunchunk", "katana"],
+ }
+ )
+ with tm.ensure_clean() as file:
+ expected.to_csv(file, index=False, encoding=encoding)
+
+ if parser.engine == "pyarrow":
+ msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(file, encoding=encoding, memory_map=True)
+ return
+
+ df = parser.read_csv(file, encoding=encoding, memory_map=True)
+ tm.assert_frame_equal(df, expected)
+
+
+def test_chunk_splits_multibyte_char(all_parsers):
+ """
+ Chunk splits a multibyte character with memory_map=True
+
+ GH 43540
+ """
+ parser = all_parsers
+ # DEFAULT_CHUNKSIZE = 262144, defined in parsers.pyx
+ df = DataFrame(data=["a" * 127] * 2048)
+
+ # Put two-bytes utf-8 encoded character "ą" at the end of chunk
+ # utf-8 encoding of "ą" is b'\xc4\x85'
+ df.iloc[2047] = "a" * 127 + "ą"
+ with tm.ensure_clean("bug-gh43540.csv") as fname:
+ df.to_csv(fname, index=False, header=False, encoding="utf-8")
+
+ if parser.engine == "pyarrow":
+ msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(fname, header=None, memory_map=True)
+ return
+
+ dfr = parser.read_csv(fname, header=None, memory_map=True)
+ tm.assert_frame_equal(dfr, df)
+
+
+def test_readcsv_memmap_utf8(all_parsers):
+ """
+ GH 43787
+
+ Test correct handling of UTF-8 chars when memory_map=True and encoding is UTF-8
+ """
+ lines = []
+ line_length = 128
+ start_char = " "
+ end_char = "\U00010080"
+ # This for loop creates a list of 128-char strings
+ # consisting of consecutive Unicode chars
+ for lnum in range(ord(start_char), ord(end_char), line_length):
+ line = "".join([chr(c) for c in range(lnum, lnum + 0x80)]) + "\n"
+ try:
+ line.encode("utf-8")
+ except UnicodeEncodeError:
+ continue
+ lines.append(line)
+ parser = all_parsers
+ df = DataFrame(lines)
+ with tm.ensure_clean("utf8test.csv") as fname:
+ df.to_csv(fname, index=False, header=False, encoding="utf-8")
+
+ if parser.engine == "pyarrow":
+ msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(fname, header=None, memory_map=True, encoding="utf-8")
+ return
+
+ dfr = parser.read_csv(fname, header=None, memory_map=True, encoding="utf-8")
+ tm.assert_frame_equal(df, dfr)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+@pytest.mark.parametrize("mode", ["w+b", "w+t"])
+def test_not_readable(all_parsers, mode):
+ # GH43439
+ parser = all_parsers
+ content = b"abcd"
+ if "t" in mode:
+ content = "abcd"
+ with tempfile.SpooledTemporaryFile(mode=mode, encoding="utf-8") as handle:
+ handle.write(content)
+ handle.seek(0)
+ df = parser.read_csv(handle)
+ expected = DataFrame([], columns=["abcd"])
+ tm.assert_frame_equal(df, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_index_col.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_index_col.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba15d061b2deba294cd054d58eac93e4647e8a62
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_index_col.py
@@ -0,0 +1,376 @@
+"""
+Tests that the specified index column (a.k.a "index_col")
+is properly handled or inferred during parsing for all of
+the parsers defined in parsers.py
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+@pytest.mark.parametrize("with_header", [True, False])
+def test_index_col_named(all_parsers, with_header):
+ parser = all_parsers
+ no_header = """\
+KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
+KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
+KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
+KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
+KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
+KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
+ header = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
+
+ if with_header:
+ data = header + no_header
+
+ result = parser.read_csv(StringIO(data), index_col="ID")
+ expected = parser.read_csv(StringIO(data), header=0).set_index("ID")
+ tm.assert_frame_equal(result, expected)
+ else:
+ data = no_header
+ msg = "Index ID invalid"
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), index_col="ID")
+
+
+def test_index_col_named2(all_parsers):
+ parser = all_parsers
+ data = """\
+1,2,3,4,hello
+5,6,7,8,world
+9,10,11,12,foo
+"""
+
+ expected = DataFrame(
+ {"a": [1, 5, 9], "b": [2, 6, 10], "c": [3, 7, 11], "d": [4, 8, 12]},
+ index=Index(["hello", "world", "foo"], name="message"),
+ )
+ names = ["a", "b", "c", "d", "message"]
+
+ result = parser.read_csv(StringIO(data), names=names, index_col=["message"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_index_col_is_true(all_parsers):
+ # see gh-9798
+ data = "a,b\n1,2"
+ parser = all_parsers
+
+ msg = "The value of index_col couldn't be 'True'"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), index_col=True)
+
+
+@skip_pyarrow # CSV parse error: Expected 3 columns, got 4
+def test_infer_index_col(all_parsers):
+ data = """A,B,C
+foo,1,2,3
+bar,4,5,6
+baz,7,8,9
+"""
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data))
+
+ expected = DataFrame(
+ [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=["foo", "bar", "baz"],
+ columns=["A", "B", "C"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+@pytest.mark.parametrize(
+ "index_col,kwargs",
+ [
+ (None, {"columns": ["x", "y", "z"]}),
+ (False, {"columns": ["x", "y", "z"]}),
+ (0, {"columns": ["y", "z"], "index": Index([], name="x")}),
+ (1, {"columns": ["x", "z"], "index": Index([], name="y")}),
+ ("x", {"columns": ["y", "z"], "index": Index([], name="x")}),
+ ("y", {"columns": ["x", "z"], "index": Index([], name="y")}),
+ (
+ [0, 1],
+ {
+ "columns": ["z"],
+ "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),
+ },
+ ),
+ (
+ ["x", "y"],
+ {
+ "columns": ["z"],
+ "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),
+ },
+ ),
+ (
+ [1, 0],
+ {
+ "columns": ["z"],
+ "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),
+ },
+ ),
+ (
+ ["y", "x"],
+ {
+ "columns": ["z"],
+ "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),
+ },
+ ),
+ ],
+)
+def test_index_col_empty_data(all_parsers, index_col, kwargs):
+ data = "x,y,z"
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), index_col=index_col)
+
+ expected = DataFrame(**kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_empty_with_index_col_false(all_parsers):
+ # see gh-10413
+ data = "x,y"
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), index_col=False)
+
+ expected = DataFrame(columns=["x", "y"])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "index_names",
+ [
+ ["", ""],
+ ["foo", ""],
+ ["", "bar"],
+ ["foo", "bar"],
+ ["NotReallyUnnamed", "Unnamed: 0"],
+ ],
+)
+def test_multi_index_naming(all_parsers, index_names, request):
+ parser = all_parsers
+
+ if parser.engine == "pyarrow" and "" in index_names:
+ mark = pytest.mark.xfail(reason="One case raises, others are wrong")
+ request.applymarker(mark)
+
+ # We don't want empty index names being replaced with "Unnamed: 0"
+ data = ",".join(index_names + ["col\na,c,1\na,d,2\nb,c,3\nb,d,4"])
+ result = parser.read_csv(StringIO(data), index_col=[0, 1])
+
+ expected = DataFrame(
+ {"col": [1, 2, 3, 4]}, index=MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ )
+ expected.index.names = [name if name else None for name in index_names]
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # ValueError: Found non-unique column index
+def test_multi_index_naming_not_all_at_beginning(all_parsers):
+ parser = all_parsers
+ data = ",Unnamed: 2,\na,c,1\na,d,2\nb,c,3\nb,d,4"
+ result = parser.read_csv(StringIO(data), index_col=[0, 2])
+
+ expected = DataFrame(
+ {"Unnamed: 2": ["c", "d", "c", "d"]},
+ index=MultiIndex(
+ levels=[["a", "b"], [1, 2, 3, 4]], codes=[[0, 0, 1, 1], [0, 1, 2, 3]]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # ValueError: Found non-unique column index
+def test_no_multi_index_level_names_empty(all_parsers):
+ # GH 10984
+ parser = all_parsers
+ midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((3, 3)),
+ index=midx,
+ columns=["x", "y", "z"],
+ )
+ with tm.ensure_clean() as path:
+ expected.to_csv(path)
+ result = parser.read_csv(path, index_col=[0, 1, 2])
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # TypeError: an integer is required
+def test_header_with_index_col(all_parsers):
+ # GH 33476
+ parser = all_parsers
+ data = """
+I11,A,A
+I12,B,B
+I2,1,3
+"""
+ midx = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])
+ idx = Index(["I2"])
+ expected = DataFrame([[1, 3]], index=idx, columns=midx)
+
+ result = parser.read_csv(StringIO(data), index_col=0, header=[0, 1])
+ tm.assert_frame_equal(result, expected)
+
+ col_idx = Index(["A", "A.1"])
+ idx = Index(["I12", "I2"], name="I11")
+ expected = DataFrame([["B", "B"], ["1", "3"]], index=idx, columns=col_idx)
+
+ result = parser.read_csv(StringIO(data), index_col="I11", header=0)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.slow
+def test_index_col_large_csv(all_parsers, monkeypatch):
+ # https://github.com/pandas-dev/pandas/issues/37094
+ parser = all_parsers
+
+ ARR_LEN = 100
+ df = DataFrame(
+ {
+ "a": range(ARR_LEN + 1),
+ "b": np.random.default_rng(2).standard_normal(ARR_LEN + 1),
+ }
+ )
+
+ with tm.ensure_clean() as path:
+ df.to_csv(path, index=False)
+ with monkeypatch.context() as m:
+ m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
+ result = parser.read_csv(path, index_col=[0])
+
+ tm.assert_frame_equal(result, df.set_index("a"))
+
+
+@xfail_pyarrow # TypeError: an integer is required
+def test_index_col_multiindex_columns_no_data(all_parsers):
+ # GH#38292
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1], index_col=0
+ )
+ expected = DataFrame(
+ [],
+ index=Index([]),
+ columns=MultiIndex.from_arrays(
+ [["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]
+ ),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # TypeError: an integer is required
+def test_index_col_header_no_data(all_parsers):
+ # GH#38292
+ parser = all_parsers
+ result = parser.read_csv(StringIO("a0,a1,a2\n"), header=[0], index_col=0)
+ expected = DataFrame(
+ [],
+ columns=["a1", "a2"],
+ index=Index([], name="a0"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # TypeError: an integer is required
+def test_multiindex_columns_no_data(all_parsers):
+ # GH#38292
+ parser = all_parsers
+ result = parser.read_csv(StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1])
+ expected = DataFrame(
+ [], columns=MultiIndex.from_arrays([["a0", "a1", "a2"], ["b0", "b1", "b2"]])
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # TypeError: an integer is required
+def test_multiindex_columns_index_col_with_data(all_parsers):
+ # GH#38292
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO("a0,a1,a2\nb0,b1,b2\ndata,data,data"), header=[0, 1], index_col=0
+ )
+ expected = DataFrame(
+ [["data", "data"]],
+ columns=MultiIndex.from_arrays(
+ [["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]
+ ),
+ index=Index(["data"]),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_infer_types_boolean_sum(all_parsers):
+ # GH#44079
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO("0,1"),
+ names=["a", "b"],
+ index_col=["a"],
+ dtype={"a": "UInt8"},
+ )
+ expected = DataFrame(
+ data={
+ "a": [
+ 0,
+ ],
+ "b": [1],
+ }
+ ).set_index("a")
+ # Not checking index type now, because the C parser will return a
+ # index column of dtype 'object', and the Python parser will return a
+ # index column of dtype 'int64'.
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+
+@pytest.mark.parametrize("dtype, val", [(object, "01"), ("int64", 1)])
+def test_specify_dtype_for_index_col(all_parsers, dtype, val, request):
+ # GH#9435
+ data = "a,b\n01,2"
+ parser = all_parsers
+ if dtype == object and parser.engine == "pyarrow":
+ request.applymarker(
+ pytest.mark.xfail(reason="Cannot disable type-inference for pyarrow engine")
+ )
+ result = parser.read_csv(StringIO(data), index_col="a", dtype={"a": dtype})
+ expected = DataFrame({"b": [2]}, index=Index([val], name="a"))
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # TypeError: an integer is required
+def test_multiindex_columns_not_leading_index_col(all_parsers):
+ # GH#38549
+ parser = all_parsers
+ data = """a,b,c,d
+e,f,g,h
+x,y,1,2
+"""
+ result = parser.read_csv(
+ StringIO(data),
+ header=[0, 1],
+ index_col=1,
+ )
+ cols = MultiIndex.from_tuples(
+ [("a", "e"), ("c", "g"), ("d", "h")], names=["b", "f"]
+ )
+ expected = DataFrame([["x", 1, 2]], columns=cols, index=["y"])
+ tm.assert_frame_equal(result, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_quoting.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_quoting.py
new file mode 100644
index 0000000000000000000000000000000000000000..a70b7e3389c1ba5ebc6e2a8fadd54e13999e12f9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_quoting.py
@@ -0,0 +1,183 @@
+"""
+Tests that quoting specifications are properly handled
+during parsing for all of the parsers defined in parsers.py
+"""
+
+import csv
+from io import StringIO
+
+import pytest
+
+from pandas.compat import PY311
+from pandas.errors import ParserError
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+@pytest.mark.parametrize(
+ "kwargs,msg",
+ [
+ ({"quotechar": "foo"}, '"quotechar" must be a(n)? 1-character string'),
+ (
+ {"quotechar": None, "quoting": csv.QUOTE_MINIMAL},
+ "quotechar must be set if quoting enabled",
+ ),
+ ({"quotechar": 2}, '"quotechar" must be string( or None)?, not int'),
+ ],
+)
+@skip_pyarrow # ParserError: CSV parse error: Empty CSV file or block
+def test_bad_quote_char(all_parsers, kwargs, msg):
+ data = "1,2,3"
+ parser = all_parsers
+
+ with pytest.raises(TypeError, match=msg):
+ parser.read_csv(StringIO(data), **kwargs)
+
+
+@pytest.mark.parametrize(
+ "quoting,msg",
+ [
+ ("foo", '"quoting" must be an integer|Argument'),
+ (10, 'bad "quoting" value'), # quoting must be in the range [0, 3]
+ ],
+)
+@xfail_pyarrow # ValueError: The 'quoting' option is not supported
+def test_bad_quoting(all_parsers, quoting, msg):
+ data = "1,2,3"
+ parser = all_parsers
+
+ with pytest.raises(TypeError, match=msg):
+ parser.read_csv(StringIO(data), quoting=quoting)
+
+
+def test_quote_char_basic(all_parsers):
+ parser = all_parsers
+ data = 'a,b,c\n1,2,"cat"'
+ expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"])
+
+ result = parser.read_csv(StringIO(data), quotechar='"')
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("quote_char", ["~", "*", "%", "$", "@", "P"])
+def test_quote_char_various(all_parsers, quote_char):
+ parser = all_parsers
+ expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"])
+
+ data = 'a,b,c\n1,2,"cat"'
+ new_data = data.replace('"', quote_char)
+
+ result = parser.read_csv(StringIO(new_data), quotechar=quote_char)
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # ValueError: The 'quoting' option is not supported
+@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])
+@pytest.mark.parametrize("quote_char", ["", None])
+def test_null_quote_char(all_parsers, quoting, quote_char):
+ kwargs = {"quotechar": quote_char, "quoting": quoting}
+ data = "a,b,c\n1,2,3"
+ parser = all_parsers
+
+ if quoting != csv.QUOTE_NONE:
+ # Sanity checking.
+ msg = (
+ '"quotechar" must be a 1-character string'
+ if PY311 and all_parsers.engine == "python" and quote_char == ""
+ else "quotechar must be set if quoting enabled"
+ )
+
+ with pytest.raises(TypeError, match=msg):
+ parser.read_csv(StringIO(data), **kwargs)
+ elif not (PY311 and all_parsers.engine == "python"):
+ # Python 3.11+ doesn't support null/blank quote chars in their csv parsers
+ expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
+ result = parser.read_csv(StringIO(data), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "kwargs,exp_data",
+ [
+ ({}, [[1, 2, "foo"]]), # Test default.
+ # QUOTE_MINIMAL only applies to CSV writing, so no effect on reading.
+ ({"quotechar": '"', "quoting": csv.QUOTE_MINIMAL}, [[1, 2, "foo"]]),
+ # QUOTE_MINIMAL only applies to CSV writing, so no effect on reading.
+ ({"quotechar": '"', "quoting": csv.QUOTE_ALL}, [[1, 2, "foo"]]),
+ # QUOTE_NONE tells the reader to do no special handling
+ # of quote characters and leave them alone.
+ ({"quotechar": '"', "quoting": csv.QUOTE_NONE}, [[1, 2, '"foo"']]),
+ # QUOTE_NONNUMERIC tells the reader to cast
+ # all non-quoted fields to float
+ ({"quotechar": '"', "quoting": csv.QUOTE_NONNUMERIC}, [[1.0, 2.0, "foo"]]),
+ ],
+)
+@xfail_pyarrow # ValueError: The 'quoting' option is not supported
+def test_quoting_various(all_parsers, kwargs, exp_data):
+ data = '1,2,"foo"'
+ parser = all_parsers
+ columns = ["a", "b", "c"]
+
+ result = parser.read_csv(StringIO(data), names=columns, **kwargs)
+ expected = DataFrame(exp_data, columns=columns)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "doublequote,exp_data", [(True, [[3, '4 " 5']]), (False, [[3, '4 " 5"']])]
+)
+def test_double_quote(all_parsers, doublequote, exp_data, request):
+ parser = all_parsers
+ data = 'a,b\n3,"4 "" 5"'
+
+ if parser.engine == "pyarrow" and not doublequote:
+ mark = pytest.mark.xfail(reason="Mismatched result")
+ request.applymarker(mark)
+
+ result = parser.read_csv(StringIO(data), quotechar='"', doublequote=doublequote)
+ expected = DataFrame(exp_data, columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("quotechar", ['"', "\u0001"])
+def test_quotechar_unicode(all_parsers, quotechar):
+ # see gh-14477
+ data = "a\n1"
+ parser = all_parsers
+ expected = DataFrame({"a": [1]})
+
+ result = parser.read_csv(StringIO(data), quotechar=quotechar)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("balanced", [True, False])
+def test_unbalanced_quoting(all_parsers, balanced, request):
+ # see gh-22789.
+ parser = all_parsers
+ data = 'a,b,c\n1,2,"3'
+
+ if parser.engine == "pyarrow" and not balanced:
+ mark = pytest.mark.xfail(reason="Mismatched result")
+ request.applymarker(mark)
+
+ if balanced:
+ # Re-balance the quoting and read in without errors.
+ expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
+ result = parser.read_csv(StringIO(data + '"'))
+ tm.assert_frame_equal(result, expected)
+ else:
+ msg = (
+ "EOF inside string starting at row 1"
+ if parser.engine == "c"
+ else "unexpected end of data"
+ )
+
+ with pytest.raises(ParserError, match=msg):
+ parser.read_csv(StringIO(data))
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_read_fwf.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_read_fwf.py
new file mode 100644
index 0000000000000000000000000000000000000000..bed2b5e10a6f79cd3393be9859d652720800aadd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_read_fwf.py
@@ -0,0 +1,1044 @@
+"""
+Tests the 'read_fwf' function in parsers.py. This
+test suite is independent of the others because the
+engine is set to 'python-fwf' internally.
+"""
+
+from datetime import datetime
+from io import (
+ BytesIO,
+ StringIO,
+)
+from pathlib import Path
+
+import numpy as np
+import pytest
+
+from pandas.errors import EmptyDataError
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+)
+import pandas._testing as tm
+from pandas.core.arrays import (
+ ArrowStringArray,
+ StringArray,
+)
+
+from pandas.io.common import urlopen
+from pandas.io.parsers import (
+ read_csv,
+ read_fwf,
+)
+
+
+def test_basic():
+ data = """\
+A B C D
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+201160 364.136849 183.628767 11806.2
+201161 413.836124 184.375703 11916.8
+201162 502.953953 173.237159 12468.3
+"""
+ result = read_fwf(StringIO(data))
+ expected = DataFrame(
+ [
+ [201158, 360.242940, 149.910199, 11950.7],
+ [201159, 444.953632, 166.985655, 11788.4],
+ [201160, 364.136849, 183.628767, 11806.2],
+ [201161, 413.836124, 184.375703, 11916.8],
+ [201162, 502.953953, 173.237159, 12468.3],
+ ],
+ columns=["A", "B", "C", "D"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_colspecs():
+ data = """\
+A B C D E
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+201160 364.136849 183.628767 11806.2
+201161 413.836124 184.375703 11916.8
+201162 502.953953 173.237159 12468.3
+"""
+ colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
+ result = read_fwf(StringIO(data), colspecs=colspecs)
+
+ expected = DataFrame(
+ [
+ [2011, 58, 360.242940, 149.910199, 11950.7],
+ [2011, 59, 444.953632, 166.985655, 11788.4],
+ [2011, 60, 364.136849, 183.628767, 11806.2],
+ [2011, 61, 413.836124, 184.375703, 11916.8],
+ [2011, 62, 502.953953, 173.237159, 12468.3],
+ ],
+ columns=["A", "B", "C", "D", "E"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_widths():
+ data = """\
+A B C D E
+2011 58 360.242940 149.910199 11950.7
+2011 59 444.953632 166.985655 11788.4
+2011 60 364.136849 183.628767 11806.2
+2011 61 413.836124 184.375703 11916.8
+2011 62 502.953953 173.237159 12468.3
+"""
+ result = read_fwf(StringIO(data), widths=[5, 5, 13, 13, 7])
+
+ expected = DataFrame(
+ [
+ [2011, 58, 360.242940, 149.910199, 11950.7],
+ [2011, 59, 444.953632, 166.985655, 11788.4],
+ [2011, 60, 364.136849, 183.628767, 11806.2],
+ [2011, 61, 413.836124, 184.375703, 11916.8],
+ [2011, 62, 502.953953, 173.237159, 12468.3],
+ ],
+ columns=["A", "B", "C", "D", "E"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_non_space_filler():
+ # From Thomas Kluyver:
+ #
+ # Apparently, some non-space filler characters can be seen, this is
+ # supported by specifying the 'delimiter' character:
+ #
+ # http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
+ data = """\
+A~~~~B~~~~C~~~~~~~~~~~~D~~~~~~~~~~~~E
+201158~~~~360.242940~~~149.910199~~~11950.7
+201159~~~~444.953632~~~166.985655~~~11788.4
+201160~~~~364.136849~~~183.628767~~~11806.2
+201161~~~~413.836124~~~184.375703~~~11916.8
+201162~~~~502.953953~~~173.237159~~~12468.3
+"""
+ colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
+ result = read_fwf(StringIO(data), colspecs=colspecs, delimiter="~")
+
+ expected = DataFrame(
+ [
+ [2011, 58, 360.242940, 149.910199, 11950.7],
+ [2011, 59, 444.953632, 166.985655, 11788.4],
+ [2011, 60, 364.136849, 183.628767, 11806.2],
+ [2011, 61, 413.836124, 184.375703, 11916.8],
+ [2011, 62, 502.953953, 173.237159, 12468.3],
+ ],
+ columns=["A", "B", "C", "D", "E"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_over_specified():
+ data = """\
+A B C D E
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+201160 364.136849 183.628767 11806.2
+201161 413.836124 184.375703 11916.8
+201162 502.953953 173.237159 12468.3
+"""
+ colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
+
+ with pytest.raises(ValueError, match="must specify only one of"):
+ read_fwf(StringIO(data), colspecs=colspecs, widths=[6, 10, 10, 7])
+
+
+def test_under_specified():
+ data = """\
+A B C D E
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+201160 364.136849 183.628767 11806.2
+201161 413.836124 184.375703 11916.8
+201162 502.953953 173.237159 12468.3
+"""
+ with pytest.raises(ValueError, match="Must specify either"):
+ read_fwf(StringIO(data), colspecs=None, widths=None)
+
+
+def test_read_csv_compat():
+ csv_data = """\
+A,B,C,D,E
+2011,58,360.242940,149.910199,11950.7
+2011,59,444.953632,166.985655,11788.4
+2011,60,364.136849,183.628767,11806.2
+2011,61,413.836124,184.375703,11916.8
+2011,62,502.953953,173.237159,12468.3
+"""
+ expected = read_csv(StringIO(csv_data), engine="python")
+
+ fwf_data = """\
+A B C D E
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+201160 364.136849 183.628767 11806.2
+201161 413.836124 184.375703 11916.8
+201162 502.953953 173.237159 12468.3
+"""
+ colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
+ result = read_fwf(StringIO(fwf_data), colspecs=colspecs)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_bytes_io_input():
+ data = BytesIO("שלום\nשלום".encode()) # noqa: RUF001
+ result = read_fwf(data, widths=[2, 2], encoding="utf8")
+ expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_fwf_colspecs_is_list_or_tuple():
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+
+ msg = "column specifications must be a list or tuple.+"
+
+ with pytest.raises(TypeError, match=msg):
+ read_fwf(StringIO(data), colspecs={"a": 1}, delimiter=",")
+
+
+def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples():
+ data = """index,A,B,C,D
+foo,2,3,4,5
+bar,7,8,9,10
+baz,12,13,14,15
+qux,12,13,14,15
+foo2,12,13,14,15
+bar2,12,13,14,15
+"""
+
+ msg = "Each column specification must be.+"
+
+ with pytest.raises(TypeError, match=msg):
+ read_fwf(StringIO(data), colspecs=[("a", 1)])
+
+
+@pytest.mark.parametrize(
+ "colspecs,exp_data",
+ [
+ ([(0, 3), (3, None)], [[123, 456], [456, 789]]),
+ ([(None, 3), (3, 6)], [[123, 456], [456, 789]]),
+ ([(0, None), (3, None)], [[123456, 456], [456789, 789]]),
+ ([(None, None), (3, 6)], [[123456, 456], [456789, 789]]),
+ ],
+)
+def test_fwf_colspecs_none(colspecs, exp_data):
+ # see gh-7079
+ data = """\
+123456
+456789
+"""
+ expected = DataFrame(exp_data)
+
+ result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "infer_nrows,exp_data",
+ [
+ # infer_nrows --> colspec == [(2, 3), (5, 6)]
+ (1, [[1, 2], [3, 8]]),
+ # infer_nrows > number of rows
+ (10, [[1, 2], [123, 98]]),
+ ],
+)
+def test_fwf_colspecs_infer_nrows(infer_nrows, exp_data):
+ # see gh-15138
+ data = """\
+ 1 2
+123 98
+"""
+ expected = DataFrame(exp_data)
+
+ result = read_fwf(StringIO(data), infer_nrows=infer_nrows, header=None)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_fwf_regression():
+ # see gh-3594
+ #
+ # Turns out "T060" is parsable as a datetime slice!
+ tz_list = [1, 10, 20, 30, 60, 80, 100]
+ widths = [16] + [8] * len(tz_list)
+ names = ["SST"] + [f"T{z:03d}" for z in tz_list[1:]]
+
+ data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
+2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
+2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
+2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
+2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
+"""
+
+ with tm.assert_produces_warning(FutureWarning, match="use 'date_format' instead"):
+ result = read_fwf(
+ StringIO(data),
+ index_col=0,
+ header=None,
+ names=names,
+ widths=widths,
+ parse_dates=True,
+ date_parser=lambda s: datetime.strptime(s, "%Y%j%H%M%S"),
+ )
+ expected = DataFrame(
+ [
+ [9.5403, 9.4105, 8.6571, 7.8372, 6.0612, 5.8843, 5.5192],
+ [9.5435, 9.2010, 8.6167, 7.8176, 6.0804, 5.8728, 5.4869],
+ [9.5873, 9.1326, 8.4694, 7.5889, 6.0422, 5.8526, 5.4657],
+ [9.5810, 9.0896, 8.4009, 7.4652, 6.0322, 5.8189, 5.4379],
+ [9.6034, 9.0897, 8.3822, 7.4905, 6.0908, 5.7904, 5.4039],
+ ],
+ index=DatetimeIndex(
+ [
+ "2009-06-13 20:20:00",
+ "2009-06-13 20:30:00",
+ "2009-06-13 20:40:00",
+ "2009-06-13 20:50:00",
+ "2009-06-13 21:00:00",
+ ]
+ ),
+ columns=["SST", "T010", "T020", "T030", "T060", "T080", "T100"],
+ )
+ tm.assert_frame_equal(result, expected)
+ result = read_fwf(
+ StringIO(data),
+ index_col=0,
+ header=None,
+ names=names,
+ widths=widths,
+ parse_dates=True,
+ date_format="%Y%j%H%M%S",
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_fwf_for_uint8():
+ data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
+1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa: E501
+ df = read_fwf(
+ StringIO(data),
+ colspecs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)],
+ names=["time", "pri", "pgn", "dst", "src", "data"],
+ converters={
+ "pgn": lambda x: int(x, 16),
+ "src": lambda x: int(x, 16),
+ "dst": lambda x: int(x, 16),
+ "data": lambda x: len(x.split(" ")),
+ },
+ )
+
+ expected = DataFrame(
+ [
+ [1421302965.213420, 3, 61184, 23, 40, 8],
+ [1421302964.226776, 6, 61442, None, 71, 8],
+ ],
+ columns=["time", "pri", "pgn", "dst", "src", "data"],
+ )
+ expected["dst"] = expected["dst"].astype(object)
+ tm.assert_frame_equal(df, expected)
+
+
+@pytest.mark.parametrize("comment", ["#", "~", "!"])
+def test_fwf_comment(comment):
+ data = """\
+ 1 2. 4 #hello world
+ 5 NaN 10.0
+"""
+ data = data.replace("#", comment)
+
+ colspecs = [(0, 3), (4, 9), (9, 25)]
+ expected = DataFrame([[1, 2.0, 4], [5, np.nan, 10.0]])
+
+ result = read_fwf(StringIO(data), colspecs=colspecs, header=None, comment=comment)
+ tm.assert_almost_equal(result, expected)
+
+
+def test_fwf_skip_blank_lines():
+ data = """
+
+A B C D
+
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+
+
+201162 502.953953 173.237159 12468.3
+
+"""
+ result = read_fwf(StringIO(data), skip_blank_lines=True)
+ expected = DataFrame(
+ [
+ [201158, 360.242940, 149.910199, 11950.7],
+ [201159, 444.953632, 166.985655, 11788.4],
+ [201162, 502.953953, 173.237159, 12468.3],
+ ],
+ columns=["A", "B", "C", "D"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+ data = """\
+A B C D
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+
+
+201162 502.953953 173.237159 12468.3
+"""
+ result = read_fwf(StringIO(data), skip_blank_lines=False)
+ expected = DataFrame(
+ [
+ [201158, 360.242940, 149.910199, 11950.7],
+ [201159, 444.953632, 166.985655, 11788.4],
+ [np.nan, np.nan, np.nan, np.nan],
+ [np.nan, np.nan, np.nan, np.nan],
+ [201162, 502.953953, 173.237159, 12468.3],
+ ],
+ columns=["A", "B", "C", "D"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("thousands", [",", "#", "~"])
+def test_fwf_thousands(thousands):
+ data = """\
+ 1 2,334.0 5
+10 13 10.
+"""
+ data = data.replace(",", thousands)
+
+ colspecs = [(0, 3), (3, 11), (12, 16)]
+ expected = DataFrame([[1, 2334.0, 5], [10, 13, 10.0]])
+
+ result = read_fwf(
+ StringIO(data), header=None, colspecs=colspecs, thousands=thousands
+ )
+ tm.assert_almost_equal(result, expected)
+
+
+@pytest.mark.parametrize("header", [True, False])
+def test_bool_header_arg(header):
+ # see gh-6114
+ data = """\
+MyColumn
+ a
+ b
+ a
+ b"""
+
+ msg = "Passing a bool to header is invalid"
+ with pytest.raises(TypeError, match=msg):
+ read_fwf(StringIO(data), header=header)
+
+
+def test_full_file():
+ # File with all values.
+ test = """index A B C
+2000-01-03T00:00:00 0.980268513777 3 foo
+2000-01-04T00:00:00 1.04791624281 -4 bar
+2000-01-05T00:00:00 0.498580885705 73 baz
+2000-01-06T00:00:00 1.12020151869 1 foo
+2000-01-07T00:00:00 0.487094399463 0 bar
+2000-01-10T00:00:00 0.836648671666 2 baz
+2000-01-11T00:00:00 0.157160753327 34 foo"""
+ colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
+ expected = read_fwf(StringIO(test), colspecs=colspecs)
+
+ result = read_fwf(StringIO(test))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_full_file_with_missing():
+ # File with missing values.
+ test = """index A B C
+2000-01-03T00:00:00 0.980268513777 3 foo
+2000-01-04T00:00:00 1.04791624281 -4 bar
+ 0.498580885705 73 baz
+2000-01-06T00:00:00 1.12020151869 1 foo
+2000-01-07T00:00:00 0 bar
+2000-01-10T00:00:00 0.836648671666 2 baz
+ 34"""
+ colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
+ expected = read_fwf(StringIO(test), colspecs=colspecs)
+
+ result = read_fwf(StringIO(test))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_full_file_with_spaces():
+ # File with spaces in columns.
+ test = """
+Account Name Balance CreditLimit AccountCreated
+101 Keanu Reeves 9315.45 10000.00 1/17/1998
+312 Gerard Butler 90.00 1000.00 8/6/2003
+868 Jennifer Love Hewitt 0 17000.00 5/25/1985
+761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
+317 Bill Murray 789.65 5000.00 2/5/2007
+""".strip(
+ "\r\n"
+ )
+ colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
+ expected = read_fwf(StringIO(test), colspecs=colspecs)
+
+ result = read_fwf(StringIO(test))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_full_file_with_spaces_and_missing():
+ # File with spaces and missing values in columns.
+ test = """
+Account Name Balance CreditLimit AccountCreated
+101 10000.00 1/17/1998
+312 Gerard Butler 90.00 1000.00 8/6/2003
+868 5/25/1985
+761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
+317 Bill Murray 789.65
+""".strip(
+ "\r\n"
+ )
+ colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
+ expected = read_fwf(StringIO(test), colspecs=colspecs)
+
+ result = read_fwf(StringIO(test))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_messed_up_data():
+ # Completely messed up file.
+ test = """
+ Account Name Balance Credit Limit Account Created
+ 101 10000.00 1/17/1998
+ 312 Gerard Butler 90.00 1000.00
+
+ 761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
+ 317 Bill Murray 789.65
+""".strip(
+ "\r\n"
+ )
+ colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
+ expected = read_fwf(StringIO(test), colspecs=colspecs)
+
+ result = read_fwf(StringIO(test))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_multiple_delimiters():
+ test = r"""
+col1~~~~~col2 col3++++++++++++++++++col4
+~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
+ 33+++122.33\\\bar.........Gerard Butler
+++44~~~~12.01 baz~~Jennifer Love Hewitt
+~~55 11+++foo++++Jada Pinkett-Smith
+..66++++++.03~~~bar Bill Murray
+""".strip(
+ "\r\n"
+ )
+ delimiter = " +~.\\"
+ colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
+ expected = read_fwf(StringIO(test), colspecs=colspecs, delimiter=delimiter)
+
+ result = read_fwf(StringIO(test), delimiter=delimiter)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_variable_width_unicode():
+ data = """
+שלום שלום
+ום שלל
+של ום
+""".strip(
+ "\r\n"
+ )
+ encoding = "utf8"
+ kwargs = {"header": None, "encoding": encoding}
+
+ expected = read_fwf(
+ BytesIO(data.encode(encoding)), colspecs=[(0, 4), (5, 9)], **kwargs
+ )
+ result = read_fwf(BytesIO(data.encode(encoding)), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", [{}, {"a": "float64", "b": str, "c": "int32"}])
+def test_dtype(dtype):
+ data = """ a b c
+1 2 3.2
+3 4 5.2
+"""
+ colspecs = [(0, 5), (5, 10), (10, None)]
+ result = read_fwf(StringIO(data), colspecs=colspecs, dtype=dtype)
+
+ expected = DataFrame(
+ {"a": [1, 3], "b": [2, 4], "c": [3.2, 5.2]}, columns=["a", "b", "c"]
+ )
+
+ for col, dt in dtype.items():
+ expected[col] = expected[col].astype(dt)
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_skiprows_inference():
+ # see gh-11256
+ data = """
+Text contained in the file header
+
+DataCol1 DataCol2
+ 0.0 1.0
+ 101.6 956.1
+""".strip()
+ skiprows = 2
+
+ depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
+ expected = read_csv(StringIO(data), skiprows=skiprows, delim_whitespace=True)
+
+ result = read_fwf(StringIO(data), skiprows=skiprows)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_skiprows_by_index_inference():
+ data = """
+To be skipped
+Not To Be Skipped
+Once more to be skipped
+123 34 8 123
+456 78 9 456
+""".strip()
+ skiprows = [0, 2]
+
+ depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
+ expected = read_csv(StringIO(data), skiprows=skiprows, delim_whitespace=True)
+
+ result = read_fwf(StringIO(data), skiprows=skiprows)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_skiprows_inference_empty():
+ data = """
+AA BBB C
+12 345 6
+78 901 2
+""".strip()
+
+ msg = "No rows from which to infer column width"
+ with pytest.raises(EmptyDataError, match=msg):
+ read_fwf(StringIO(data), skiprows=3)
+
+
+def test_whitespace_preservation():
+ # see gh-16772
+ header = None
+ csv_data = """
+ a ,bbb
+ cc,dd """
+
+ fwf_data = """
+ a bbb
+ ccdd """
+ result = read_fwf(
+ StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0], delimiter="\n\t"
+ )
+ expected = read_csv(StringIO(csv_data), header=header)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_default_delimiter():
+ header = None
+ csv_data = """
+a,bbb
+cc,dd"""
+
+ fwf_data = """
+a \tbbb
+cc\tdd """
+ result = read_fwf(StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0])
+ expected = read_csv(StringIO(csv_data), header=header)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("infer", [True, False])
+def test_fwf_compression(compression_only, infer, compression_to_extension):
+ data = """1111111111
+ 2222222222
+ 3333333333""".strip()
+
+ compression = compression_only
+ extension = compression_to_extension[compression]
+
+ kwargs = {"widths": [5, 5], "names": ["one", "two"]}
+ expected = read_fwf(StringIO(data), **kwargs)
+
+ data = bytes(data, encoding="utf-8")
+
+ with tm.ensure_clean(filename="tmp." + extension) as path:
+ tm.write_to_compressed(compression, path, data)
+
+ if infer is not None:
+ kwargs["compression"] = "infer" if infer else compression
+
+ result = read_fwf(path, **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_binary_mode():
+ """
+ read_fwf supports opening files in binary mode.
+
+ GH 18035.
+ """
+ data = """aaa aaa aaa
+bba bab b a"""
+ df_reference = DataFrame(
+ [["bba", "bab", "b a"]], columns=["aaa", "aaa.1", "aaa.2"], index=[0]
+ )
+ with tm.ensure_clean() as path:
+ Path(path).write_text(data, encoding="utf-8")
+ with open(path, "rb") as file:
+ df = read_fwf(file)
+ file.seek(0)
+ tm.assert_frame_equal(df, df_reference)
+
+
+@pytest.mark.parametrize("memory_map", [True, False])
+def test_encoding_mmap(memory_map):
+ """
+ encoding should be working, even when using a memory-mapped file.
+
+ GH 23254.
+ """
+ encoding = "iso8859_1"
+ with tm.ensure_clean() as path:
+ Path(path).write_bytes(" 1 A Ä 2\n".encode(encoding))
+ df = read_fwf(
+ path,
+ header=None,
+ widths=[2, 2, 2, 2],
+ encoding=encoding,
+ memory_map=memory_map,
+ )
+ df_reference = DataFrame([[1, "A", "Ä", 2]])
+ tm.assert_frame_equal(df, df_reference)
+
+
+@pytest.mark.parametrize(
+ "colspecs, names, widths, index_col",
+ [
+ (
+ [(0, 6), (6, 12), (12, 18), (18, None)],
+ list("abcde"),
+ None,
+ None,
+ ),
+ (
+ None,
+ list("abcde"),
+ [6] * 4,
+ None,
+ ),
+ (
+ [(0, 6), (6, 12), (12, 18), (18, None)],
+ list("abcde"),
+ None,
+ True,
+ ),
+ (
+ None,
+ list("abcde"),
+ [6] * 4,
+ False,
+ ),
+ (
+ None,
+ list("abcde"),
+ [6] * 4,
+ True,
+ ),
+ (
+ [(0, 6), (6, 12), (12, 18), (18, None)],
+ list("abcde"),
+ None,
+ False,
+ ),
+ ],
+)
+def test_len_colspecs_len_names(colspecs, names, widths, index_col):
+ # GH#40830
+ data = """col1 col2 col3 col4
+ bab ba 2"""
+ msg = "Length of colspecs must match length of names"
+ with pytest.raises(ValueError, match=msg):
+ read_fwf(
+ StringIO(data),
+ colspecs=colspecs,
+ names=names,
+ widths=widths,
+ index_col=index_col,
+ )
+
+
+@pytest.mark.parametrize(
+ "colspecs, names, widths, index_col, expected",
+ [
+ (
+ [(0, 6), (6, 12), (12, 18), (18, None)],
+ list("abc"),
+ None,
+ 0,
+ DataFrame(
+ index=["col1", "ba"],
+ columns=["a", "b", "c"],
+ data=[["col2", "col3", "col4"], ["b ba", "2", np.nan]],
+ ),
+ ),
+ (
+ [(0, 6), (6, 12), (12, 18), (18, None)],
+ list("ab"),
+ None,
+ [0, 1],
+ DataFrame(
+ index=[["col1", "ba"], ["col2", "b ba"]],
+ columns=["a", "b"],
+ data=[["col3", "col4"], ["2", np.nan]],
+ ),
+ ),
+ (
+ [(0, 6), (6, 12), (12, 18), (18, None)],
+ list("a"),
+ None,
+ [0, 1, 2],
+ DataFrame(
+ index=[["col1", "ba"], ["col2", "b ba"], ["col3", "2"]],
+ columns=["a"],
+ data=[["col4"], [np.nan]],
+ ),
+ ),
+ (
+ None,
+ list("abc"),
+ [6] * 4,
+ 0,
+ DataFrame(
+ index=["col1", "ba"],
+ columns=["a", "b", "c"],
+ data=[["col2", "col3", "col4"], ["b ba", "2", np.nan]],
+ ),
+ ),
+ (
+ None,
+ list("ab"),
+ [6] * 4,
+ [0, 1],
+ DataFrame(
+ index=[["col1", "ba"], ["col2", "b ba"]],
+ columns=["a", "b"],
+ data=[["col3", "col4"], ["2", np.nan]],
+ ),
+ ),
+ (
+ None,
+ list("a"),
+ [6] * 4,
+ [0, 1, 2],
+ DataFrame(
+ index=[["col1", "ba"], ["col2", "b ba"], ["col3", "2"]],
+ columns=["a"],
+ data=[["col4"], [np.nan]],
+ ),
+ ),
+ ],
+)
+def test_len_colspecs_len_names_with_index_col(
+ colspecs, names, widths, index_col, expected
+):
+ # GH#40830
+ data = """col1 col2 col3 col4
+ bab ba 2"""
+ result = read_fwf(
+ StringIO(data),
+ colspecs=colspecs,
+ names=names,
+ widths=widths,
+ index_col=index_col,
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_colspecs_with_comment():
+ # GH 14135
+ result = read_fwf(
+ StringIO("#\nA1K\n"), colspecs=[(1, 2), (2, 3)], comment="#", header=None
+ )
+ expected = DataFrame([[1, "K"]], columns=[0, 1])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_skip_rows_and_n_rows():
+ # GH#44021
+ data = """a\tb
+1\t a
+2\t b
+3\t c
+4\t d
+5\t e
+6\t f
+ """
+ result = read_fwf(StringIO(data), nrows=4, skiprows=[2, 4])
+ expected = DataFrame({"a": [1, 3, 5, 6], "b": ["a", "c", "e", "f"]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_skiprows_with_iterator():
+ # GH#10261, GH#56323
+ data = """0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+ """
+ df_iter = read_fwf(
+ StringIO(data),
+ colspecs=[(0, 2)],
+ names=["a"],
+ iterator=True,
+ chunksize=2,
+ skiprows=[0, 1, 2, 6, 9],
+ )
+ expected_frames = [
+ DataFrame({"a": [3, 4]}),
+ DataFrame({"a": [5, 7]}, index=[2, 3]),
+ DataFrame({"a": [8]}, index=[4]),
+ ]
+ for i, result in enumerate(df_iter):
+ tm.assert_frame_equal(result, expected_frames[i])
+
+
+def test_names_and_infer_colspecs():
+ # GH#45337
+ data = """X Y Z
+ 959.0 345 22.2
+ """
+ result = read_fwf(StringIO(data), skiprows=1, usecols=[0, 2], names=["a", "b"])
+ expected = DataFrame({"a": [959.0], "b": 22.2})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_widths_and_usecols():
+ # GH#46580
+ data = """0 1 n -0.4100.1
+0 2 p 0.2 90.1
+0 3 n -0.3140.4"""
+ result = read_fwf(
+ StringIO(data),
+ header=None,
+ usecols=(0, 1, 3),
+ widths=(3, 5, 1, 5, 5),
+ index_col=False,
+ names=("c0", "c1", "c3"),
+ )
+ expected = DataFrame(
+ {
+ "c0": 0,
+ "c1": [1, 2, 3],
+ "c3": [-0.4, 0.2, -0.3],
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_dtype_backend(string_storage, dtype_backend):
+ # GH#50289
+ if string_storage == "python":
+ arr = StringArray(np.array(["a", "b"], dtype=np.object_))
+ arr_na = StringArray(np.array([pd.NA, "a"], dtype=np.object_))
+ elif dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ arr = ArrowExtensionArray(pa.array(["a", "b"]))
+ arr_na = ArrowExtensionArray(pa.array([None, "a"]))
+ else:
+ pa = pytest.importorskip("pyarrow")
+ arr = ArrowStringArray(pa.array(["a", "b"]))
+ arr_na = ArrowStringArray(pa.array([None, "a"]))
+
+ data = """a b c d e f g h i
+1 2.5 True a
+3 4.5 False b True 6 7.5 a"""
+ with pd.option_context("mode.string_storage", string_storage):
+ result = read_fwf(StringIO(data), dtype_backend=dtype_backend)
+
+ expected = DataFrame(
+ {
+ "a": pd.Series([1, 3], dtype="Int64"),
+ "b": pd.Series([2.5, 4.5], dtype="Float64"),
+ "c": pd.Series([True, False], dtype="boolean"),
+ "d": arr,
+ "e": pd.Series([pd.NA, True], dtype="boolean"),
+ "f": pd.Series([pd.NA, 6], dtype="Int64"),
+ "g": pd.Series([pd.NA, 7.5], dtype="Float64"),
+ "h": arr_na,
+ "i": pd.Series([pd.NA, pd.NA], dtype="Int64"),
+ }
+ )
+ if dtype_backend == "pyarrow":
+ pa = pytest.importorskip("pyarrow")
+ from pandas.arrays import ArrowExtensionArray
+
+ expected = DataFrame(
+ {
+ col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
+ for col in expected.columns
+ }
+ )
+ expected["i"] = ArrowExtensionArray(pa.array([None, None]))
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_invalid_dtype_backend():
+ msg = (
+ "dtype_backend numpy is invalid, only 'numpy_nullable' and "
+ "'pyarrow' are allowed."
+ )
+ with pytest.raises(ValueError, match=msg):
+ read_fwf("test", dtype_backend="numpy")
+
+
+@pytest.mark.network
+@pytest.mark.single_cpu
+def test_url_urlopen(httpserver):
+ data = """\
+A B C D
+201158 360.242940 149.910199 11950.7
+201159 444.953632 166.985655 11788.4
+201160 364.136849 183.628767 11806.2
+201161 413.836124 184.375703 11916.8
+201162 502.953953 173.237159 12468.3
+"""
+ httpserver.serve_content(content=data)
+ expected = pd.Index(list("ABCD"))
+ with urlopen(httpserver.url) as f:
+ result = read_fwf(f).columns
+
+ tm.assert_index_equal(result, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_textreader.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_textreader.py
new file mode 100644
index 0000000000000000000000000000000000000000..fef5414e85e52749faab254c2336d6707a10347e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_textreader.py
@@ -0,0 +1,342 @@
+"""
+Tests the TextReader class in parsers.pyx, which
+is integral to the C engine in parsers.py
+"""
+from io import (
+ BytesIO,
+ StringIO,
+)
+
+import numpy as np
+import pytest
+
+import pandas._libs.parsers as parser
+from pandas._libs.parsers import TextReader
+from pandas.errors import ParserWarning
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.parsers import (
+ TextFileReader,
+ read_csv,
+)
+from pandas.io.parsers.c_parser_wrapper import ensure_dtype_objs
+
+
+class TestTextReader:
+ @pytest.fixture
+ def csv_path(self, datapath):
+ return datapath("io", "data", "csv", "test1.csv")
+
+ def test_file_handle(self, csv_path):
+ with open(csv_path, "rb") as f:
+ reader = TextReader(f)
+ reader.read()
+
+ def test_file_handle_mmap(self, csv_path):
+ # this was never using memory_map=True
+ with open(csv_path, "rb") as f:
+ reader = TextReader(f, header=None)
+ reader.read()
+
+ def test_StringIO(self, csv_path):
+ with open(csv_path, "rb") as f:
+ text = f.read()
+ src = BytesIO(text)
+ reader = TextReader(src, header=None)
+ reader.read()
+
+ def test_string_factorize(self):
+ # should this be optional?
+ data = "a\nb\na\nb\na"
+ reader = TextReader(StringIO(data), header=None)
+ result = reader.read()
+ assert len(set(map(id, result[0]))) == 2
+
+ def test_skipinitialspace(self):
+ data = "a, b\na, b\na, b\na, b"
+
+ reader = TextReader(StringIO(data), skipinitialspace=True, header=None)
+ result = reader.read()
+
+ tm.assert_numpy_array_equal(
+ result[0], np.array(["a", "a", "a", "a"], dtype=np.object_)
+ )
+ tm.assert_numpy_array_equal(
+ result[1], np.array(["b", "b", "b", "b"], dtype=np.object_)
+ )
+
+ def test_parse_booleans(self):
+ data = "True\nFalse\nTrue\nTrue"
+
+ reader = TextReader(StringIO(data), header=None)
+ result = reader.read()
+
+ assert result[0].dtype == np.bool_
+
+ def test_delimit_whitespace(self):
+ data = 'a b\na\t\t "b"\n"a"\t \t b'
+
+ reader = TextReader(StringIO(data), delim_whitespace=True, header=None)
+ result = reader.read()
+
+ tm.assert_numpy_array_equal(
+ result[0], np.array(["a", "a", "a"], dtype=np.object_)
+ )
+ tm.assert_numpy_array_equal(
+ result[1], np.array(["b", "b", "b"], dtype=np.object_)
+ )
+
+ def test_embedded_newline(self):
+ data = 'a\n"hello\nthere"\nthis'
+
+ reader = TextReader(StringIO(data), header=None)
+ result = reader.read()
+
+ expected = np.array(["a", "hello\nthere", "this"], dtype=np.object_)
+ tm.assert_numpy_array_equal(result[0], expected)
+
+ def test_euro_decimal(self):
+ data = "12345,67\n345,678"
+
+ reader = TextReader(StringIO(data), delimiter=":", decimal=",", header=None)
+ result = reader.read()
+
+ expected = np.array([12345.67, 345.678])
+ tm.assert_almost_equal(result[0], expected)
+
+ def test_integer_thousands(self):
+ data = "123,456\n12,500"
+
+ reader = TextReader(StringIO(data), delimiter=":", thousands=",", header=None)
+ result = reader.read()
+
+ expected = np.array([123456, 12500], dtype=np.int64)
+ tm.assert_almost_equal(result[0], expected)
+
+ def test_integer_thousands_alt(self):
+ data = "123.456\n12.500"
+
+ reader = TextFileReader(
+ StringIO(data), delimiter=":", thousands=".", header=None
+ )
+ result = reader.read()
+
+ expected = DataFrame([123456, 12500])
+ tm.assert_frame_equal(result, expected)
+
+ def test_skip_bad_lines(self):
+ # too many lines, see #2430 for why
+ data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r"
+
+ reader = TextReader(StringIO(data), delimiter=":", header=None)
+ msg = r"Error tokenizing data\. C error: Expected 3 fields in line 4, saw 4"
+ with pytest.raises(parser.ParserError, match=msg):
+ reader.read()
+
+ reader = TextReader(
+ StringIO(data), delimiter=":", header=None, on_bad_lines=2 # Skip
+ )
+ result = reader.read()
+ expected = {
+ 0: np.array(["a", "d", "g", "l"], dtype=object),
+ 1: np.array(["b", "e", "h", "m"], dtype=object),
+ 2: np.array(["c", "f", "i", "n"], dtype=object),
+ }
+ assert_array_dicts_equal(result, expected)
+
+ with tm.assert_produces_warning(ParserWarning, match="Skipping line"):
+ reader = TextReader(
+ StringIO(data), delimiter=":", header=None, on_bad_lines=1 # Warn
+ )
+ reader.read()
+
+ def test_header_not_enough_lines(self):
+ data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6"
+
+ reader = TextReader(StringIO(data), delimiter=",", header=2)
+ header = reader.header
+ expected = [["a", "b", "c"]]
+ assert header == expected
+
+ recs = reader.read()
+ expected = {
+ 0: np.array([1, 4], dtype=np.int64),
+ 1: np.array([2, 5], dtype=np.int64),
+ 2: np.array([3, 6], dtype=np.int64),
+ }
+ assert_array_dicts_equal(recs, expected)
+
+ def test_escapechar(self):
+ data = '\\"hello world"\n\\"hello world"\n\\"hello world"'
+
+ reader = TextReader(StringIO(data), delimiter=",", header=None, escapechar="\\")
+ result = reader.read()
+ expected = {0: np.array(['"hello world"'] * 3, dtype=object)}
+ assert_array_dicts_equal(result, expected)
+
+ def test_eof_has_eol(self):
+ # handling of new line at EOF
+ pass
+
+ def test_na_substitution(self):
+ pass
+
+ def test_numpy_string_dtype(self):
+ data = """\
+a,1
+aa,2
+aaa,3
+aaaa,4
+aaaaa,5"""
+
+ def _make_reader(**kwds):
+ if "dtype" in kwds:
+ kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])
+ return TextReader(StringIO(data), delimiter=",", header=None, **kwds)
+
+ reader = _make_reader(dtype="S5,i4")
+ result = reader.read()
+
+ assert result[0].dtype == "S5"
+
+ ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaaa"], dtype="S5")
+ assert (result[0] == ex_values).all()
+ assert result[1].dtype == "i4"
+
+ reader = _make_reader(dtype="S4")
+ result = reader.read()
+ assert result[0].dtype == "S4"
+ ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaa"], dtype="S4")
+ assert (result[0] == ex_values).all()
+ assert result[1].dtype == "S4"
+
+ def test_pass_dtype(self):
+ data = """\
+one,two
+1,a
+2,b
+3,c
+4,d"""
+
+ def _make_reader(**kwds):
+ if "dtype" in kwds:
+ kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])
+ return TextReader(StringIO(data), delimiter=",", **kwds)
+
+ reader = _make_reader(dtype={"one": "u1", 1: "S1"})
+ result = reader.read()
+ assert result[0].dtype == "u1"
+ assert result[1].dtype == "S1"
+
+ reader = _make_reader(dtype={"one": np.uint8, 1: object})
+ result = reader.read()
+ assert result[0].dtype == "u1"
+ assert result[1].dtype == "O"
+
+ reader = _make_reader(dtype={"one": np.dtype("u1"), 1: np.dtype("O")})
+ result = reader.read()
+ assert result[0].dtype == "u1"
+ assert result[1].dtype == "O"
+
+ def test_usecols(self):
+ data = """\
+a,b,c
+1,2,3
+4,5,6
+7,8,9
+10,11,12"""
+
+ def _make_reader(**kwds):
+ return TextReader(StringIO(data), delimiter=",", **kwds)
+
+ reader = _make_reader(usecols=(1, 2))
+ result = reader.read()
+
+ exp = _make_reader().read()
+ assert len(result) == 2
+ assert (result[1] == exp[1]).all()
+ assert (result[2] == exp[2]).all()
+
+ @pytest.mark.parametrize(
+ "text, kwargs",
+ [
+ ("a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12", {"delimiter": ","}),
+ (
+ "a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12",
+ {"delim_whitespace": True},
+ ),
+ ("a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12", {"delimiter": ","}),
+ (
+ (
+ "A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r"
+ "AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r"
+ ",BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0"
+ ),
+ {"delimiter": ","},
+ ),
+ ("A B C\r 2 3\r4 5 6", {"delim_whitespace": True}),
+ ("A B C\r2 3\r4 5 6", {"delim_whitespace": True}),
+ ],
+ )
+ def test_cr_delimited(self, text, kwargs):
+ nice_text = text.replace("\r", "\r\n")
+ result = TextReader(StringIO(text), **kwargs).read()
+ expected = TextReader(StringIO(nice_text), **kwargs).read()
+ assert_array_dicts_equal(result, expected)
+
+ def test_empty_field_eof(self):
+ data = "a,b,c\n1,2,3\n4,,"
+
+ result = TextReader(StringIO(data), delimiter=",").read()
+
+ expected = {
+ 0: np.array([1, 4], dtype=np.int64),
+ 1: np.array(["2", ""], dtype=object),
+ 2: np.array(["3", ""], dtype=object),
+ }
+ assert_array_dicts_equal(result, expected)
+
+ @pytest.mark.parametrize("repeat", range(10))
+ def test_empty_field_eof_mem_access_bug(self, repeat):
+ # GH5664
+ a = DataFrame([["b"], [np.nan]], columns=["a"], index=["a", "c"])
+ b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list("abcd"), index=[1, 1])
+ c = DataFrame(
+ [
+ [1, 2, 3, 4],
+ [6, np.nan, np.nan, np.nan],
+ [8, 9, 10, 11],
+ [13, 14, np.nan, np.nan],
+ ],
+ columns=list("abcd"),
+ index=[0, 5, 7, 12],
+ )
+
+ df = read_csv(StringIO("a,b\nc\n"), skiprows=0, names=["a"], engine="c")
+ tm.assert_frame_equal(df, a)
+
+ df = read_csv(
+ StringIO("1,1,1,1,0\n" * 2 + "\n" * 2), names=list("abcd"), engine="c"
+ )
+ tm.assert_frame_equal(df, b)
+
+ df = read_csv(
+ StringIO("0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14"),
+ names=list("abcd"),
+ engine="c",
+ )
+ tm.assert_frame_equal(df, c)
+
+ def test_empty_csv_input(self):
+ # GH14867
+ with read_csv(
+ StringIO(), chunksize=20, header=None, names=["a", "b", "c"]
+ ) as df:
+ assert isinstance(df, TextFileReader)
+
+
+def assert_array_dicts_equal(left, right):
+ for k, v in left.items():
+ tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k]))
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_unsupported.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_unsupported.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8790bdb5fa426252f85f472f1249e75dae42dcd
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/test_unsupported.py
@@ -0,0 +1,226 @@
+"""
+Tests that features that are currently unsupported in
+either the Python or C parser are actually enforced
+and are clearly communicated to the user.
+
+Ultimately, the goal is to remove test cases from this
+test suite as new feature support is added to the parsers.
+"""
+from io import StringIO
+import os
+from pathlib import Path
+
+import pytest
+
+from pandas.errors import ParserError
+
+import pandas._testing as tm
+
+from pandas.io.parsers import read_csv
+import pandas.io.parsers.readers as parsers
+
# Module-level mark: silence the internal "Passing a BlockManager to DataFrame"
# DeprecationWarning for every test in this file, so warning-sensitive tests
# (tm.assert_produces_warning) are not polluted by it.
pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)
+
+
@pytest.fixture(params=["python", "python-fwf"], ids=lambda val: val)
def python_engine(request):
    """Fixture parametrized over the pure-Python parser engine names."""
    return request.param
+
+
class TestUnsupportedFeatures:
    """Check that options unsupported by a given engine raise clear errors."""

    def test_mangle_dupe_cols_false(self):
        # see gh-12935
        # mangle_dupe_cols was removed entirely, so passing it is an
        # unexpected keyword for every engine.
        data = "a b c\n1 2 3"

        for engine in ("c", "python"):
            with pytest.raises(TypeError, match="unexpected keyword"):
                read_csv(StringIO(data), engine=engine, mangle_dupe_cols=True)

    def test_c_engine(self):
        # see gh-6607
        data = "a b c\n1 2 3"
        msg = "does not support"

        depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"

        # specify C engine with unsupported options (raise)
        with pytest.raises(ValueError, match=msg):
            with tm.assert_produces_warning(FutureWarning, match=depr_msg):
                read_csv(StringIO(data), engine="c", sep=None, delim_whitespace=False)
        with pytest.raises(ValueError, match=msg):
            read_csv(StringIO(data), engine="c", sep=r"\s")
        with pytest.raises(ValueError, match=msg):
            read_csv(StringIO(data), engine="c", sep="\t", quotechar=chr(128))
        with pytest.raises(ValueError, match=msg):
            read_csv(StringIO(data), engine="c", skipfooter=1)

        # specify C-unsupported options without python-unsupported options
        # (no engine given -> pandas falls back to python and only warns)
        with tm.assert_produces_warning((parsers.ParserWarning, FutureWarning)):
            read_csv(StringIO(data), sep=None, delim_whitespace=False)
        with tm.assert_produces_warning(parsers.ParserWarning):
            read_csv(StringIO(data), sep=r"\s")
        with tm.assert_produces_warning(parsers.ParserWarning):
            read_csv(StringIO(data), sep="\t", quotechar=chr(128))
        with tm.assert_produces_warning(parsers.ParserWarning):
            read_csv(StringIO(data), skipfooter=1)

        # ragged rows (inconsistent field counts) must raise a tokenizing
        # error with both the default and the explicit C engine
        text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
        msg = "Error tokenizing data"

        with pytest.raises(ParserError, match=msg):
            read_csv(StringIO(text), sep="\\s+")
        with pytest.raises(ParserError, match=msg):
            read_csv(StringIO(text), engine="c", sep="\\s+")

        # thousands separator must be exactly one character
        msg = "Only length-1 thousands markers supported"
        data = """A|B|C
1|2,334|5
10|13|10.
"""
        with pytest.raises(ValueError, match=msg):
            read_csv(StringIO(data), thousands=",,")
        with pytest.raises(ValueError, match=msg):
            read_csv(StringIO(data), thousands="")

        # line terminator must be exactly one character
        msg = "Only length-1 line terminators supported"
        data = "a,b,c~~1,2,3~~4,5,6"
        with pytest.raises(ValueError, match=msg):
            read_csv(StringIO(data), lineterminator="~~")

    def test_python_engine(self, python_engine):
        """Every option in _python_unsupported raises for the python engines."""
        from pandas.io.parsers.readers import _python_unsupported as py_unsupported

        data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""

        for default in py_unsupported:
            msg = (
                f"The {repr(default)} option is not "
                f"supported with the {repr(python_engine)} engine"
            )

            # object() is an arbitrary non-default value to trigger the check
            kwargs = {default: object()}
            with pytest.raises(ValueError, match=msg):
                read_csv(StringIO(data), engine=python_engine, **kwargs)

    def test_python_engine_file_no_iter(self, python_engine):
        # see gh-16530
        # A file-like object without __iter__ must raise a TypeError
        # rather than crash the python engine.
        class NoNextBuffer:
            def __init__(self, csv_data) -> None:
                self.data = csv_data

            def __next__(self):
                return self.data.__next__()

            def read(self):
                return self.data

            def readline(self):
                return self.data

        data = "a\n1"
        msg = "'NoNextBuffer' object is not iterable|argument 1 must be an iterator"

        with pytest.raises(TypeError, match=msg):
            read_csv(NoNextBuffer(data), engine=python_engine)

    def test_pyarrow_engine(self):
        """Every option in _pyarrow_unsupported raises for engine='pyarrow'."""
        from pandas.io.parsers.readers import _pyarrow_unsupported as pa_unsupported

        data = """1,2,3,,
        1,2,3,4,
        1,2,3,4,5
        1,2,,,
        1,2,3,4,"""

        for default in pa_unsupported:
            msg = (
                f"The {repr(default)} option is not "
                f"supported with the 'pyarrow' engine"
            )
            kwargs = {default: object()}
            # some options are validated before the engine check, so they
            # need a plausible value instead of a bare object()
            default_needs_bool = {"warn_bad_lines", "error_bad_lines"}
            if default == "dialect":
                kwargs[default] = "excel"  # test a random dialect
            elif default in default_needs_bool:
                kwargs[default] = True
            elif default == "on_bad_lines":
                kwargs[default] = "warn"

            # deprecated keywords additionally emit a FutureWarning that
            # must be captured for the test to stay warning-clean
            warn = None
            depr_msg = None
            if "delim_whitespace" in kwargs:
                depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
                warn = FutureWarning
            if "verbose" in kwargs:
                depr_msg = "The 'verbose' keyword in pd.read_csv is deprecated"
                warn = FutureWarning

            with pytest.raises(ValueError, match=msg):
                with tm.assert_produces_warning(warn, match=depr_msg):
                    read_csv(StringIO(data), engine="pyarrow", **kwargs)

    def test_on_bad_lines_callable_python_or_pyarrow(self, all_parsers):
        # GH 5686
        # GH 54643
        # a callable on_bad_lines is only valid for python/pyarrow engines
        sio = StringIO("a,b\n1,2")
        bad_lines_func = lambda x: x
        parser = all_parsers
        if all_parsers.engine not in ["python", "pyarrow"]:
            msg = (
                "on_bad_line can only be a callable "
                "function if engine='python' or 'pyarrow'"
            )
            with pytest.raises(ValueError, match=msg):
                parser.read_csv(sio, on_bad_lines=bad_lines_func)
        else:
            parser.read_csv(sio, on_bad_lines=bad_lines_func)
+
+
def test_close_file_handle_on_invalid_usecols(all_parsers):
    """The file handle must be released even when usecols validation fails (GH 45384)."""
    parser = all_parsers

    if parser.engine == "pyarrow":
        # Raises pyarrow.lib.ArrowKeyError
        pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
    expected_error = ValueError

    with tm.ensure_clean("test.csv") as path:
        Path(path).write_text("col1,col2\na,b\n1,2", encoding="utf-8")
        with tm.assert_produces_warning(False), pytest.raises(
            expected_error, match="col3"
        ):
            parser.read_csv(path, usecols=["col1", "col2", "col3"])
        # would fail on Windows if the parser still held an open handle
        os.unlink(path)
+
+
def test_invalid_file_inputs(request, all_parsers):
    """A list is not a valid read_csv source (GH#45957); python engine xfails."""
    parser = all_parsers
    if parser.engine == "python":
        reason = f"{parser.engine} engine supports lists."
        request.applymarker(pytest.mark.xfail(reason=reason))

    with pytest.raises(ValueError, match="Invalid"):
        parser.read_csv([])
+
+
def test_invalid_dtype_backend(all_parsers):
    """An unrecognized dtype_backend value must raise a ValueError."""
    parser = all_parsers
    expected = (
        "dtype_backend numpy is invalid, only 'numpy_nullable' and "
        "'pyarrow' are allowed."
    )
    with pytest.raises(ValueError, match=expected):
        parser.read_csv("test", dtype_backend="numpy")
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6402b31e9aecf693056e36598f4dc68425efecda
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc differ