diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc7fba9c72cbdc8a32ac7660a9d48f3b4fe02690
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2129b1a666a01d1b51445156457c8da4a9dbe22
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3447d40b70e80c31b4acbf1d41a1918c64bd86d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8b90656b52f9b2b751d199916bcda8609f4803b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14de9bdcd72e3206e6a705f56bf408cb883e847f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9afcf6dda7671071190d41c013edb179758f1fad
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5cc656a4db0723748b1f447aaed956e9e60e5f7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..860ad43780c95167918292c22a759e05f306d7c1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a57cefd5bd688301495ca514751b92b4fca4173
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3705cb84af7a5d5ec1637e5055d70bcc747e87ce
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e4712e8bb3d15959bddc0bd8697981b16bd8ef
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py
@@ -0,0 +1,358 @@
+import io
+
+import numpy as np
+import pytest
+
+from pandas import (
+ NA,
+ DataFrame,
+ read_csv,
+)
+
+pytest.importorskip("jinja2")
+
+
+def bar_grad(a=None, b=None, c=None, d=None):
+ """Used in multiple tests to simplify formatting of expected result"""
+ ret = [("width", "10em")]
+ if all(x is None for x in [a, b, c, d]):
+ return ret
+ return ret + [
+ (
+ "background",
+ f"linear-gradient(90deg,{','.join([x for x in [a, b, c, d] if x])})",
+ )
+ ]
+
+
+def no_bar():
+ return bar_grad()
+
+
+def bar_to(x, color="#d65f5f"):
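+    # e.g. bar_to(50) -> background "linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%)"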
+ return bar_grad(f" {color} {x:.1f}%", f" transparent {x:.1f}%")
+
+
+def bar_from_to(x, y, color="#d65f5f"):
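+    # e.g. bar_from_to(25, 75) -> background "linear-gradient(90deg,
+    # transparent 25.0%, #d65f5f 25.0%, #d65f5f 75.0%, transparent 75.0%)"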
+ return bar_grad(
+ f" transparent {x:.1f}%",
+ f" {color} {x:.1f}%",
+ f" {color} {y:.1f}%",
+ f" transparent {y:.1f}%",
+ )
+
+
+@pytest.fixture
+def df_pos():
+ return DataFrame([[1], [2], [3]])
+
+
+@pytest.fixture
+def df_neg():
+ return DataFrame([[-1], [-2], [-3]])
+
+
+@pytest.fixture
+def df_mix():
+ return DataFrame([[-3], [1], [2]])
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(50), bar_to(100)]),
+ ("right", [bar_to(100), bar_from_to(50, 100), no_bar()]),
+ ("mid", [bar_to(33.33), bar_to(66.66), bar_to(100)]),
+ ("zero", [bar_from_to(50, 66.7), bar_from_to(50, 83.3), bar_from_to(50, 100)]),
+ ("mean", [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ (2.0, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ (np.median, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ ],
+)
+def test_align_positive_cases(df_pos, align, exp):
+ # test different align cases for all positive values
+ result = df_pos.style.bar(align=align)._compute().ctx
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [bar_to(100), bar_to(50), no_bar()]),
+ ("right", [no_bar(), bar_from_to(50, 100), bar_to(100)]),
+ ("mid", [bar_from_to(66.66, 100), bar_from_to(33.33, 100), bar_to(100)]),
+ ("zero", [bar_from_to(33.33, 50), bar_from_to(16.66, 50), bar_to(50)]),
+ ("mean", [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ (-2.0, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ (np.median, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ ],
+)
+def test_align_negative_cases(df_neg, align, exp):
+ # test different align cases for all negative values
+ result = df_neg.style.bar(align=align)._compute().ctx
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(80), bar_to(100)]),
+ ("right", [bar_to(100), bar_from_to(80, 100), no_bar()]),
+ ("mid", [bar_to(60), bar_from_to(60, 80), bar_from_to(60, 100)]),
+ ("zero", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ ("mean", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ (-0.0, [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ (np.nanmedian, [bar_to(50), no_bar(), bar_from_to(50, 62.5)]),
+ ],
+)
+@pytest.mark.parametrize("nans", [True, False])
+def test_align_mixed_cases(df_mix, align, exp, nans):
+ # test different align cases for mixed positive and negative values
+ # also test no impact of NaNs and no_bar
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ if nans:
+ df_mix.loc[3, :] = np.nan
+ expected.update({(3, 0): no_bar()})
+ result = df_mix.style.bar(align=align)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ (
+ "left",
+ {
+ "index": [[no_bar(), no_bar()], [bar_to(100), bar_to(100)]],
+ "columns": [[no_bar(), bar_to(100)], [no_bar(), bar_to(100)]],
+ "none": [[no_bar(), bar_to(33.33)], [bar_to(66.66), bar_to(100)]],
+ },
+ ),
+ (
+ "mid",
+ {
+ "index": [[bar_to(33.33), bar_to(50)], [bar_to(100), bar_to(100)]],
+ "columns": [[bar_to(50), bar_to(100)], [bar_to(75), bar_to(100)]],
+ "none": [[bar_to(25), bar_to(50)], [bar_to(75), bar_to(100)]],
+ },
+ ),
+ (
+ "zero",
+ {
+ "index": [
+ [bar_from_to(50, 66.66), bar_from_to(50, 75)],
+ [bar_from_to(50, 100), bar_from_to(50, 100)],
+ ],
+ "columns": [
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ [bar_from_to(50, 87.5), bar_from_to(50, 100)],
+ ],
+ "none": [
+ [bar_from_to(50, 62.5), bar_from_to(50, 75)],
+ [bar_from_to(50, 87.5), bar_from_to(50, 100)],
+ ],
+ },
+ ),
+ (
+ 2,
+ {
+ "index": [
+ [bar_to(50), no_bar()],
+ [bar_from_to(50, 100), bar_from_to(50, 100)],
+ ],
+ "columns": [
+ [bar_to(50), no_bar()],
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ ],
+ "none": [
+ [bar_from_to(25, 50), no_bar()],
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ ],
+ },
+ ),
+ ],
+)
+@pytest.mark.parametrize("axis", ["index", "columns", "none"])
+def test_align_axis(align, exp, axis):
+ # test all axis combinations with positive values and different aligns
+ data = DataFrame([[1, 2], [3, 4]])
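+    # bar(axis="index") scales bars within each column, axis="columns" within each
+    # row, and axis=None over the entire table at once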
+ result = (
+ data.style.bar(align=align, axis=None if axis == "none" else axis)
+ ._compute()
+ .ctx
+ )
+ expected = {
+ (0, 0): exp[axis][0][0],
+ (0, 1): exp[axis][0][1],
+ (1, 0): exp[axis][1][0],
+ (1, 1): exp[axis][1][1],
+ }
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "values, vmin, vmax",
+ [
+ ("positive", 1.5, 2.5),
+ ("negative", -2.5, -1.5),
+ ("mixed", -2.5, 1.5),
+ ],
+)
+@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately
+@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
+def test_vmin_vmax_clipping(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
+ # test that clipping occurs if any vmin > data_values or vmax < data_values
+ if align == "mid": # mid acts as left or right in each case
+ if values == "positive":
+ align = "left"
+ elif values == "negative":
+ align = "right"
+ df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
+ vmin = None if nullify == "vmin" else vmin
+ vmax = None if nullify == "vmax" else vmax
+
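+    # build the expected frame by clipping manually: values beyond vmin/vmax are
+    # replaced, so styling it without bounds should equal styling the original with bounds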
+ clip_df = df.where(df <= (vmax if vmax else 999), other=vmax)
+ clip_df = clip_df.where(clip_df >= (vmin if vmin else -999), other=vmin)
+
+ result = (
+ df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
+ ._compute()
+ .ctx
+ )
+ expected = clip_df.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "values, vmin, vmax",
+ [
+ ("positive", 0.5, 4.5),
+ ("negative", -4.5, -0.5),
+ ("mixed", -4.5, 4.5),
+ ],
+)
+@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately
+@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
+def test_vmin_vmax_widening(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
+ # test that widening occurs if any vmax > data_values or vmin < data_values
+ if align == "mid": # mid acts as left or right in each case
+ if values == "positive":
+ align = "left"
+ elif values == "negative":
+ align = "right"
+ df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
+ vmin = None if nullify == "vmin" else vmin
+ vmax = None if nullify == "vmax" else vmax
+
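+    # widen the range by appending vmin/vmax as extra rows; the bars for the
+    # original rows must then be a subset of those computed on the expanded frame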
+ expand_df = df.copy()
+ expand_df.loc[3, :], expand_df.loc[4, :] = vmin, vmax
+
+ result = (
+ df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
+ ._compute()
+ .ctx
+ )
+ expected = expand_df.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result.items() <= expected.items()
+
+
+def test_numerics():
+ # test data is pre-selected for numeric values
+ data = DataFrame([[1, "a"], [2, "b"]])
+ result = data.style.bar()._compute().ctx
+ assert (0, 1) not in result
+ assert (1, 1) not in result
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(100, "green")]),
+ ("right", [bar_to(100, "red"), no_bar()]),
+ ("mid", [bar_to(25, "red"), bar_from_to(25, 100, "green")]),
+ ("zero", [bar_from_to(33.33, 50, "red"), bar_from_to(50, 100, "green")]),
+ ],
+)
+def test_colors_mixed(align, exp):
+ data = DataFrame([[-1], [3]])
+ result = data.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result == {(0, 0): exp[0], (1, 0): exp[1]}
+
+
+def test_bar_align_height():
+    # test that when the height keyword is used, 'no-repeat center' and 'background-size' are present
+ data = DataFrame([[1], [2]])
+ result = data.style.bar(align="left", height=50)._compute().ctx
+ bg_s = "linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%) no-repeat center"
+ expected = {
+ (0, 0): [("width", "10em")],
+ (1, 0): [
+ ("width", "10em"),
+ ("background", bg_s),
+ ("background-size", "100% 50.0%"),
+ ],
+ }
+ assert result == expected
+
+
+def test_bar_value_error_raises():
+ df = DataFrame({"A": [-100, -60, -30, -20]})
+
+ msg = "`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(align="poorly", color=["#d65f5f", "#5fba7d"]).to_html()
+
+ msg = r"`width` must be a value in \[0, 100\]"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(width=200).to_html()
+
+ msg = r"`height` must be a value in \[0, 100\]"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(height=200).to_html()
+
+
+def test_bar_color_and_cmap_error_raises():
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ msg = "`color` and `cmap` cannot both be given"
+ # Test that providing both color and cmap raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color="#d65f5f", cmap="viridis").to_html()
+
+
+def test_bar_invalid_color_type_error_raises():
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ msg = (
+ r"`color` must be string or list or tuple of 2 strings,"
+ r"\(eg: color=\['#d65f5f', '#5fba7d'\]\)"
+ )
+ # Test that providing an invalid color type raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color=123).to_html()
+
+ # Test that providing a color list with more than two elements raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color=["#d65f5f", "#5fba7d", "#abcdef"]).to_html()
+
+
+def test_styler_bar_with_NA_values():
+ df1 = DataFrame({"A": [1, 2, NA, 4]})
+ df2 = DataFrame([[NA, NA], [NA, NA]])
+ expected_substring = "style type="
+ html_output1 = df1.style.bar(subset="A").to_html()
+ html_output2 = df2.style.bar(align="left", axis=None).to_html()
+ assert expected_substring in html_output1
+ assert expected_substring in html_output2
+
+
+def test_style_bar_with_pyarrow_NA_values():
+ data = """name,age,test1,test2,teacher
+ Adam,15,95.0,80,Ashby
+ Bob,16,81.0,82,Ashby
+ Dave,16,89.0,84,Jones
+ Fred,15,,88,Jones"""
+ df = read_csv(io.StringIO(data), dtype_backend="pyarrow")
+ expected_substring = "style type="
+ html_output = df.style.bar(subset="test1").to_html()
+ assert expected_substring in html_output
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..d52e3a37e7693dadce34f73fc03a0790c7a0b4d3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py
@@ -0,0 +1,44 @@
+import pytest
+
+jinja2 = pytest.importorskip("jinja2")
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+)
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ data=[[0, -0.609], [1, -1.228]],
+ columns=["A", "B"],
+ index=["x", "y"],
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+def test_concat_bad_columns(styler):
+ msg = "`other.data` must have same columns as `Styler.data"
+ with pytest.raises(ValueError, match=msg):
+ styler.concat(DataFrame([[1, 2]]).style)
+
+
+def test_concat_bad_type(styler):
+ msg = "`other` must be of type `Styler`"
+ with pytest.raises(TypeError, match=msg):
+ styler.concat(DataFrame([[1, 2]]))
+
+
+def test_concat_bad_index_levels(styler, df):
+ df = df.copy()
+ df.index = MultiIndex.from_tuples([(0, 0), (1, 1)])
+ msg = "number of index levels must be same in `other`"
+ with pytest.raises(ValueError, match=msg):
+ styler.concat(df.style)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4d31fe21f2c9cf3454a67f8c7443382f7f1c0ef
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py
@@ -0,0 +1,140 @@
+from textwrap import dedent
+
+import pytest
+
+from pandas import (
+ DataFrame,
+ IndexSlice,
+)
+
+pytest.importorskip("jinja2")
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=["i", "j", "j"],
+ columns=["c", "d", "d"],
+ dtype=float,
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+def test_format_non_unique(df):
+ # GH 41269
+
+ # test dict
+ html = df.style.format({"d": "{:.1f}"}).to_html()
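+    # only the non-unique "d" columns are formatted to 1 d.p.; "c" keeps the default precision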
+ for val in ["1.000000<", "4.000000<", "7.000000<"]:
+ assert val in html
+ for val in ["2.0<", "3.0<", "5.0<", "6.0<", "8.0<", "9.0<"]:
+ assert val in html
+
+ # test subset
+ html = df.style.format(precision=1, subset=IndexSlice["j", "d"]).to_html()
+ for val in ["1.000000<", "4.000000<", "7.000000<", "2.000000<", "3.000000<"]:
+ assert val in html
+ for val in ["5.0<", "6.0<", "8.0<", "9.0<"]:
+ assert val in html
+
+
+@pytest.mark.parametrize("func", ["apply", "map"])
+def test_apply_map_non_unique_raises(df, func):
+ # GH 41269
+ if func == "apply":
+ op = lambda s: ["color: red;"] * len(s)
+ else:
+ op = lambda v: "color: red;"
+
+ with pytest.raises(KeyError, match="`Styler.apply` and `.map` are not"):
+ getattr(df.style, func)(op)._compute()
+
+
+def test_table_styles_dict_non_unique_index(styler):
+ styles = styler.set_table_styles(
+ {"j": [{"selector": "td", "props": "a: v;"}]}, axis=1
+ ).table_styles
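+    # the non-unique label "j" expands to selectors for both matching rows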
+ assert styles == [
+ {"selector": "td.row1", "props": [("a", "v")]},
+ {"selector": "td.row2", "props": [("a", "v")]},
+ ]
+
+
+def test_table_styles_dict_non_unique_columns(styler):
+ styles = styler.set_table_styles(
+ {"d": [{"selector": "td", "props": "a: v;"}]}, axis=0
+ ).table_styles
+ assert styles == [
+ {"selector": "td.col1", "props": [("a", "v")]},
+ {"selector": "td.col2", "props": [("a", "v")]},
+ ]
+
+
+def test_tooltips_non_unique_raises(styler):
+ # ttips has unique keys
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"])
+ styler.set_tooltips(ttips=ttips) # OK
+
+ # ttips has non-unique columns
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "c"], index=["a", "b"])
+ with pytest.raises(KeyError, match="Tooltips render only if `ttips` has unique"):
+ styler.set_tooltips(ttips=ttips)
+
+ # ttips has non-unique index
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "a"])
+ with pytest.raises(KeyError, match="Tooltips render only if `ttips` has unique"):
+ styler.set_tooltips(ttips=ttips)
+
+
+def test_set_td_classes_non_unique_raises(styler):
+ # classes has unique keys
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"])
+ styler.set_td_classes(classes=classes) # OK
+
+ # classes has non-unique columns
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "c"], index=["a", "b"])
+ with pytest.raises(KeyError, match="Classes render only if `classes` has unique"):
+ styler.set_td_classes(classes=classes)
+
+ # classes has non-unique index
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "a"])
+ with pytest.raises(KeyError, match="Classes render only if `classes` has unique"):
+ styler.set_td_classes(classes=classes)
+
+
+def test_hide_columns_non_unique(styler):
+ ctx = styler.hide(["d"], axis="columns")._translate(True, True)
+
+ assert ctx["head"][0][1]["display_value"] == "c"
+ assert ctx["head"][0][1]["is_visible"] is True
+
+ assert ctx["head"][0][2]["display_value"] == "d"
+ assert ctx["head"][0][2]["is_visible"] is False
+
+ assert ctx["head"][0][3]["display_value"] == "d"
+ assert ctx["head"][0][3]["is_visible"] is False
+
+ assert ctx["body"][0][1]["is_visible"] is True
+ assert ctx["body"][0][2]["is_visible"] is False
+ assert ctx["body"][0][3]["is_visible"] is False
+
+
+def test_latex_non_unique(styler):
+ result = styler.to_latex()
+ assert result == dedent(
+ """\
+ \\begin{tabular}{lrrr}
+ & c & d & d \\\\
+ i & 1.000000 & 2.000000 & 3.000000 \\\\
+ j & 4.000000 & 5.000000 & 6.000000 \\\\
+ j & 7.000000 & 8.000000 & 9.000000 \\\\
+ \\end{tabular}
+ """
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fa72bd48031cca999b81cccfcedafcd3abcd924
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py
@@ -0,0 +1,1588 @@
+import contextlib
+import copy
+import re
+from textwrap import dedent
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ IndexSlice,
+ MultiIndex,
+ Series,
+ option_context,
+)
+import pandas._testing as tm
+
+jinja2 = pytest.importorskip("jinja2")
+from pandas.io.formats.style import ( # isort:skip
+ Styler,
+)
+from pandas.io.formats.style_render import (
+ _get_level_lengths,
+ _get_trimming_maximums,
+ maybe_convert_css_to_tuples,
+ non_reducing_slice,
+)
+
+
+@pytest.fixture
+def mi_df():
+ return DataFrame(
+ [[1, 2], [3, 4]],
+ index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
+ columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
+ dtype=int,
+ )
+
+
+@pytest.fixture
+def mi_styler(mi_df):
+ return Styler(mi_df, uuid_len=0)
+
+
+@pytest.fixture
+def mi_styler_comp(mi_styler):
+ # comprehensively add features to mi_styler
+ mi_styler = mi_styler._copy(deepcopy=True)
+ mi_styler.css = {**mi_styler.css, "row": "ROW", "col": "COL"}
+ mi_styler.uuid_len = 5
+ mi_styler.uuid = "abcde"
+ mi_styler.set_caption("capt")
+ mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
+ mi_styler.hide(axis="columns")
+ mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
+ mi_styler.hide(axis="index")
+ mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
+ mi_styler.set_table_attributes('class="box"')
+ other = mi_styler.data.agg(["mean"])
+ other.index = MultiIndex.from_product([[""], other.index])
+ mi_styler.concat(other.style)
+ mi_styler.format(na_rep="MISSING", precision=3)
+ mi_styler.format_index(precision=2, axis=0)
+ mi_styler.format_index(precision=4, axis=1)
+ mi_styler.highlight_max(axis=None)
+ mi_styler.map_index(lambda x: "color: white;", axis=0)
+ mi_styler.map_index(lambda x: "color: black;", axis=1)
+ mi_styler.set_td_classes(
+ DataFrame(
+ [["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
+ )
+ )
+ mi_styler.set_tooltips(
+ DataFrame(
+ [["a2", "b2"], ["a2", "c2"]],
+ index=mi_styler.index,
+ columns=mi_styler.columns,
+ )
+ )
+ return mi_styler
+
+
+@pytest.fixture
+def blank_value():
+ return " "
+
+
+@pytest.fixture
+def df():
+ df = DataFrame({"A": [0, 1], "B": np.random.default_rng(2).standard_normal(2)})
+ return df
+
+
+@pytest.fixture
+def styler(df):
+ df = DataFrame({"A": [0, 1], "B": np.random.default_rng(2).standard_normal(2)})
+ return Styler(df)
+
+
+@pytest.mark.parametrize(
+ "sparse_columns, exp_cols",
+ [
+ (
+ True,
+ [
+ {"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
+ {"is_visible": False, "attributes": "", "value": "c0"},
+ ],
+ ),
+ (
+ False,
+ [
+ {"is_visible": True, "attributes": "", "value": "c0"},
+ {"is_visible": True, "attributes": "", "value": "c0"},
+ ],
+ ),
+ ],
+)
+def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
+ exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
+ exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
+
+ ctx = mi_styler._translate(True, sparse_columns)
+
+ assert exp_cols[0].items() <= ctx["head"][0][2].items()
+ assert exp_cols[1].items() <= ctx["head"][0][3].items()
+ assert exp_l1_c0.items() <= ctx["head"][1][2].items()
+ assert exp_l1_c1.items() <= ctx["head"][1][3].items()
+
+
+@pytest.mark.parametrize(
+ "sparse_index, exp_rows",
+ [
+ (
+ True,
+ [
+ {"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
+ {"is_visible": False, "attributes": "", "value": "i0"},
+ ],
+ ),
+ (
+ False,
+ [
+ {"is_visible": True, "attributes": "", "value": "i0"},
+ {"is_visible": True, "attributes": "", "value": "i0"},
+ ],
+ ),
+ ],
+)
+def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
+ exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
+ exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
+
+ ctx = mi_styler._translate(sparse_index, True)
+
+ assert exp_rows[0].items() <= ctx["body"][0][0].items()
+ assert exp_rows[1].items() <= ctx["body"][1][0].items()
+ assert exp_l1_r0.items() <= ctx["body"][0][1].items()
+ assert exp_l1_r1.items() <= ctx["body"][1][1].items()
+
+
+def test_mi_styler_sparsify_options(mi_styler):
+ with option_context("styler.sparse.index", False):
+ html1 = mi_styler.to_html()
+ with option_context("styler.sparse.index", True):
+ html2 = mi_styler.to_html()
+
+ assert html1 != html2
+
+ with option_context("styler.sparse.columns", False):
+ html1 = mi_styler.to_html()
+ with option_context("styler.sparse.columns", True):
+ html2 = mi_styler.to_html()
+
+ assert html1 != html2
+
+
+@pytest.mark.parametrize(
+ "rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
+ [
+ (100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
+ (1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
+ (4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
+ (1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
+ (4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
+        (100, 100, 700, 50, 50, 25, 25),  # rows and cols below given maxes so < 700 elements
+ ],
+)
+def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
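+    # _get_trimming_maximums repeatedly scales rn/cn down (here by factor 0.5)
+    # until the element count fits under max_els and any max_rows/max_cols caps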
+ rn, cn = _get_trimming_maximums(
+ rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
+ )
+ assert (rn, cn) == (exp_rn, exp_cn)
+
+
+@pytest.mark.parametrize(
+ "option, val",
+ [
+ ("styler.render.max_elements", 6),
+ ("styler.render.max_rows", 3),
+ ],
+)
+def test_render_trimming_rows(option, val):
+ # test auto and specific trimming of rows
+ df = DataFrame(np.arange(120).reshape(60, 2))
+ with option_context(option, val):
+ ctx = df.style._translate(True, True)
+ assert len(ctx["head"][0]) == 3 # index + 2 data cols
+ assert len(ctx["body"]) == 4 # 3 data rows + trimming row
+ assert len(ctx["body"][0]) == 3 # index + 2 data cols
+
+
+@pytest.mark.parametrize(
+ "option, val",
+ [
+ ("styler.render.max_elements", 6),
+ ("styler.render.max_columns", 2),
+ ],
+)
+def test_render_trimming_cols(option, val):
+ # test auto and specific trimming of cols
+ df = DataFrame(np.arange(30).reshape(3, 10))
+ with option_context(option, val):
+ ctx = df.style._translate(True, True)
+ assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
+ assert len(ctx["body"]) == 3 # 3 data rows
+ assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
+
+
+def test_render_trimming_mi():
+ midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
+ df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
+ with option_context("styler.render.max_elements", 4):
+ ctx = df.style._translate(True, True)
+
+ assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
+ assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
+ assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
+ assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
+ assert len(ctx["body"]) == 3 # 2 data rows + trimming row
+
+
+def test_render_empty_mi():
+ # GH 43305
+ df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
+ expected = dedent(
+ """\
+    >
+      <thead>
+        <tr>
+          <th class="index_name level0" >&nbsp;</th>
+          <th class="index_name level1" >one</th>
+        </tr>
+      </thead>
+ """
+ )
+ assert expected in df.style.to_html()
+
+
+@pytest.mark.parametrize("comprehensive", [True, False])
+@pytest.mark.parametrize("render", [True, False])
+@pytest.mark.parametrize("deepcopy", [True, False])
+def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
+ styler = mi_styler_comp if comprehensive else mi_styler
+ styler.uuid_len = 5
+
+ s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
+ assert s2 is not styler
+
+ if render:
+ styler.to_html()
+
+ excl = [
+ "cellstyle_map", # render time vars..
+ "cellstyle_map_columns",
+ "cellstyle_map_index",
+ "template_latex", # render templates are class level
+ "template_html",
+ "template_html_style",
+ "template_html_table",
+ ]
+ if not deepcopy: # check memory locations are equal for all included attributes
+ for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
+ assert id(getattr(s2, attr)) == id(getattr(styler, attr))
+ else: # check memory locations are different for nested or mutable vars
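+        # these attributes remain shared even on deepcopy, being immutable
+        # or deliberately not copied by Styler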
+ shallow = [
+ "data",
+ "columns",
+ "index",
+ "uuid_len",
+ "uuid",
+ "caption",
+ "cell_ids",
+ "hide_index_",
+ "hide_columns_",
+ "hide_index_names",
+ "hide_column_names",
+ "table_attributes",
+ ]
+ for attr in shallow:
+ assert id(getattr(s2, attr)) == id(getattr(styler, attr))
+
+ for attr in [
+ a
+ for a in styler.__dict__
+ if (not callable(a) and a not in excl and a not in shallow)
+ ]:
+ if getattr(s2, attr) is None:
+ assert id(getattr(s2, attr)) == id(getattr(styler, attr))
+ else:
+ assert id(getattr(s2, attr)) != id(getattr(styler, attr))
+
+
+@pytest.mark.parametrize("deepcopy", [True, False])
+def test_inherited_copy(mi_styler, deepcopy):
+ # Ensure that the inherited class is preserved when a Styler object is copied.
+ # GH 52728
+ class CustomStyler(Styler):
+ pass
+
+ custom_styler = CustomStyler(mi_styler.data)
+ custom_styler_copy = (
+ copy.deepcopy(custom_styler) if deepcopy else copy.copy(custom_styler)
+ )
+ assert isinstance(custom_styler_copy, CustomStyler)
+
+
+def test_clear(mi_styler_comp):
+ # NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
+    # to ensure proper testing of the 'copy', 'clear', 'export' methods with the new feature
+ # GH 40675
+ styler = mi_styler_comp
+ styler._compute() # execute applied methods
+
+ clean_copy = Styler(styler.data, uuid=styler.uuid)
+
+ excl = [
+ "data",
+ "index",
+ "columns",
+ "uuid",
+ "uuid_len", # uuid is set to be the same on styler and clean_copy
+ "cell_ids",
+ "cellstyle_map", # execution time only
+ "cellstyle_map_columns", # execution time only
+ "cellstyle_map_index", # execution time only
+ "template_latex", # render templates are class level
+ "template_html",
+ "template_html_style",
+ "template_html_table",
+ ]
+    # test that vars are not the same values on obj and clean copy before clear (except for excl)
+ for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
+ res = getattr(styler, attr) == getattr(clean_copy, attr)
+ if hasattr(res, "__iter__") and len(res) > 0:
+ assert not all(res) # some element in iterable differs
+ elif hasattr(res, "__iter__") and len(res) == 0:
+ pass # empty array
+ else:
+ assert not res # explicit var differs
+
+    # test vars have the same values on obj and clean copy after clearing
+ styler.clear()
+ for attr in [a for a in styler.__dict__ if not callable(a)]:
+ res = getattr(styler, attr) == getattr(clean_copy, attr)
+ assert all(res) if hasattr(res, "__iter__") else res
+
+
+def test_export(mi_styler_comp, mi_styler):
+ exp_attrs = [
+ "_todo",
+ "hide_index_",
+ "hide_index_names",
+ "hide_columns_",
+ "hide_column_names",
+ "table_attributes",
+ "table_styles",
+ "css",
+ ]
+ for attr in exp_attrs:
+ check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
+ assert not (
+ all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
+ )
+
+ export = mi_styler_comp.export()
+ used = mi_styler.use(export)
+ for attr in exp_attrs:
+ check = getattr(used, attr) == getattr(mi_styler_comp, attr)
+ assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
+
+ used.to_html()
+
+
+def test_hide_raises(mi_styler):
+ msg = "`subset` and `level` cannot be passed simultaneously"
+ with pytest.raises(ValueError, match=msg):
+ mi_styler.hide(axis="index", subset="something", level="something else")
+
+ msg = "`level` must be of type `int`, `str` or list of such"
+ with pytest.raises(ValueError, match=msg):
+ mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
+
+
+@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
+def test_hide_index_level(mi_styler, level):
+ mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
+ ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
+ assert len(ctx["head"][0]) == 3
+ assert len(ctx["head"][1]) == 3
+ assert len(ctx["head"][2]) == 4
+ assert ctx["head"][2][0]["is_visible"]
+ assert not ctx["head"][2][1]["is_visible"]
+
+ assert ctx["body"][0][0]["is_visible"]
+ assert not ctx["body"][0][1]["is_visible"]
+ assert ctx["body"][1][0]["is_visible"]
+ assert not ctx["body"][1][1]["is_visible"]
+
+
+@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
+@pytest.mark.parametrize("names", [True, False])
+def test_hide_columns_level(mi_styler, level, names):
+ mi_styler.columns.names = ["zero", "one"]
+ if names:
+ mi_styler.index.names = ["zero", "one"]
+ ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
+ assert len(ctx["head"]) == (2 if names else 1)
+
+
+@pytest.mark.parametrize("method", ["map", "apply"])
+@pytest.mark.parametrize("axis", ["index", "columns"])
+def test_apply_map_header(method, axis):
+ # GH 41893
+ df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
+ func = {
+ "apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
+ "map": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
+ }
+
+ # test execution added to todo
+ result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
+ assert len(result._todo) == 1
+ assert len(getattr(result, f"ctx_{axis}")) == 0
+
+ # test ctx object on compute
+ result._compute()
+ expected = {
+ (0, 0): [("attr", "val")],
+ }
+ assert getattr(result, f"ctx_{axis}") == expected
+
+
+@pytest.mark.parametrize("method", ["apply", "map"])
+@pytest.mark.parametrize("axis", ["index", "columns"])
+def test_apply_map_header_mi(mi_styler, method, axis):
+ # GH 41893
+ func = {
+ "apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
+ "map": lambda v: "attr: val" if "b" in v else "",
+ }
+ result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
+ expected = {(1, 1): [("attr", "val")]}
+ assert getattr(result, f"ctx_{axis}") == expected
+
+
+def test_apply_map_header_raises(mi_styler):
+ # GH 41893
+ with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
+ mi_styler.map_index(lambda v: "attr: val;", axis="bad")._compute()
+
+
+class TestStyler:
+ def test_init_non_pandas(self):
+ msg = "``data`` must be a Series or DataFrame"
+ with pytest.raises(TypeError, match=msg):
+ Styler([1, 2, 3])
+
+ def test_init_series(self):
+ result = Styler(Series([1, 2]))
+ assert result.data.ndim == 2
+
+ def test_repr_html_ok(self, styler):
+ styler._repr_html_()
+
+ def test_repr_html_mathjax(self, styler):
+ # gh-19824 / 41395
+ assert "tex2jax_ignore" not in styler._repr_html_()
+
+ with option_context("styler.html.mathjax", False):
+ assert "tex2jax_ignore" in styler._repr_html_()
+
+ def test_update_ctx(self, styler):
+ styler._update_ctx(DataFrame({"A": ["color: red", "color: blue"]}))
+ expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
+ assert styler.ctx == expected
+
+ def test_update_ctx_flatten_multi_and_trailing_semi(self, styler):
+ attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
+ styler._update_ctx(attrs)
+ expected = {
+ (0, 0): [("color", "red"), ("foo", "bar")],
+ (1, 0): [("color", "blue"), ("foo", "baz")],
+ }
+ assert styler.ctx == expected
+
+ def test_render(self):
+ df = DataFrame({"A": [0, 1]})
+ style = lambda x: Series(["color: red", "color: blue"], name=x.name)
+ s = Styler(df, uuid="AB").apply(style)
+ s.to_html()
+ # it worked?
+
+ def test_multiple_render(self, df):
+ # GH 39396
+ s = Styler(df, uuid_len=0).map(lambda x: "color: red;", subset=["A"])
+ s.to_html() # do 2 renders to ensure css styles not duplicated
+ assert (
+ '" in s.to_html()
+ )
+
+ def test_render_empty_dfs(self):
+ empty_df = DataFrame()
+ es = Styler(empty_df)
+ es.to_html()
+ # An index but no columns
+ DataFrame(columns=["a"]).style.to_html()
+ # A column but no index
+ DataFrame(index=["a"]).style.to_html()
+ # No IndexError raised?
+
+ def test_render_double(self):
+ df = DataFrame({"A": [0, 1]})
+ style = lambda x: Series(
+ ["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
+ )
+ s = Styler(df, uuid="AB").apply(style)
+ s.to_html()
+ # it worked?
+
+ def test_set_properties(self):
+ df = DataFrame({"A": [0, 1]})
+ result = df.style.set_properties(color="white", size="10px")._compute().ctx
+ # order is deterministic
+ v = [("color", "white"), ("size", "10px")]
+ expected = {(0, 0): v, (1, 0): v}
+ assert result.keys() == expected.keys()
+ for v1, v2 in zip(result.values(), expected.values()):
+ assert sorted(v1) == sorted(v2)
+
+ def test_set_properties_subset(self):
+ df = DataFrame({"A": [0, 1]})
+ result = (
+ df.style.set_properties(subset=IndexSlice[0, "A"], color="white")
+ ._compute()
+ .ctx
+ )
+ expected = {(0, 0): [("color", "white")]}
+ assert result == expected
+
+ def test_empty_index_name_doesnt_display(self, blank_value):
+ # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
+ df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
+ result = df.style._translate(True, True)
+ assert len(result["head"]) == 1
+ expected = {
+ "class": "blank level0",
+ "type": "th",
+ "value": blank_value,
+ "is_visible": True,
+ "display_value": blank_value,
+ }
+ assert expected.items() <= result["head"][0][0].items()
+
+ def test_index_name(self):
+ # https://github.com/pandas-dev/pandas/issues/11655
+ df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
+ result = df.set_index("A").style._translate(True, True)
+ expected = {
+ "class": "index_name level0",
+ "type": "th",
+ "value": "A",
+ "is_visible": True,
+ "display_value": "A",
+ }
+ assert expected.items() <= result["head"][1][0].items()
+
+ def test_numeric_columns(self):
+ # https://github.com/pandas-dev/pandas/issues/12125
+ # smoke test for _translate
+ df = DataFrame({0: [1, 2, 3]})
+ df.style._translate(True, True)
+
+ def test_apply_axis(self):
+ df = DataFrame({"A": [0, 0], "B": [1, 1]})
+ f = lambda x: [f"val: {x.max()}" for v in x]
+ result = df.style.apply(f, axis=1)
+ assert len(result._todo) == 1
+ assert len(result.ctx) == 0
+ result._compute()
+ expected = {
+ (0, 0): [("val", "1")],
+ (0, 1): [("val", "1")],
+ (1, 0): [("val", "1")],
+ (1, 1): [("val", "1")],
+ }
+ assert result.ctx == expected
+
+ result = df.style.apply(f, axis=0)
+ expected = {
+ (0, 0): [("val", "0")],
+ (0, 1): [("val", "1")],
+ (1, 0): [("val", "0")],
+ (1, 1): [("val", "1")],
+ }
+ result._compute()
+ assert result.ctx == expected
+ result = df.style.apply(f) # default
+ result._compute()
+ assert result.ctx == expected
+
+ @pytest.mark.parametrize("axis", [0, 1])
+ def test_apply_series_return(self, axis):
+ # GH 42014
+ df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
+
+ # test Series return where len(Series) < df.index or df.columns but labels OK
+ func = lambda s: Series(["color: red;"], index=["Y"])
+ result = df.style.apply(func, axis=axis)._compute().ctx
+ assert result[(1, 1)] == [("color", "red")]
+ assert result[(1 - axis, axis)] == [("color", "red")]
+
+ # test Series return where labels align but different order
+ func = lambda s: Series(["color: red;", "color: blue;"], index=["Y", "X"])
+ result = df.style.apply(func, axis=axis)._compute().ctx
+ assert result[(0, 0)] == [("color", "blue")]
+ assert result[(1, 1)] == [("color", "red")]
+ assert result[(1 - axis, axis)] == [("color", "red")]
+ assert result[(axis, 1 - axis)] == [("color", "blue")]
+
+ @pytest.mark.parametrize("index", [False, True])
+ @pytest.mark.parametrize("columns", [False, True])
+ def test_apply_dataframe_return(self, index, columns):
+ # GH 42014
+ df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
+ idxs = ["X", "Y"] if index else ["Y"]
+ cols = ["X", "Y"] if columns else ["Y"]
+ df_styles = DataFrame("color: red;", index=idxs, columns=cols)
+ result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx
+
+ assert result[(1, 1)] == [("color", "red")] # (Y,Y) styles always present
+ assert (result[(0, 1)] == [("color", "red")]) is index # (X,Y) only if index
+ assert (result[(1, 0)] == [("color", "red")]) is columns # (Y,X) only if cols
+ assert (result[(0, 0)] == [("color", "red")]) is (index and columns) # (X,X)
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:],
+ IndexSlice[:, ["A"]],
+ IndexSlice[[1], :],
+ IndexSlice[[1], ["A"]],
+ IndexSlice[:2, ["A", "B"]],
+ ],
+ )
+ @pytest.mark.parametrize("axis", [0, 1])
+ def test_apply_subset(self, slice_, axis, df):
+ def h(x, color="bar"):
+ return Series(f"color: {color}", index=x.index, name=x.name)
+
+ result = df.style.apply(h, axis=axis, subset=slice_, color="baz")._compute().ctx
+ expected = {
+ (r, c): [("color", "baz")]
+ for r, row in enumerate(df.index)
+ for c, col in enumerate(df.columns)
+ if row in df.loc[slice_].index and col in df.loc[slice_].columns
+ }
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:],
+ IndexSlice[:, ["A"]],
+ IndexSlice[[1], :],
+ IndexSlice[[1], ["A"]],
+ IndexSlice[:2, ["A", "B"]],
+ ],
+ )
+ def test_map_subset(self, slice_, df):
+ result = df.style.map(lambda x: "color:baz;", subset=slice_)._compute().ctx
+ expected = {
+ (r, c): [("color", "baz")]
+ for r, row in enumerate(df.index)
+ for c, col in enumerate(df.columns)
+ if row in df.loc[slice_].index and col in df.loc[slice_].columns
+ }
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:, IndexSlice["x", "A"]],
+ IndexSlice[:, IndexSlice[:, "A"]],
+ IndexSlice[:, IndexSlice[:, ["A", "C"]]], # missing col element
+ IndexSlice[IndexSlice["a", 1], :],
+ IndexSlice[IndexSlice[:, 1], :],
+ IndexSlice[IndexSlice[:, [1, 3]], :], # missing row element
+ IndexSlice[:, ("x", "A")],
+ IndexSlice[("a", 1), :],
+ ],
+ )
+ def test_map_subset_multiindex(self, slice_):
+ # GH 19861
+ # edited for GH 33562
+ if (
+ isinstance(slice_[-1], tuple)
+ and isinstance(slice_[-1][-1], list)
+ and "C" in slice_[-1][-1]
+ ):
+ ctx = pytest.raises(KeyError, match="C")
+ elif (
+ isinstance(slice_[0], tuple)
+ and isinstance(slice_[0][1], list)
+ and 3 in slice_[0][1]
+ ):
+ ctx = pytest.raises(KeyError, match="3")
+ else:
+ ctx = contextlib.nullcontext()
+
+ idx = MultiIndex.from_product([["a", "b"], [1, 2]])
+ col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
+ df = DataFrame(np.random.default_rng(2).random((4, 4)), columns=col, index=idx)
+
+ with ctx:
+ df.style.map(lambda x: "color: red;", subset=slice_).to_html()
+
+ def test_map_subset_multiindex_code(self):
+ # https://github.com/pandas-dev/pandas/issues/25858
+        # Checks styler.map works with multiindex when codes are provided
+ codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
+ columns = MultiIndex(
+ levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
+ )
+ df = DataFrame(
+ [[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
+ )
+ pct_subset = IndexSlice[:, IndexSlice[:, "%":"%"]]
+
+ def color_negative_red(val):
+ color = "red" if val < 0 else "black"
+ return f"color: {color}"
+
+ df.loc[pct_subset]
+ df.style.map(color_negative_red, subset=pct_subset)
+
+ @pytest.mark.parametrize(
+ "stylefunc", ["background_gradient", "bar", "text_gradient"]
+ )
+ def test_subset_for_boolean_cols(self, stylefunc):
+ # GH47838
+ df = DataFrame(
+ [
+ [1, 2],
+ [3, 4],
+ ],
+ columns=[False, True],
+ )
+ styled = getattr(df.style, stylefunc)()
+ styled._compute()
+ assert set(styled.ctx) == {(0, 0), (0, 1), (1, 0), (1, 1)}
+
+ def test_empty(self):
+ df = DataFrame({"A": [1, 0]})
+ s = df.style
+ s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
+
+ result = s._translate(True, True)["cellstyle"]
+ expected = [
+ {"props": [("color", "red")], "selectors": ["row0_col0"]},
+ {"props": [("", "")], "selectors": ["row1_col0"]},
+ ]
+ assert result == expected
+
+ def test_duplicate(self):
+ df = DataFrame({"A": [1, 0]})
+ s = df.style
+ s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
+
+ result = s._translate(True, True)["cellstyle"]
+ expected = [
+ {"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
+ ]
+ assert result == expected
+
+ def test_init_with_na_rep(self):
+ # GH 21527 28358
+ df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
+
+ ctx = Styler(df, na_rep="NA")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "NA"
+ assert ctx["body"][0][2]["display_value"] == "NA"
+
+ def test_caption(self, df):
+ styler = Styler(df, caption="foo")
+ result = styler.to_html()
+ assert all(["caption" in result, "foo" in result])
+
+ styler = df.style
+ result = styler.set_caption("baz")
+ assert styler is result
+ assert styler.caption == "baz"
+
+ def test_uuid(self, df):
+ styler = Styler(df, uuid="abc123")
+ result = styler.to_html()
+ assert "abc123" in result
+
+ styler = df.style
+ result = styler.set_uuid("aaa")
+ assert result is styler
+ assert result.uuid == "aaa"
+
+ def test_unique_id(self):
+ # See https://github.com/pandas-dev/pandas/issues/16780
+ df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
+ result = df.style.to_html(uuid="test")
+ assert "test" in result
+ ids = re.findall('id="(.*?)"', result)
+ assert np.unique(ids).size == len(ids)
+
+ def test_table_styles(self, df):
+ style = [{"selector": "th", "props": [("foo", "bar")]}] # default format
+ styler = Styler(df, table_styles=style)
+ result = " ".join(styler.to_html().split())
+ assert "th { foo: bar; }" in result
+
+ styler = df.style
+ result = styler.set_table_styles(style)
+ assert styler is result
+ assert styler.table_styles == style
+
+ # GH 39563
+ style = [{"selector": "th", "props": "foo:bar;"}] # css string format
+ styler = df.style.set_table_styles(style)
+ result = " ".join(styler.to_html().split())
+ assert "th { foo: bar; }" in result
+
+ def test_table_styles_multiple(self, df):
+ ctx = df.style.set_table_styles(
+ [
+ {"selector": "th,td", "props": "color:red;"},
+ {"selector": "tr", "props": "color:green;"},
+ ]
+ )._translate(True, True)["table_styles"]
+ assert ctx == [
+ {"selector": "th", "props": [("color", "red")]},
+ {"selector": "td", "props": [("color", "red")]},
+ {"selector": "tr", "props": [("color", "green")]},
+ ]
+
+ def test_table_styles_dict_multiple_selectors(self, df):
+ # GH 44011
+ result = df.style.set_table_styles(
+ {
+ "B": [
+ {"selector": "th,td", "props": [("border-left", "2px solid black")]}
+ ]
+ }
+ )._translate(True, True)["table_styles"]
+
+ expected = [
+ {"selector": "th.col1", "props": [("border-left", "2px solid black")]},
+ {"selector": "td.col1", "props": [("border-left", "2px solid black")]},
+ ]
+
+ assert result == expected
+
+ def test_maybe_convert_css_to_tuples(self):
+ expected = [("a", "b"), ("c", "d e")]
+ assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
+ assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected
+ expected = []
+ assert maybe_convert_css_to_tuples("") == expected
+
+ def test_maybe_convert_css_to_tuples_err(self):
+ msg = "Styles supplied as string must follow CSS rule formats"
+ with pytest.raises(ValueError, match=msg):
+ maybe_convert_css_to_tuples("err")
+
+ def test_table_attributes(self, df):
+ attributes = 'class="foo" data-bar'
+ styler = Styler(df, table_attributes=attributes)
+ result = styler.to_html()
+ assert 'class="foo" data-bar' in result
+
+ result = df.style.set_table_attributes(attributes).to_html()
+ assert 'class="foo" data-bar' in result
+
+ def test_apply_none(self):
+ def f(x):
+ return DataFrame(
+ np.where(x == x.max(), "color: red", ""),
+ index=x.index,
+ columns=x.columns,
+ )
+
+ result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
+ assert result[(1, 1)] == [("color", "red")]
+
+ def test_trim(self, df):
+ result = df.style.to_html() # trim=True
+ assert result.count("#") == 0
+
+ result = df.style.highlight_max().to_html()
+ assert result.count("#") == len(df.columns)
+
+ def test_export(self, df, styler):
+ f = lambda x: "color: red" if x > 0 else "color: blue"
+ g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
+ style1 = styler
+ style1.map(f).map(g, z="b").highlight_max()._compute() # = render
+ result = style1.export()
+ style2 = df.style
+ style2.use(result)
+ assert style1._todo == style2._todo
+ style2.to_html()
+
+ def test_bad_apply_shape(self):
+ df = DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])
+
+ msg = "resulted in the apply method collapsing to a Series."
+ with pytest.raises(ValueError, match=msg):
+ df.style._apply(lambda x: "x")
+
+ msg = "created invalid {} labels"
+ with pytest.raises(ValueError, match=msg.format("index")):
+ df.style._apply(lambda x: [""])
+
+ with pytest.raises(ValueError, match=msg.format("index")):
+ df.style._apply(lambda x: ["", "", "", ""])
+
+ with pytest.raises(ValueError, match=msg.format("index")):
+ df.style._apply(lambda x: Series(["a:v;", ""], index=["A", "C"]), axis=0)
+
+ with pytest.raises(ValueError, match=msg.format("columns")):
+ df.style._apply(lambda x: ["", "", ""], axis=1)
+
+ with pytest.raises(ValueError, match=msg.format("columns")):
+ df.style._apply(lambda x: Series(["a:v;", ""], index=["X", "Z"]), axis=1)
+
+ msg = "returned ndarray with wrong shape"
+ with pytest.raises(ValueError, match=msg):
+ df.style._apply(lambda x: np.array([[""], [""]]), axis=None)
+
+ def test_apply_bad_return(self):
+ def f(x):
+ return ""
+
+ df = DataFrame([[1, 2], [3, 4]])
+ msg = (
+ "must return a DataFrame or ndarray when passed to `Styler.apply` "
+ "with axis=None"
+ )
+ with pytest.raises(TypeError, match=msg):
+ df.style._apply(f, axis=None)
+
+ @pytest.mark.parametrize("axis", ["index", "columns"])
+ def test_apply_bad_labels(self, axis):
+ def f(x):
+ return DataFrame(**{axis: ["bad", "labels"]})
+
+ df = DataFrame([[1, 2], [3, 4]])
+ msg = f"created invalid {axis} labels."
+ with pytest.raises(ValueError, match=msg):
+ df.style._apply(f, axis=None)
+
+ def test_get_level_lengths(self):
+ index = MultiIndex.from_product([["a", "b"], [0, 1, 2]])
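+        # with sparsify=True, runs of repeated labels in a level collapse to a
+        # single entry keyed by the first position and holding the run length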
+ expected = {
+ (0, 0): 3,
+ (0, 3): 3,
+ (1, 0): 1,
+ (1, 1): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ (1, 4): 1,
+ (1, 5): 1,
+ }
+ result = _get_level_lengths(index, sparsify=True, max_index=100)
+ tm.assert_dict_equal(result, expected)
+
+ expected = {
+ (0, 0): 1,
+ (0, 1): 1,
+ (0, 2): 1,
+ (0, 3): 1,
+ (0, 4): 1,
+ (0, 5): 1,
+ (1, 0): 1,
+ (1, 1): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ (1, 4): 1,
+ (1, 5): 1,
+ }
+ result = _get_level_lengths(index, sparsify=False, max_index=100)
+ tm.assert_dict_equal(result, expected)
+
+ def test_get_level_lengths_un_sorted(self):
+ index = MultiIndex.from_arrays([[1, 1, 2, 1], ["a", "b", "b", "d"]])
+ expected = {
+ (0, 0): 2,
+ (0, 2): 1,
+ (0, 3): 1,
+ (1, 0): 1,
+ (1, 1): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ }
+ result = _get_level_lengths(index, sparsify=True, max_index=100)
+ tm.assert_dict_equal(result, expected)
+
+ expected = {
+ (0, 0): 1,
+ (0, 1): 1,
+ (0, 2): 1,
+ (0, 3): 1,
+ (1, 0): 1,
+ (1, 1): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ }
+ result = _get_level_lengths(index, sparsify=False, max_index=100)
+ tm.assert_dict_equal(result, expected)
+
+ def test_mi_sparse_index_names(self, blank_value):
+ # Test the class names and displayed value are correct on rendering MI names
+ df = DataFrame(
+ {"A": [1, 2]},
+ index=MultiIndex.from_arrays(
+ [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
+ ),
+ )
+ result = df.style._translate(True, True)
+ head = result["head"][1]
+ expected = [
+ {
+ "class": "index_name level0",
+ "display_value": "idx_level_0",
+ "is_visible": True,
+ },
+ {
+ "class": "index_name level1",
+ "display_value": "idx_level_1",
+ "is_visible": True,
+ },
+ {
+ "class": "blank col0",
+ "display_value": blank_value,
+ "is_visible": True,
+ },
+ ]
+ for i, expected_dict in enumerate(expected):
+ assert expected_dict.items() <= head[i].items()
+
+ def test_mi_sparse_column_names(self, blank_value):
+ df = DataFrame(
+ np.arange(16).reshape(4, 4),
+ index=MultiIndex.from_arrays(
+ [["a", "a", "b", "a"], [0, 1, 1, 2]],
+ names=["idx_level_0", "idx_level_1"],
+ ),
+ columns=MultiIndex.from_arrays(
+ [["C1", "C1", "C2", "C2"], [1, 0, 1, 0]], names=["colnam_0", "colnam_1"]
+ ),
+ )
+ result = Styler(df, cell_ids=False)._translate(True, True)
+
+ for level in [0, 1]:
+ head = result["head"][level]
+ expected = [
+ {
+ "class": "blank",
+ "display_value": blank_value,
+ "is_visible": True,
+ },
+ {
+ "class": f"index_name level{level}",
+ "display_value": f"colnam_{level}",
+ "is_visible": True,
+ },
+ ]
+ for i, expected_dict in enumerate(expected):
+ assert expected_dict.items() <= head[i].items()
+
+ def test_hide_column_headers(self, df, styler):
+ ctx = styler.hide(axis="columns")._translate(True, True)
+ assert len(ctx["head"]) == 0 # no header entries with an unnamed index
+
+ df.index.name = "some_name"
+ ctx = df.style.hide(axis="columns")._translate(True, True)
+ assert len(ctx["head"]) == 1
+        # index names still visible, changed in #42101, reverted in #43404
+
+ def test_hide_single_index(self, df):
+ # GH 14194
+ # single unnamed index
+ ctx = df.style._translate(True, True)
+ assert ctx["body"][0][0]["is_visible"]
+ assert ctx["head"][0][0]["is_visible"]
+ ctx2 = df.style.hide(axis="index")._translate(True, True)
+ assert not ctx2["body"][0][0]["is_visible"]
+ assert not ctx2["head"][0][0]["is_visible"]
+
+ # single named index
+ ctx3 = df.set_index("A").style._translate(True, True)
+ assert ctx3["body"][0][0]["is_visible"]
+ assert len(ctx3["head"]) == 2 # 2 header levels
+ assert ctx3["head"][0][0]["is_visible"]
+
+ ctx4 = df.set_index("A").style.hide(axis="index")._translate(True, True)
+ assert not ctx4["body"][0][0]["is_visible"]
+ assert len(ctx4["head"]) == 1 # only 1 header levels
+ assert not ctx4["head"][0][0]["is_visible"]
+
+ def test_hide_multiindex(self):
+ # GH 14194
+ df = DataFrame(
+ {"A": [1, 2], "B": [1, 2]},
+ index=MultiIndex.from_arrays(
+ [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
+ ),
+ )
+ ctx1 = df.style._translate(True, True)
+ # tests for 'a' and '0'
+ assert ctx1["body"][0][0]["is_visible"]
+ assert ctx1["body"][0][1]["is_visible"]
+ # check for blank header rows
+ assert len(ctx1["head"][0]) == 4 # two visible indexes and two data columns
+
+ ctx2 = df.style.hide(axis="index")._translate(True, True)
+ # tests for 'a' and '0'
+ assert not ctx2["body"][0][0]["is_visible"]
+ assert not ctx2["body"][0][1]["is_visible"]
+ # check for blank header rows
+ assert len(ctx2["head"][0]) == 3 # one hidden (col name) and two data columns
+ assert not ctx2["head"][0][0]["is_visible"]
+
+ def test_hide_columns_single_level(self, df):
+ # GH 14194
+ # test hiding single column
+ ctx = df.style._translate(True, True)
+ assert ctx["head"][0][1]["is_visible"]
+ assert ctx["head"][0][1]["display_value"] == "A"
+ assert ctx["head"][0][2]["is_visible"]
+ assert ctx["head"][0][2]["display_value"] == "B"
+ assert ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ ctx = df.style.hide("A", axis="columns")._translate(True, True)
+ assert not ctx["head"][0][1]["is_visible"]
+ assert not ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ # test hiding multiple columns
+ ctx = df.style.hide(["A", "B"], axis="columns")._translate(True, True)
+ assert not ctx["head"][0][1]["is_visible"]
+ assert not ctx["head"][0][2]["is_visible"]
+ assert not ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert not ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ def test_hide_columns_index_mult_levels(self):
+ # GH 14194
+ # setup dataframe with multiple column levels and indices
+ i1 = MultiIndex.from_arrays(
+ [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
+ )
+ i2 = MultiIndex.from_arrays(
+ [["b", "b"], [0, 1]], names=["col_level_0", "col_level_1"]
+ )
+ df = DataFrame([[1, 2], [3, 4]], index=i1, columns=i2)
+ ctx = df.style._translate(True, True)
+ # column headers
+ assert ctx["head"][0][2]["is_visible"]
+ assert ctx["head"][1][2]["is_visible"]
+ assert ctx["head"][1][3]["display_value"] == "1"
+ # indices
+ assert ctx["body"][0][0]["is_visible"]
+ # data
+ assert ctx["body"][1][2]["is_visible"]
+ assert ctx["body"][1][2]["display_value"] == "3"
+ assert ctx["body"][1][3]["is_visible"]
+ assert ctx["body"][1][3]["display_value"] == "4"
+
+ # hide top column level, which hides both columns
+ ctx = df.style.hide("b", axis="columns")._translate(True, True)
+ assert not ctx["head"][0][2]["is_visible"] # b
+ assert not ctx["head"][1][2]["is_visible"] # 0
+ assert not ctx["body"][1][2]["is_visible"] # 3
+ assert ctx["body"][0][0]["is_visible"] # index
+
+ # hide first column only
+ ctx = df.style.hide([("b", 0)], axis="columns")._translate(True, True)
+ assert not ctx["head"][0][2]["is_visible"] # b
+ assert ctx["head"][0][3]["is_visible"] # b
+ assert not ctx["head"][1][2]["is_visible"] # 0
+ assert not ctx["body"][1][2]["is_visible"] # 3
+ assert ctx["body"][1][3]["is_visible"]
+ assert ctx["body"][1][3]["display_value"] == "4"
+
+ # hide second column and index
+ ctx = df.style.hide([("b", 1)], axis=1).hide(axis=0)._translate(True, True)
+ assert not ctx["body"][0][0]["is_visible"] # index
+ assert len(ctx["head"][0]) == 3
+ assert ctx["head"][0][1]["is_visible"] # b
+ assert ctx["head"][1][1]["is_visible"] # 0
+ assert not ctx["head"][1][2]["is_visible"] # 1
+ assert not ctx["body"][1][3]["is_visible"] # 4
+ assert ctx["body"][1][2]["is_visible"]
+ assert ctx["body"][1][2]["display_value"] == "3"
+
+ # hide top row level, which hides both rows so body empty
+ ctx = df.style.hide("a", axis="index")._translate(True, True)
+ assert ctx["body"] == []
+
+ # hide first row only
+ ctx = df.style.hide(("a", 0), axis="index")._translate(True, True)
+ for i in [0, 1, 2, 3]:
+ assert "row1" in ctx["body"][0][i]["class"] # row0 not included in body
+ assert ctx["body"][0][i]["is_visible"]
+
+ def test_pipe(self, df):
+ def set_caption_from_template(styler, a, b):
+ return styler.set_caption(f"Dataframe with a = {a} and b = {b}")
+
+ styler = df.style.pipe(set_caption_from_template, "A", b="B")
+ assert "Dataframe with a = A and b = B" in styler.to_html()
+
+ # Test with an argument that is a (callable, keyword_name) pair.
+ def f(a, b, styler):
+ return (a, b, styler)
+
+ styler = df.style
+ result = styler.pipe((f, "styler"), a=1, b=2)
+ assert result == (1, 2, styler)
+
+ def test_no_cell_ids(self):
+ # GH 35588
+ # GH 35663
+ df = DataFrame(data=[[0]])
+ styler = Styler(df, uuid="_", cell_ids=False)
+ styler.to_html()
+ s = styler.to_html() # render twice to ensure ctx is not updated
+ assert s.find('<td class="data row0 col0" >') != -1
+
+ @pytest.mark.parametrize(
+ "classes",
+ [
+ DataFrame(
+ data=[["", "test-class"], [np.nan, None]],
+ columns=["A", "B"],
+ index=["a", "b"],
+ ),
+ DataFrame(data=[["test-class"]], columns=["B"], index=["a"]),
+ DataFrame(data=[["test-class", "unused"]], columns=["B", "C"], index=["a"]),
+ ],
+ )
+ def test_set_data_classes(self, classes):
+ # GH 36159
+ df = DataFrame(data=[[0, 1], [2, 3]], columns=["A", "B"], index=["a", "b"])
+ s = Styler(df, uuid_len=0, cell_ids=False).set_td_classes(classes).to_html()
+ assert '<td class="data row0 col0" >0</td>' in s
+ assert '<td class="data row0 col1 test-class" >1</td>' in s
+ assert '<td class="data row1 col0" >2</td>' in s
+ assert '<td class="data row1 col1" >3</td>' in s
+ # GH 39317
+ s = Styler(df, uuid_len=0, cell_ids=True).set_td_classes(classes).to_html()
+ assert '<td id="T__row0_col0" class="data row0 col0" >0</td>' in s
+ assert '<td id="T__row0_col1" class="data row0 col1 test-class" >1</td>' in s
+ assert '<td id="T__row1_col0" class="data row1 col0" >2</td>' in s
+ assert '<td id="T__row1_col1" class="data row1 col1" >3</td>' in s
+
+ def test_set_data_classes_reindex(self):
+ # GH 39317
+ df = DataFrame(
+ data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=[0, 1, 2], index=[0, 1, 2]
+ )
+ classes = DataFrame(
+ data=[["mi", "ma"], ["mu", "mo"]],
+ columns=[0, 2],
+ index=[0, 2],
+ )
+ s = Styler(df, uuid_len=0).set_td_classes(classes).to_html()
+ assert '<td id="T__row0_col0" class="data row0 col0 mi" >0</td>' in s
+ assert '<td id="T__row0_col2" class="data row0 col2 ma" >2</td>' in s
+ assert '<td id="T__row1_col1" class="data row1 col1" >4</td>' in s
+ assert '<td id="T__row2_col0" class="data row2 col0 mu" >6</td>' in s
+ assert '<td id="T__row2_col2" class="data row2 col2 mo" >8</td>' in s
+
+ def test_chaining_table_styles(self):
+ # GH 35607
+ df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
+ styler = df.style.set_table_styles(
+ [{"selector": "", "props": [("background-color", "yellow")]}]
+ ).set_table_styles(
+ [{"selector": ".col0", "props": [("background-color", "blue")]}],
+ overwrite=False,
+ )
+ assert len(styler.table_styles) == 2
+
+ def test_column_and_row_styling(self):
+ # GH 35607
+ df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
+ s = Styler(df, uuid_len=0)
+ s = s.set_table_styles({"A": [{"selector": "", "props": [("color", "blue")]}]})
+ assert "#T_ .col0 {\n color: blue;\n}" in s.to_html()
+ s = s.set_table_styles(
+ {0: [{"selector": "", "props": [("color", "blue")]}]}, axis=1
+ )
+ assert "#T_ .row0 {\n color: blue;\n}" in s.to_html()
+
+ @pytest.mark.parametrize("len_", [1, 5, 32, 33, 100])
+ def test_uuid_len(self, len_):
+ # GH 36345
+ df = DataFrame(data=[["A"]])
+ s = Styler(df, uuid_len=len_, cell_ids=False).to_html()
+ strt = s.find('id="T_')
+ end = s[strt + 6 :].find('"')
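+ # the element id is "T_" + uuid; requested lengths above 32 are capped at the full uuid hex length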
+ if len_ > 32:
+ assert end == 32
+ else:
+ assert end == len_
+
+ @pytest.mark.parametrize("len_", [-2, "bad", None])
+ def test_uuid_len_raises(self, len_):
+ # GH 36345
+ df = DataFrame(data=[["A"]])
+ msg = "``uuid_len`` must be an integer in range \\[0, 32\\]."
+ with pytest.raises(TypeError, match=msg):
+ Styler(df, uuid_len=len_, cell_ids=False).to_html()
+
+ @pytest.mark.parametrize(
+ "slc",
+ [
+ IndexSlice[:, :],
+ IndexSlice[:, 1],
+ IndexSlice[1, :],
+ IndexSlice[[1], [1]],
+ IndexSlice[1, [1]],
+ IndexSlice[[1], 1],
+ IndexSlice[1],
+ IndexSlice[1, 1],
+ slice(None, None, None),
+ [0, 1],
+ np.array([0, 1]),
+ Series([0, 1]),
+ ],
+ )
+ def test_non_reducing_slice(self, slc):
+ df = DataFrame([[0, 1], [2, 3]])
+
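+ # non_reducing_slice should wrap the key so that .loc always returns a 2D DataFrame, never reducing to a Series or scalar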
+ tslice_ = non_reducing_slice(slc)
+ assert isinstance(df.loc[tslice_], DataFrame)
+
+ @pytest.mark.parametrize("box", [list, Series, np.array])
+ def test_list_slice(self, box):
+ # like dataframe getitem
+ subset = box(["A"])
+
+ df = DataFrame({"A": [1, 2], "B": [3, 4]}, index=["A", "B"])
+ expected = IndexSlice[:, ["A"]]
+
+ result = non_reducing_slice(subset)
+ tm.assert_frame_equal(df.loc[result], df.loc[expected])
+
+ def test_non_reducing_slice_on_multiindex(self):
+ # GH 19861
+ dic = {
+ ("a", "d"): [1, 4],
+ ("a", "c"): [2, 3],
+ ("b", "c"): [3, 2],
+ ("b", "d"): [4, 1],
+ }
+ df = DataFrame(dic, index=[0, 1])
+ idx = IndexSlice
+ slice_ = idx[:, idx["b", "d"]]
+ tslice_ = non_reducing_slice(slice_)
+
+ result = df.loc[tslice_]
+ expected = DataFrame({("b", "d"): [4, 1]})
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:, :],
+ # check cols
+ IndexSlice[:, IndexSlice[["a"]]], # inferred deeper need list
+ IndexSlice[:, IndexSlice[["a"], ["c"]]], # inferred deeper need list
+ IndexSlice[:, IndexSlice["a", "c", :]],
+ IndexSlice[:, IndexSlice["a", :, "e"]],
+ IndexSlice[:, IndexSlice[:, "c", "e"]],
+ IndexSlice[:, IndexSlice["a", ["c", "d"], :]], # check list
+ IndexSlice[:, IndexSlice["a", ["c", "d", "-"], :]], # don't allow missing
+ IndexSlice[:, IndexSlice["a", ["c", "d", "-"], "e"]], # no slice
+ # check rows
+ IndexSlice[IndexSlice[["U"]], :], # inferred deeper need list
+ IndexSlice[IndexSlice[["U"], ["W"]], :], # inferred deeper need list
+ IndexSlice[IndexSlice["U", "W", :], :],
+ IndexSlice[IndexSlice["U", :, "Y"], :],
+ IndexSlice[IndexSlice[:, "W", "Y"], :],
+ IndexSlice[IndexSlice[:, "W", ["Y", "Z"]], :], # check list
+ IndexSlice[IndexSlice[:, "W", ["Y", "Z", "-"]], :], # don't allow missing
+ IndexSlice[IndexSlice["U", "W", ["Y", "Z", "-"]], :], # no slice
+ # check simultaneous
+ IndexSlice[IndexSlice[:, "W", "Y"], IndexSlice["a", "c", :]],
+ ],
+ )
+ def test_non_reducing_multi_slice_on_multiindex(self, slice_):
+ # GH 33562
+ cols = MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]])
+ idxs = MultiIndex.from_product([["U", "V"], ["W", "X"], ["Y", "Z"]])
+ df = DataFrame(np.arange(64).reshape(8, 8), columns=cols, index=idxs)
+
+ for lvl in [0, 1]:
+ key = slice_[lvl]
+ if isinstance(key, tuple):
+ for subkey in key:
+ if isinstance(subkey, list) and "-" in subkey:
+ # not present in the index level, raises KeyError since 2.0
+ with pytest.raises(KeyError, match="-"):
+ df.loc[slice_]
+ return
+
+ expected = df.loc[slice_]
+ result = df.loc[non_reducing_slice(slice_)]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_hidden_index_names(mi_df):
+ mi_df.index.names = ["Lev0", "Lev1"]
+ mi_styler = mi_df.style
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 3 # 2 column index levels + 1 index names row
+
+ mi_styler.hide(axis="index", names=True)
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 2 # index names row is unparsed
+ for i in range(4):
+ assert ctx["body"][0][i]["is_visible"] # 2 index levels + 2 data values visible
+
+ mi_styler.hide(axis="index", level=1)
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 2 # index names row is still hidden
+ assert ctx["body"][0][0]["is_visible"] is True
+ assert ctx["body"][0][1]["is_visible"] is False
+
+
+def test_hidden_column_names(mi_df):
+ mi_df.columns.names = ["Lev0", "Lev1"]
+ mi_styler = mi_df.style
+ ctx = mi_styler._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == "Lev0"
+ assert ctx["head"][1][1]["display_value"] == "Lev1"
+
+ mi_styler.hide(names=True, axis="columns")
+ ctx = mi_styler._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == " "
+ assert ctx["head"][1][1]["display_value"] == " "
+
+ mi_styler.hide(level=0, axis="columns")
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 1 # no index names and only one visible column headers
+ assert ctx["head"][0][1]["display_value"] == " "
+
+
+@pytest.mark.parametrize("caption", [1, ("a", "b", "c"), (1, "s")])
+def test_caption_raises(mi_styler, caption):
+ msg = "`caption` must be either a string or 2-tuple of strings."
+ with pytest.raises(ValueError, match=msg):
+ mi_styler.set_caption(caption)
+
+
+def test_hiding_headers_over_index_no_sparsify():
+ # GH 43464
+ midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]])
+ df = DataFrame(9, index=midx, columns=[0])
+ ctx = df.style._translate(False, False)
+ assert len(ctx["body"]) == 6
+ ctx = df.style.hide((1, "a"), axis=0)._translate(False, False)
+ assert len(ctx["body"]) == 4
+ assert "row2" in ctx["body"][0][0]["class"]
+
+
+def test_hiding_headers_over_columns_no_sparsify():
+ # GH 43464
+ midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]])
+ df = DataFrame(9, columns=midx, index=[0])
+ ctx = df.style._translate(False, False)
+ for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]:
+ assert ctx["head"][ix[0]][ix[1]]["is_visible"] is True
+ ctx = df.style.hide((1, "a"), axis="columns")._translate(False, False)
+ for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]:
+ assert ctx["head"][ix[0]][ix[1]]["is_visible"] is False
+
+
+def test_get_level_lengths_mi_hidden():
+ # GH 43464
+ index = MultiIndex.from_arrays([[1, 1, 1, 2, 2, 2], ["a", "a", "b", "a", "a", "b"]])
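+ # expected maps (level, visible row position) -> rowspan; positions 0 and 1 are hidden via hidden_elements below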
+ expected = {
+ (0, 2): 1,
+ (0, 3): 1,
+ (0, 4): 1,
+ (0, 5): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ (1, 4): 1,
+ (1, 5): 1,
+ }
+ result = _get_level_lengths(
+ index,
+ sparsify=False,
+ max_index=100,
+ hidden_elements=[0, 1, 0, 1], # hidden elements can repeat if the index has duplicates
+ )
+ tm.assert_dict_equal(result, expected)
+
+
+def test_row_trimming_hide_index():
+ # gh 43703
+ df = DataFrame([[1], [2], [3], [4], [5]])
+ with option_context("styler.render.max_rows", 2):
+ ctx = df.style.hide([0, 1], axis="index")._translate(True, True)
+ assert len(ctx["body"]) == 3
+ for r, val in enumerate(["3", "4", "..."]):
+ assert ctx["body"][r][1]["display_value"] == val
+
+
+def test_row_trimming_hide_index_mi():
+ # gh 44247
+ df = DataFrame([[1], [2], [3], [4], [5]])
+ df.index = MultiIndex.from_product([[0], [0, 1, 2, 3, 4]])
+ with option_context("styler.render.max_rows", 2):
+ ctx = df.style.hide([(0, 0), (0, 1)], axis="index")._translate(True, True)
+ assert len(ctx["body"]) == 3
+
+ # level 0 index headers (sparsified)
+ assert {"value": 0, "attributes": 'rowspan="2"', "is_visible": True}.items() <= ctx[
+ "body"
+ ][0][0].items()
+ assert {"value": 0, "attributes": "", "is_visible": False}.items() <= ctx["body"][
+ 1
+ ][0].items()
+ assert {"value": "...", "is_visible": True}.items() <= ctx["body"][2][0].items()
+
+ for r, val in enumerate(["2", "3", "..."]):
+ assert ctx["body"][r][1]["display_value"] == val # level 1 index headers
+ for r, val in enumerate(["3", "4", "..."]):
+ assert ctx["body"][r][2]["display_value"] == val # data values
+
+
+def test_col_trimming_hide_columns():
+ # gh 44272
+ df = DataFrame([[1, 2, 3, 4, 5]])
+ with option_context("styler.render.max_columns", 2):
+ ctx = df.style.hide([0, 1], axis="columns")._translate(True, True)
+
+ assert len(ctx["head"][0]) == 6 # blank, [0, 1 (hidden)], [2 ,3 (visible)], + trim
+ for c, vals in enumerate([(1, False), (2, True), (3, True), ("...", True)]):
+ assert ctx["head"][0][c + 2]["value"] == vals[0]
+ assert ctx["head"][0][c + 2]["is_visible"] == vals[1]
+
+ assert len(ctx["body"][0]) == 6 # index + 2 hidden + 2 visible + trimming col
+
+
+def test_no_empty_apply(mi_styler):
+ # 45313
+ mi_styler.apply(lambda s: ["a:v;"] * 2, subset=[False, False])
+ mi_styler._compute()
+
+
+@pytest.mark.parametrize("format", ["html", "latex", "string"])
+def test_output_buffer(mi_styler, format):
+ # gh 47053
+ with tm.ensure_clean(f"delete_me.{format}") as f:
+ getattr(mi_styler, f"to_{format}")(f)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_tooltip.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_tooltip.py
new file mode 100644
index 0000000000000000000000000000000000000000..c49a0e05c67002ab0b6eebd1ffd3bda554622f4d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_tooltip.py
@@ -0,0 +1,85 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame
+
+pytest.importorskip("jinja2")
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
+ columns=["A", "B", "C"],
+ index=["x", "y", "z"],
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+@pytest.mark.parametrize(
+ "ttips",
+ [
+ DataFrame( # Test basic reindex and ignoring blank
+ data=[["Min", "Max"], [np.nan, ""]],
+ columns=["A", "C"],
+ index=["x", "y"],
+ ),
+ DataFrame( # Test non-referenced columns, reversed col names, short index
+ data=[["Max", "Min", "Bad-Col"]], columns=["C", "A", "D"], index=["x"]
+ ),
+ ],
+)
+def test_tooltip_render(ttips, styler):
+ # GH 21266
+ result = styler.set_tooltips(ttips).to_html()
+
+ # test tooltip table level class
+ assert "#T_ .pd-t {\n visibility: hidden;\n" in result
+
+ # test 'Min' tooltip added
+ assert "#T_ #T__row0_col0:hover .pd-t {\n visibility: visible;\n}" in result
+ assert '#T_ #T__row0_col0 .pd-t::after {\n content: "Min";\n}' in result
+ assert 'class="data row0 col0" >0' in result
+
+ # test 'Max' tooltip added
+ assert "#T_ #T__row0_col2:hover .pd-t {\n visibility: visible;\n}" in result
+ assert '#T_ #T__row0_col2 .pd-t::after {\n content: "Max";\n}' in result
+ assert 'class="data row0 col2" >2' in result
+
+ # test Nan, empty string and bad column ignored
+ assert "#T_ #T__row1_col0:hover .pd-t {\n visibility: visible;\n}" not in result
+ assert "#T_ #T__row1_col1:hover .pd-t {\n visibility: visible;\n}" not in result
+ assert "#T_ #T__row0_col1:hover .pd-t {\n visibility: visible;\n}" not in result
+ assert "#T_ #T__row1_col2:hover .pd-t {\n visibility: visible;\n}" not in result
+ assert "Bad-Col" not in result
+
+
+def test_tooltip_ignored(styler):
+ # GH 21266
+ result = styler.to_html() # no set_tooltips() creates no <span>
+ assert '<style type="text/css">\n</style>' in result
+ assert '<span class="pd-t"></span>' not in result
+
+
+def test_tooltip_css_class(styler):
+ # GH 21266
+ result = styler.set_tooltips(
+ DataFrame([["tooltip"]], index=["x"], columns=["A"]),
+ css_class="other-class",
+ props=[("color", "green")],
+ ).to_html()
+ assert "#T_ .other-class {\n color: green;\n" in result
+ assert '#T_ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' in result
+
+ # GH 39563
+ result = styler.set_tooltips( # set_tooltips overwrites previous
+ DataFrame([["tooltip"]], index=["x"], columns=["A"]),
+ css_class="another-class",
+ props="color:green;color:red;",
+ ).to_html()
+ assert "#T_ .another-class {\n color: green;\n color: red;\n}" in result
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd7b57df9baed18b172dc8398a61a49e9435f82a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_console.py
@@ -0,0 +1,72 @@
+import locale
+
+import pytest
+
+from pandas._config import detect_console_encoding
+
+
+class MockEncoding:
+ """
+ Used to add a side effect when accessing the 'encoding' property. If the
+ side effect is a str in nature, the value will be returned. Otherwise, the
+ side effect should be an exception that will be raised.
+ """
+
+ def __init__(self, encoding) -> None:
+ super().__init__()
+ self.val = encoding
+
+ @property
+ def encoding(self):
+ return self.raise_or_return(self.val)
+
+ @staticmethod
+ def raise_or_return(val):
+ if isinstance(val, str):
+ return val
+ else:
+ raise val
+
+
+@pytest.mark.parametrize("empty,filled", [["stdin", "stdout"], ["stdout", "stdin"]])
+def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled):
+ # Ensures that sys.stdout.encoding or sys.stdin.encoding is used when
+ # they have values filled.
+ # GH 21552
+ with monkeypatch.context() as context:
+ context.setattr(f"sys.{empty}", MockEncoding(""))
+ context.setattr(f"sys.{filled}", MockEncoding(filled))
+ assert detect_console_encoding() == filled
+
+
+@pytest.mark.parametrize("encoding", [AttributeError, OSError, "ascii"])
+def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding):
+ # GH 21552
+ with monkeypatch.context() as context:
+ context.setattr("locale.getpreferredencoding", lambda: "foo")
+ context.setattr("sys.stdout", MockEncoding(encoding))
+ assert detect_console_encoding() == "foo"
+
+
+@pytest.mark.parametrize(
+ "std,locale",
+ [
+ ["ascii", "ascii"],
+ ["ascii", locale.Error],
+ [AttributeError, "ascii"],
+ [AttributeError, locale.Error],
+ [OSError, "ascii"],
+ [OSError, locale.Error],
+ ],
+)
+def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
+ # When both the stdout/stdin encoding and locale preferred encoding checks
+ # fail (or return 'ascii'), we should default to the sys default encoding.
+ # GH 21552
+ with monkeypatch.context() as context:
+ context.setattr(
+ "locale.getpreferredencoding", lambda: MockEncoding.raise_or_return(locale)
+ )
+ context.setattr("sys.stdout", MockEncoding(std))
+ context.setattr("sys.getdefaultencoding", lambda: "sysDefaultEncoding")
+ assert detect_console_encoding() == "sysDefaultEncoding"
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py
new file mode 100644
index 0000000000000000000000000000000000000000..db436d8283b9972819f8eff099689cf492d45a83
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py
@@ -0,0 +1,289 @@
+import pytest
+
+from pandas.errors import CSSWarning
+
+import pandas._testing as tm
+
+from pandas.io.formats.css import CSSResolver
+
+
+def assert_resolves(css, props, inherited=None):
+ resolve = CSSResolver()
+ actual = resolve(css, inherited=inherited)
+ assert props == actual
+
+
+def assert_same_resolution(css1, css2, inherited=None):
+ resolve = CSSResolver()
+ resolved1 = resolve(css1, inherited=inherited)
+ resolved2 = resolve(css2, inherited=inherited)
+ assert resolved1 == resolved2
+
+
+@pytest.mark.parametrize(
+ "name,norm,abnorm",
+ [
+ (
+ "whitespace",
+ "hello: world; foo: bar",
+ " \t hello \t :\n world \n ; \n foo: \tbar\n\n",
+ ),
+ ("case", "hello: world; foo: bar", "Hello: WORLD; foO: bar"),
+ ("empty-decl", "hello: world; foo: bar", "; hello: world;; foo: bar;\n; ;"),
+ ("empty-list", "", ";"),
+ ],
+)
+def test_css_parse_normalisation(name, norm, abnorm):
+ assert_same_resolution(norm, abnorm)
+
+
+@pytest.mark.parametrize(
+ "invalid_css,remainder",
+ [
+ # No colon
+ ("hello-world", ""),
+ ("border-style: solid; hello-world", "border-style: solid"),
+ (
+ "border-style: solid; hello-world; font-weight: bold",
+ "border-style: solid; font-weight: bold",
+ ),
+ # Unclosed string fail
+ # Invalid size
+ ("font-size: blah", "font-size: 1em"),
+ ("font-size: 1a2b", "font-size: 1em"),
+ ("font-size: 1e5pt", "font-size: 1em"),
+ ("font-size: 1+6pt", "font-size: 1em"),
+ ("font-size: 1unknownunit", "font-size: 1em"),
+ ("font-size: 10", "font-size: 1em"),
+ ("font-size: 10 pt", "font-size: 1em"),
+ # Too many args
+ ("border-top: 1pt solid red green", "border-top: 1pt solid green"),
+ ],
+)
+def test_css_parse_invalid(invalid_css, remainder):
+ with tm.assert_produces_warning(CSSWarning):
+ assert_same_resolution(invalid_css, remainder)
+
+
+@pytest.mark.parametrize(
+ "shorthand,expansions",
+ [
+ ("margin", ["margin-top", "margin-right", "margin-bottom", "margin-left"]),
+ ("padding", ["padding-top", "padding-right", "padding-bottom", "padding-left"]),
+ (
+ "border-width",
+ [
+ "border-top-width",
+ "border-right-width",
+ "border-bottom-width",
+ "border-left-width",
+ ],
+ ),
+ (
+ "border-color",
+ [
+ "border-top-color",
+ "border-right-color",
+ "border-bottom-color",
+ "border-left-color",
+ ],
+ ),
+ (
+ "border-style",
+ [
+ "border-top-style",
+ "border-right-style",
+ "border-bottom-style",
+ "border-left-style",
+ ],
+ ),
+ ],
+)
+def test_css_side_shorthands(shorthand, expansions):
+ top, right, bottom, left = expansions
+
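+ # box shorthand semantics: one value -> all sides; two -> (vertical, horizontal); three -> (top, horizontal, bottom); four -> clockwise from top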
+ assert_resolves(
+ f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"}
+ )
+
+ assert_resolves(
+ f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"}
+ )
+
+ assert_resolves(
+ f"{shorthand}: 1pt 4pt 2pt",
+ {top: "1pt", right: "4pt", bottom: "2pt", left: "4pt"},
+ )
+
+ assert_resolves(
+ f"{shorthand}: 1pt 4pt 2pt 0pt",
+ {top: "1pt", right: "4pt", bottom: "2pt", left: "0pt"},
+ )
+
+ with tm.assert_produces_warning(CSSWarning):
+ assert_resolves(f"{shorthand}: 1pt 1pt 1pt 1pt 1pt", {})
+
+
+@pytest.mark.parametrize(
+ "shorthand,sides",
+ [
+ ("border-top", ["top"]),
+ ("border-right", ["right"]),
+ ("border-bottom", ["bottom"]),
+ ("border-left", ["left"]),
+ ("border", ["top", "right", "bottom", "left"]),
+ ],
+)
+def test_css_border_shorthand_sides(shorthand, sides):
+ def create_border_dict(sides, color=None, style=None, width=None):
+ resolved = {}
+ for side in sides:
+ if color:
+ resolved[f"border-{side}-color"] = color
+ if style:
+ resolved[f"border-{side}-style"] = style
+ if width:
+ resolved[f"border-{side}-width"] = width
+ return resolved
+
+ assert_resolves(
+ f"{shorthand}: 1pt red solid", create_border_dict(sides, "red", "solid", "1pt")
+ )
+
+
+@pytest.mark.parametrize(
+ "prop, expected",
+ [
+ ("1pt red solid", ("red", "solid", "1pt")),
+ ("red 1pt solid", ("red", "solid", "1pt")),
+ ("red solid 1pt", ("red", "solid", "1pt")),
+ ("solid 1pt red", ("red", "solid", "1pt")),
+ ("red solid", ("red", "solid", "1.500000pt")),
+ # Note: color=black is not CSS conforming
+ # (See https://drafts.csswg.org/css-backgrounds/#border-shorthands)
+ ("1pt solid", ("black", "solid", "1pt")),
+ ("1pt red", ("red", "none", "1pt")),
+ ("red", ("red", "none", "1.500000pt")),
+ ("1pt", ("black", "none", "1pt")),
+ ("solid", ("black", "solid", "1.500000pt")),
+ # Sizes
+ ("1em", ("black", "none", "12pt")),
+ ],
+)
+def test_css_border_shorthands(prop, expected):
+ color, style, width = expected
+
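+ # per the cases above, omitted border components fall back to color=black, style=none and the "medium" width of 1.5pt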
+ assert_resolves(
+ f"border-left: {prop}",
+ {
+ "border-left-color": color,
+ "border-left-style": style,
+ "border-left-width": width,
+ },
+ )
+
+
+@pytest.mark.parametrize(
+ "style,inherited,equiv",
+ [
+ ("margin: 1px; margin: 2px", "", "margin: 2px"),
+ ("margin: 1px", "margin: 2px", "margin: 1px"),
+ ("margin: 1px; margin: inherit", "margin: 2px", "margin: 2px"),
+ (
+ "margin: 1px; margin-top: 2px",
+ "",
+ "margin-left: 1px; margin-right: 1px; "
+ "margin-bottom: 1px; margin-top: 2px",
+ ),
+ ("margin-top: 2px", "margin: 1px", "margin: 1px; margin-top: 2px"),
+ ("margin: 1px", "margin-top: 2px", "margin: 1px"),
+ (
+ "margin: 1px; margin-top: inherit",
+ "margin: 2px",
+ "margin: 1px; margin-top: 2px",
+ ),
+ ],
+)
+def test_css_precedence(style, inherited, equiv):
+ resolve = CSSResolver()
+ inherited_props = resolve(inherited)
+ style_props = resolve(style, inherited=inherited_props)
+ equiv_props = resolve(equiv)
+ assert style_props == equiv_props
+
+
+@pytest.mark.parametrize(
+ "style,equiv",
+ [
+ (
+ "margin: 1px; margin-top: inherit",
+ "margin-bottom: 1px; margin-right: 1px; margin-left: 1px",
+ ),
+ ("margin-top: inherit", ""),
+ ("margin-top: initial", ""),
+ ],
+)
+def test_css_none_absent(style, equiv):
+ assert_same_resolution(style, equiv)
+
+
+@pytest.mark.parametrize(
+ "size,resolved",
+ [
+ ("xx-small", "6pt"),
+ ("x-small", f"{7.5:f}pt"),
+ ("small", f"{9.6:f}pt"),
+ ("medium", "12pt"),
+ ("large", f"{13.5:f}pt"),
+ ("x-large", "18pt"),
+ ("xx-large", "24pt"),
+ ("8px", "6pt"),
+ ("1.25pc", "15pt"),
+ (".25in", "18pt"),
+ ("02.54cm", "72pt"),
+ ("25.4mm", "72pt"),
+ ("101.6q", "72pt"),
+ ("101.6q", "72pt"),
+ ],
+)
+@pytest.mark.parametrize("relative_to", [None, "16pt"]) # invariant to inherited size
+def test_css_absolute_font_size(size, relative_to, resolved):
+ if relative_to is None:
+ inherited = None
+ else:
+ inherited = {"font-size": relative_to}
+ assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
+
+
+@pytest.mark.parametrize(
+ "size,relative_to,resolved",
+ [
+ ("1em", None, "12pt"),
+ ("1.0em", None, "12pt"),
+ ("1.25em", None, "15pt"),
+ ("1em", "16pt", "16pt"),
+ ("1.0em", "16pt", "16pt"),
+ ("1.25em", "16pt", "20pt"),
+ ("1rem", "16pt", "12pt"),
+ ("1.0rem", "16pt", "12pt"),
+ ("1.25rem", "16pt", "15pt"),
+ ("100%", None, "12pt"),
+ ("125%", None, "15pt"),
+ ("100%", "16pt", "16pt"),
+ ("125%", "16pt", "20pt"),
+ ("2ex", None, "12pt"),
+ ("2.0ex", None, "12pt"),
+ ("2.50ex", None, "15pt"),
+ ("inherit", "16pt", "16pt"),
+ ("smaller", None, "10pt"),
+ ("smaller", "18pt", "15pt"),
+ ("larger", None, f"{14.4:f}pt"),
+ ("larger", "15pt", "18pt"),
+ ],
+)
+def test_css_relative_font_size(size, relative_to, resolved):
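+ # per the cases above: em/%/ex scale the inherited size (defaulting to 12pt), rem scales the root default, and 2ex is treated as 1em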
+ if relative_to is None:
+ inherited = None
+ else:
+ inherited = {"font-size": relative_to}
+ assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d581b5b92e0c8cbcfe21dbbdfb0f99ca05c1a4e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py
@@ -0,0 +1,254 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ reset_option,
+ set_eng_float_format,
+)
+
+from pandas.io.formats.format import EngFormatter
+
+
+@pytest.fixture(autouse=True)
+def reset_float_format():
+ yield
+ reset_option("display.float_format")
+
+
+class TestEngFormatter:
+ def test_eng_float_formatter2(self, float_frame):
+ df = float_frame
+ df.loc[5] = 0
+
+ set_eng_float_format()
+ repr(df)
+
+ set_eng_float_format(use_eng_prefix=True)
+ repr(df)
+
+ set_eng_float_format(accuracy=0)
+ repr(df)
+
+ def test_eng_float_formatter(self):
+ df = DataFrame({"A": [1.41, 141.0, 14100, 1410000.0]})
+
+ set_eng_float_format()
+ result = df.to_string()
+ expected = (
+ " A\n"
+ "0 1.410E+00\n"
+ "1 141.000E+00\n"
+ "2 14.100E+03\n"
+ "3 1.410E+06"
+ )
+ assert result == expected
+
+ set_eng_float_format(use_eng_prefix=True)
+ result = df.to_string()
+ expected = " A\n0 1.410\n1 141.000\n2 14.100k\n3 1.410M"
+ assert result == expected
+
+ set_eng_float_format(accuracy=0)
+ result = df.to_string()
+ expected = " A\n0 1E+00\n1 141E+00\n2 14E+03\n3 1E+06"
+ assert result == expected
+
+ def compare(self, formatter, input, output):
+ formatted_input = formatter(input)
+ assert formatted_input == output
+
+ def compare_all(self, formatter, in_out):
+ """
+ Parameters:
+ -----------
+ formatter: EngFormatter under test
+ in_out: list of tuples. Each tuple = (number, expected_formatting)
+
+ It is tested if 'formatter(number) == expected_formatting'.
+ *number* should be >= 0 because formatter(-number) == fmt is also
+ tested. *fmt* is derived from *expected_formatting*
+ """
+ for input, output in in_out:
+ self.compare(formatter, input, output)
+ self.compare(formatter, -input, "-" + output[1:])
+
+ def test_exponents_with_eng_prefix(self):
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
+ f = np.sqrt(2)
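+ # engineering notation keeps the mantissa in [1, 1000) and steps the exponent in multiples of 3; use_eng_prefix renders the exponent as an SI prefix (y through Y)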
+ in_out = [
+ (f * 10**-24, " 1.414y"),
+ (f * 10**-23, " 14.142y"),
+ (f * 10**-22, " 141.421y"),
+ (f * 10**-21, " 1.414z"),
+ (f * 10**-20, " 14.142z"),
+ (f * 10**-19, " 141.421z"),
+ (f * 10**-18, " 1.414a"),
+ (f * 10**-17, " 14.142a"),
+ (f * 10**-16, " 141.421a"),
+ (f * 10**-15, " 1.414f"),
+ (f * 10**-14, " 14.142f"),
+ (f * 10**-13, " 141.421f"),
+ (f * 10**-12, " 1.414p"),
+ (f * 10**-11, " 14.142p"),
+ (f * 10**-10, " 141.421p"),
+ (f * 10**-9, " 1.414n"),
+ (f * 10**-8, " 14.142n"),
+ (f * 10**-7, " 141.421n"),
+ (f * 10**-6, " 1.414u"),
+ (f * 10**-5, " 14.142u"),
+ (f * 10**-4, " 141.421u"),
+ (f * 10**-3, " 1.414m"),
+ (f * 10**-2, " 14.142m"),
+ (f * 10**-1, " 141.421m"),
+ (f * 10**0, " 1.414"),
+ (f * 10**1, " 14.142"),
+ (f * 10**2, " 141.421"),
+ (f * 10**3, " 1.414k"),
+ (f * 10**4, " 14.142k"),
+ (f * 10**5, " 141.421k"),
+ (f * 10**6, " 1.414M"),
+ (f * 10**7, " 14.142M"),
+ (f * 10**8, " 141.421M"),
+ (f * 10**9, " 1.414G"),
+ (f * 10**10, " 14.142G"),
+ (f * 10**11, " 141.421G"),
+ (f * 10**12, " 1.414T"),
+ (f * 10**13, " 14.142T"),
+ (f * 10**14, " 141.421T"),
+ (f * 10**15, " 1.414P"),
+ (f * 10**16, " 14.142P"),
+ (f * 10**17, " 141.421P"),
+ (f * 10**18, " 1.414E"),
+ (f * 10**19, " 14.142E"),
+ (f * 10**20, " 141.421E"),
+ (f * 10**21, " 1.414Z"),
+ (f * 10**22, " 14.142Z"),
+ (f * 10**23, " 141.421Z"),
+ (f * 10**24, " 1.414Y"),
+ (f * 10**25, " 14.142Y"),
+ (f * 10**26, " 141.421Y"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ def test_exponents_without_eng_prefix(self):
+ formatter = EngFormatter(accuracy=4, use_eng_prefix=False)
+ f = np.pi
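+ # without use_eng_prefix the exponent is written out as an explicit E+NN/E-NN suffix, still always a multiple of 3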
+ in_out = [
+ (f * 10**-24, " 3.1416E-24"),
+ (f * 10**-23, " 31.4159E-24"),
+ (f * 10**-22, " 314.1593E-24"),
+ (f * 10**-21, " 3.1416E-21"),
+ (f * 10**-20, " 31.4159E-21"),
+ (f * 10**-19, " 314.1593E-21"),
+ (f * 10**-18, " 3.1416E-18"),
+ (f * 10**-17, " 31.4159E-18"),
+ (f * 10**-16, " 314.1593E-18"),
+ (f * 10**-15, " 3.1416E-15"),
+ (f * 10**-14, " 31.4159E-15"),
+ (f * 10**-13, " 314.1593E-15"),
+ (f * 10**-12, " 3.1416E-12"),
+ (f * 10**-11, " 31.4159E-12"),
+ (f * 10**-10, " 314.1593E-12"),
+ (f * 10**-9, " 3.1416E-09"),
+ (f * 10**-8, " 31.4159E-09"),
+ (f * 10**-7, " 314.1593E-09"),
+ (f * 10**-6, " 3.1416E-06"),
+ (f * 10**-5, " 31.4159E-06"),
+ (f * 10**-4, " 314.1593E-06"),
+ (f * 10**-3, " 3.1416E-03"),
+ (f * 10**-2, " 31.4159E-03"),
+ (f * 10**-1, " 314.1593E-03"),
+ (f * 10**0, " 3.1416E+00"),
+ (f * 10**1, " 31.4159E+00"),
+ (f * 10**2, " 314.1593E+00"),
+ (f * 10**3, " 3.1416E+03"),
+ (f * 10**4, " 31.4159E+03"),
+ (f * 10**5, " 314.1593E+03"),
+ (f * 10**6, " 3.1416E+06"),
+ (f * 10**7, " 31.4159E+06"),
+ (f * 10**8, " 314.1593E+06"),
+ (f * 10**9, " 3.1416E+09"),
+ (f * 10**10, " 31.4159E+09"),
+ (f * 10**11, " 314.1593E+09"),
+ (f * 10**12, " 3.1416E+12"),
+ (f * 10**13, " 31.4159E+12"),
+ (f * 10**14, " 314.1593E+12"),
+ (f * 10**15, " 3.1416E+15"),
+ (f * 10**16, " 31.4159E+15"),
+ (f * 10**17, " 314.1593E+15"),
+ (f * 10**18, " 3.1416E+18"),
+ (f * 10**19, " 31.4159E+18"),
+ (f * 10**20, " 314.1593E+18"),
+ (f * 10**21, " 3.1416E+21"),
+ (f * 10**22, " 31.4159E+21"),
+ (f * 10**23, " 314.1593E+21"),
+ (f * 10**24, " 3.1416E+24"),
+ (f * 10**25, " 31.4159E+24"),
+ (f * 10**26, " 314.1593E+24"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ def test_rounding(self):
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
+ in_out = [
+ (5.55555, " 5.556"),
+ (55.5555, " 55.556"),
+ (555.555, " 555.555"),
+ (5555.55, " 5.556k"),
+ (55555.5, " 55.556k"),
+ (555555, " 555.555k"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
+ in_out = [
+ (5.55555, " 5.6"),
+ (55.5555, " 55.6"),
+ (555.555, " 555.6"),
+ (5555.55, " 5.6k"),
+ (55555.5, " 55.6k"),
+ (555555, " 555.6k"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ formatter = EngFormatter(accuracy=0, use_eng_prefix=True)
+ in_out = [
+ (5.55555, " 6"),
+ (55.5555, " 56"),
+ (555.555, " 556"),
+ (5555.55, " 6k"),
+ (55555.5, " 56k"),
+ (555555, " 556k"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
+ result = formatter(0)
+ assert result == " 0.000"
+
+ def test_nan(self):
+ # Issue #11981
+
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
+ result = formatter(np.nan)
+ assert result == "NaN"
+
+ df = DataFrame(
+ {
+ "a": [1.5, 10.3, 20.5],
+ "b": [50.3, 60.67, 70.12],
+ "c": [100.2, 101.33, 120.33],
+ }
+ )
+ pt = df.pivot_table(values="a", index="b", columns="c")
+ set_eng_float_format(accuracy=1)
+ result = pt.to_string()
+ assert "NaN" in result
+
+ def test_inf(self):
+ # Issue #11981
+
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
+ result = formatter(np.inf)
+ assert result == "inf"
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ca29c219b55b0931885f2fbf92cbf1fd809c5b8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py
@@ -0,0 +1,2293 @@
+"""
+Tests for the file pandas.io.formats.format, *not* tests for general formatting
+of pandas objects.
+"""
+from datetime import datetime
+from io import StringIO
+from pathlib import Path
+import re
+from shutil import get_terminal_size
+
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ NaT,
+ Series,
+ Timestamp,
+ date_range,
+ get_option,
+ option_context,
+ read_csv,
+ reset_option,
+)
+
+from pandas.io.formats import printing
+import pandas.io.formats.format as fmt
+
+
+@pytest.fixture(params=["string", "pathlike", "buffer"])
+def filepath_or_buffer_id(request):
+ """
+ A fixture yielding test ids for filepath_or_buffer testing.
+ """
+ return request.param
+
+
+@pytest.fixture
+def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
+ """
+ A fixture yielding, depending on the parameter, a filepath string, a
+ path-like object, or a StringIO buffer. Also checks that the buffer is
+ not closed.
+ """
+ if filepath_or_buffer_id == "buffer":
+ buf = StringIO()
+ yield buf
+ assert not buf.closed
+ else:
+ assert isinstance(tmp_path, Path)
+ if filepath_or_buffer_id == "pathlike":
+ yield tmp_path / "foo"
+ else:
+ yield str(tmp_path / "foo")
+
+
+@pytest.fixture
+def assert_filepath_or_buffer_equals(
+ filepath_or_buffer, filepath_or_buffer_id, encoding
+):
+ """
+ Assertion helper for checking filepath_or_buffer.
+ """
+ if encoding is None:
+ encoding = "utf-8"
+
+ def _assert_filepath_or_buffer_equals(expected):
+ if filepath_or_buffer_id == "string":
+ with open(filepath_or_buffer, encoding=encoding) as f:
+ result = f.read()
+ elif filepath_or_buffer_id == "pathlike":
+ result = filepath_or_buffer.read_text(encoding=encoding)
+ elif filepath_or_buffer_id == "buffer":
+ result = filepath_or_buffer.getvalue()
+ assert result == expected
+
+ return _assert_filepath_or_buffer_equals
+
+
+def has_info_repr(df):
+ r = repr(df)
+ c1 = r.split("\n")[0].startswith("<class")
+ c2 = r.split("\n")[0].startswith(r"&lt;class") # _repr_html_
+ return c1 or c2
+
+
+def has_non_verbose_info_repr(df):
+ has_info = has_info_repr(df)
+ r = repr(df)
+ # 1. <class>
+ # 2. Index
+ # 3. Columns
+ # 4. dtype
+ # 5. memory usage
+ # 6. trailing newline
+ nv = len(r.split("\n")) == 6
+ return has_info and nv
+
+
+def has_horizontally_truncated_repr(df):
+ try: # Check header row
+ fst_line = np.array(repr(df).splitlines()[0].split())
+ cand_col = np.where(fst_line == "...")[0][0]
+ except IndexError:
+ return False
+ # Make sure each row has this ... in the same place
+ r = repr(df)
+ for ix, _ in enumerate(r.splitlines()):
+ if not r.split()[cand_col] == "...":
+ return False
+ return True
+
+
+def has_vertically_truncated_repr(df):
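+ # a vertically truncated repr contains a row made up solely of dots and spaces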
+ r = repr(df)
+ only_dot_row = False
+ for row in r.splitlines():
+ if re.match(r"^[\.\ ]+$", row):
+ only_dot_row = True
+ return only_dot_row
+
+
+def has_truncated_repr(df):
+ return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
+
+
+def has_doubly_truncated_repr(df):
+ return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
+
+
+def has_expanded_repr(df):
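+ # a wide frame wrapped over several blocks continues each line with a trailing backslash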
+ r = repr(df)
+ for line in r.split("\n"):
+ if line.endswith("\\"):
+ return True
+ return False
+
+
+class TestDataFrameFormatting:
+ def test_repr_truncation(self):
+ max_len = 20
+ with option_context("display.max_colwidth", max_len):
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).standard_normal(10),
+ "B": [
+ "a"
+ * np.random.default_rng(2).integers(max_len - 1, max_len + 1)
+ for _ in range(10)
+ ],
+ }
+ )
+ r = repr(df)
+ r = r[r.find("\n") + 1 :]
+
+ adj = printing.get_adjustment()
+
+ for line, value in zip(r.split("\n"), df["B"]):
+ if adj.len(value) + 1 > max_len:
+ assert "..." in line
+ else:
+ assert "..." not in line
+
+ with option_context("display.max_colwidth", 999999):
+ assert "..." not in repr(df)
+
+ with option_context("display.max_colwidth", max_len + 2):
+ assert "..." not in repr(df)
+
+ def test_repr_truncation_preserves_na(self):
+ # https://github.com/pandas-dev/pandas/issues/55630
+ df = DataFrame({"a": [pd.NA for _ in range(10)]})
+ with option_context("display.max_rows", 2, "display.show_dimensions", False):
+ assert repr(df) == " a\n0 \n.. ...\n9 "
+
+ def test_max_colwidth_negative_int_raises(self):
+ # Deprecation enforced from:
+ # https://github.com/pandas-dev/pandas/issues/31532
+ with pytest.raises(
+ ValueError, match="Value must be a nonnegative integer or None"
+ ):
+ with option_context("display.max_colwidth", -1):
+ pass
+
+ def test_repr_chop_threshold(self):
+ df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
+ reset_option("display.chop_threshold") # default None
+ assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
+
+ with option_context("display.chop_threshold", 0.2):
+ assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
+
+ with option_context("display.chop_threshold", 0.6):
+ assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
+
+ with option_context("display.chop_threshold", None):
+ assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
+
+ def test_repr_chop_threshold_column_below(self):
+ # GH 6839: validation case
+
+ df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
+
+ with option_context("display.chop_threshold", 0):
+ assert repr(df) == (
+ " 0 1\n"
+ "0 10.0 8.000000e-10\n"
+ "1 20.0 -1.000000e-11\n"
+ "2 30.0 2.000000e-09\n"
+ "3 40.0 -2.000000e-11"
+ )
+
+ with option_context("display.chop_threshold", 1e-8):
+ assert repr(df) == (
+ " 0 1\n"
+ "0 10.0 0.000000e+00\n"
+ "1 20.0 0.000000e+00\n"
+ "2 30.0 0.000000e+00\n"
+ "3 40.0 0.000000e+00"
+ )
+
+ with option_context("display.chop_threshold", 5e-11):
+ assert repr(df) == (
+ " 0 1\n"
+ "0 10.0 8.000000e-10\n"
+ "1 20.0 0.000000e+00\n"
+ "2 30.0 2.000000e-09\n"
+ "3 40.0 0.000000e+00"
+ )
+
+ def test_repr_no_backslash(self):
+ with option_context("mode.sim_interactive", True):
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+ assert "\\" not in repr(df)
+
+ def test_expand_frame_repr(self):
+ df_small = DataFrame("hello", index=[0], columns=[0])
+ df_wide = DataFrame("hello", index=[0], columns=range(10))
+ df_tall = DataFrame("hello", index=range(30), columns=range(5))
+
+ with option_context("mode.sim_interactive", True):
+ with option_context(
+ "display.max_columns",
+ 10,
+ "display.width",
+ 20,
+ "display.max_rows",
+ 20,
+ "display.show_dimensions",
+ True,
+ ):
+ with option_context("display.expand_frame_repr", True):
+ assert not has_truncated_repr(df_small)
+ assert not has_expanded_repr(df_small)
+ assert not has_truncated_repr(df_wide)
+ assert has_expanded_repr(df_wide)
+ assert has_vertically_truncated_repr(df_tall)
+ assert has_expanded_repr(df_tall)
+
+ with option_context("display.expand_frame_repr", False):
+ assert not has_truncated_repr(df_small)
+ assert not has_expanded_repr(df_small)
+ assert not has_horizontally_truncated_repr(df_wide)
+ assert not has_expanded_repr(df_wide)
+ assert has_vertically_truncated_repr(df_tall)
+ assert not has_expanded_repr(df_tall)
+
+ def test_repr_non_interactive(self):
+ # in non-interactive mode, there can be no dependency on the
+ # result of terminal auto-size detection
+ df = DataFrame("hello", index=range(1000), columns=range(5))
+
+ with option_context(
+ "mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
+ ):
+ assert not has_truncated_repr(df)
+ assert not has_expanded_repr(df)
+
+ def test_repr_truncates_terminal_size(self, monkeypatch):
+ # see gh-21180
+
+ terminal_size = (118, 96)
+ monkeypatch.setattr(
+ "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
+ )
+
+ index = range(5)
+ columns = MultiIndex.from_tuples(
+ [
+ ("This is a long title with > 37 chars.", "cat"),
+ ("This is a loooooonger title with > 43 chars.", "dog"),
+ ]
+ )
+ df = DataFrame(1, index=index, columns=columns)
+
+ result = repr(df)
+
+ h1, h2 = result.split("\n")[:2]
+ assert "long" in h1
+ assert "loooooonger" in h1
+ assert "cat" in h2
+ assert "dog" in h2
+
+ # regular columns
+ df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
+ result = repr(df2)
+
+ assert df2.columns[0] in result.split("\n")[0]
+
+ def test_repr_truncates_terminal_size_full(self, monkeypatch):
+ # GH 22984 ensure entire window is filled
+ terminal_size = (80, 24)
+ df = DataFrame(np.random.default_rng(2).random((1, 7)))
+
+ monkeypatch.setattr(
+ "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
+ )
+ assert "..." not in str(df)
+
+ def test_repr_truncation_column_size(self):
+ # dataframe with last column very wide -> check it is not used to
+ # determine size of truncation (...) column
+ df = DataFrame(
+ {
+ "a": [108480, 30830],
+ "b": [12345, 12345],
+ "c": [12345, 12345],
+ "d": [12345, 12345],
+ "e": ["a" * 50] * 2,
+ }
+ )
+ assert "..." in str(df)
+ assert " ... " not in str(df)
+
+ def test_repr_max_columns_max_rows(self):
+ term_width, term_height = get_terminal_size()
+ if term_width < 10 or term_height < 10:
+ pytest.skip(f"terminal size too small, {term_width} x {term_height}")
+
+ def mkframe(n):
+ index = [f"{i:05d}" for i in range(n)]
+ return DataFrame(0, index, index)
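+ # n x n frame of zeros with fixed-width (5-char) labels, so repr width grows predictably with n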
+
+ df6 = mkframe(6)
+ df10 = mkframe(10)
+ with option_context("mode.sim_interactive", True):
+ with option_context("display.width", term_width * 2):
+ with option_context("display.max_rows", 5, "display.max_columns", 5):
+ assert not has_expanded_repr(mkframe(4))
+ assert not has_expanded_repr(mkframe(5))
+ assert not has_expanded_repr(df6)
+ assert has_doubly_truncated_repr(df6)
+
+ with option_context("display.max_rows", 20, "display.max_columns", 10):
+ # Past the max_columns boundary, but no expanding
+ # since the width is not exceeded
+ assert not has_expanded_repr(df6)
+ assert not has_truncated_repr(df6)
+
+ with option_context("display.max_rows", 9, "display.max_columns", 10):
+ # exceeding vertical bounds cannot result in an expanded repr
+ assert not has_expanded_repr(df10)
+ assert has_vertically_truncated_repr(df10)
+
+ # width=None in terminal, auto detection
+ with option_context(
+ "display.max_columns",
+ 100,
+ "display.max_rows",
+ term_width * 20,
+ "display.width",
+ None,
+ ):
+ df = mkframe((term_width // 7) - 2)
+ assert not has_expanded_repr(df)
+ df = mkframe((term_width // 7) + 2)
+ printing.pprint_thing(df._repr_fits_horizontal_())
+ assert has_expanded_repr(df)
+
+ def test_repr_min_rows(self):
+ df = DataFrame({"a": range(20)})
+
+ # default setting no truncation even if above min_rows
+ assert ".." not in repr(df)
+ assert ".." not in df._repr_html_()
+
+ df = DataFrame({"a": range(61)})
+
+ # default of max_rows 60 triggers truncation if above
+ assert ".." in repr(df)
+ assert ".." in df._repr_html_()
+
+ with option_context("display.max_rows", 10, "display.min_rows", 4):
+ # truncated after first two rows
+ assert ".." in repr(df)
+ assert "2 " not in repr(df)
+ assert "..." in df._repr_html_()
+ assert "2 | " not in df._repr_html_()
+
+ with option_context("display.max_rows", 12, "display.min_rows", None):
+ # when set to None, follow the value of max_rows
+ assert "5 5" in repr(df)
+ assert "<td>5</td>" in df._repr_html_()
+
+ with option_context("display.max_rows", 10, "display.min_rows", 12):
+ # when min_rows is set higher than max_rows, the smaller value is used
+ assert "5 5" not in repr(df)
+ assert "<td>5</td>" not in df._repr_html_()
+
+ with option_context("display.max_rows", None, "display.min_rows", 12):
+ # max_rows of None -> never truncate
+ assert ".." not in repr(df)
+ assert ".." not in df._repr_html_()
+
+ def test_str_max_colwidth(self):
+ # GH 7856
+ df = DataFrame(
+ [
+ {
+ "a": "foo",
+ "b": "bar",
+ "c": "uncomfortably long line with lots of stuff",
+ "d": 1,
+ },
+ {"a": "foo", "b": "bar", "c": "stuff", "d": 1},
+ ]
+ )
+ df.set_index(["a", "b", "c"])
+ assert str(df) == (
+ " a b c d\n"
+ "0 foo bar uncomfortably long line with lots of stuff 1\n"
+ "1 foo bar stuff 1"
+ )
+ with option_context("max_colwidth", 20):
+ assert str(df) == (
+ " a b c d\n"
+ "0 foo bar uncomfortably lo... 1\n"
+ "1 foo bar stuff 1"
+ )
+
+ def test_auto_detect(self):
+ term_width, term_height = get_terminal_size()
+ fac = 1.05 # Arbitrary large factor to exceed term width
+ cols = range(int(term_width * fac))
+ index = range(10)
+ df = DataFrame(index=index, columns=cols)
+ with option_context("mode.sim_interactive", True):
+ with option_context("display.max_rows", None):
+ with option_context("display.max_columns", None):
+ # Wrap around with None
+ assert has_expanded_repr(df)
+ with option_context("display.max_rows", 0):
+ with option_context("display.max_columns", 0):
+ # Truncate with auto detection.
+ assert has_horizontally_truncated_repr(df)
+
+ index = range(int(term_height * fac))
+ df = DataFrame(index=index, columns=cols)
+ with option_context("display.max_rows", 0):
+ with option_context("display.max_columns", None):
+ # Wrap around with None
+ assert has_expanded_repr(df)
+ # Truncate vertically
+ assert has_vertically_truncated_repr(df)
+
+ with option_context("display.max_rows", None):
+ with option_context("display.max_columns", 0):
+ assert has_horizontally_truncated_repr(df)
+
+ def test_to_string_repr_unicode2(self):
+ idx = Index(["abc", "\u03c3a", "aegdvg"])
+ ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
+ rs = repr(ser).split("\n")
+ line_len = len(rs[0])
+ for line in rs[1:]:
+ try:
+ line = line.decode(get_option("display.encoding"))
+ except AttributeError:
+ pass
+ if not line.startswith("dtype:"):
+ assert len(line) == line_len
+
+ def test_east_asian_unicode_false(self):
+ # not aligned properly because of east asian width
+
+ # mid col
+ df = DataFrame(
+ {"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na あ 1\n"
+ "bb いいい 222\nc う 33333\n"
+ "ddd ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ # last col
+ df = DataFrame(
+ {"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na 1 あ\n"
+ "bb 222 いいい\nc 33333 う\n"
+ "ddd 4 ええええええ"
+ )
+ assert repr(df) == expected
+
+ # all col
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na あああああ あ\n"
+ "bb い いいい\nc う う\n"
+ "ddd えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # column name
+ df = DataFrame(
+ {
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ "あああああ": [1, 222, 33333, 4],
+ },
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " b あああああ\na あ 1\n"
+ "bb いいい 222\nc う 33333\n"
+ "ddd ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ # index
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=["あああ", "いいいいいい", "うう", "え"],
+ )
+ expected = (
+ " a b\nあああ あああああ あ\n"
+ "いいいいいい い いいい\nうう う う\n"
+ "え えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # index name
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=Index(["あ", "い", "うう", "え"], name="おおおお"),
+ )
+ expected = (
+ " a b\n"
+ "おおおお \n"
+ "あ あああああ あ\n"
+ "い い いいい\n"
+ "うう う う\n"
+ "え えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # all
+ df = DataFrame(
+ {
+ "あああ": ["あああ", "い", "う", "えええええ"],
+ "いいいいい": ["あ", "いいい", "う", "ええ"],
+ },
+ index=Index(["あ", "いいい", "うう", "え"], name="お"),
+ )
+ expected = (
+ " あああ いいいいい\n"
+ "お \n"
+ "あ あああ あ\n"
+ "いいい い いいい\n"
+ "うう う う\n"
+ "え えええええ ええ"
+ )
+ assert repr(df) == expected
+
+ # MultiIndex
+ idx = MultiIndex.from_tuples(
+ [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
+ )
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=idx,
+ )
+ expected = (
+ " a b\n"
+ "あ いい あああああ あ\n"
+ "う え い いいい\n"
+ "おおお かかかか う う\n"
+ "き くく えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # truncate
+ with option_context("display.max_rows", 3, "display.max_columns", 3):
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ "c": ["お", "か", "ききき", "くくくくくく"],
+ "ああああ": ["さ", "し", "す", "せ"],
+ },
+ columns=["a", "b", "c", "ああああ"],
+ )
+
+ expected = (
+ " a ... ああああ\n0 あああああ ... さ\n"
+ ".. ... ... ...\n3 えええ ... せ\n"
+ "\n[4 rows x 4 columns]"
+ )
+ assert repr(df) == expected
+
+ df.index = ["あああ", "いいいい", "う", "aaa"]
+ expected = (
+ " a ... ああああ\nあああ あああああ ... さ\n"
+ ".. ... ... ...\naaa えええ ... せ\n"
+ "\n[4 rows x 4 columns]"
+ )
+ assert repr(df) == expected
+
+ def test_east_asian_unicode_true(self):
+ # Enable Unicode option -----------------------------------------
+ with option_context("display.unicode.east_asian_width", True):
+ # mid col
+ df = DataFrame(
+ {"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na あ 1\n"
+ "bb いいい 222\nc う 33333\n"
+ "ddd ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ # last col
+ df = DataFrame(
+ {"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na 1 あ\n"
+ "bb 222 いいい\nc 33333 う\n"
+ "ddd 4 ええええええ"
+ )
+ assert repr(df) == expected
+
+ # all col
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\n"
+ "a あああああ あ\n"
+ "bb い いいい\n"
+ "c う う\n"
+ "ddd えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # column name
+ df = DataFrame(
+ {
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ "あああああ": [1, 222, 33333, 4],
+ },
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " b あああああ\n"
+ "a あ 1\n"
+ "bb いいい 222\n"
+ "c う 33333\n"
+ "ddd ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ # index
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=["あああ", "いいいいいい", "うう", "え"],
+ )
+ expected = (
+ " a b\n"
+ "あああ あああああ あ\n"
+ "いいいいいい い いいい\n"
+ "うう う う\n"
+ "え えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # index name
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=Index(["あ", "い", "うう", "え"], name="おおおお"),
+ )
+ expected = (
+ " a b\n"
+ "おおおお \n"
+ "あ あああああ あ\n"
+ "い い いいい\n"
+ "うう う う\n"
+ "え えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # all
+ df = DataFrame(
+ {
+ "あああ": ["あああ", "い", "う", "えええええ"],
+ "いいいいい": ["あ", "いいい", "う", "ええ"],
+ },
+ index=Index(["あ", "いいい", "うう", "え"], name="お"),
+ )
+ expected = (
+ " あああ いいいいい\n"
+ "お \n"
+ "あ あああ あ\n"
+ "いいい い いいい\n"
+ "うう う う\n"
+ "え えええええ ええ"
+ )
+ assert repr(df) == expected
+
+ # MultiIndex
+ idx = MultiIndex.from_tuples(
+ [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
+ )
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=idx,
+ )
+ expected = (
+ " a b\n"
+ "あ いい あああああ あ\n"
+ "う え い いいい\n"
+ "おおお かかかか う う\n"
+ "き くく えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # truncate
+ with option_context("display.max_rows", 3, "display.max_columns", 3):
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ "c": ["お", "か", "ききき", "くくくくくく"],
+ "ああああ": ["さ", "し", "す", "せ"],
+ },
+ columns=["a", "b", "c", "ああああ"],
+ )
+
+ expected = (
+ " a ... ああああ\n"
+ "0 あああああ ... さ\n"
+ ".. ... ... ...\n"
+ "3 えええ ... せ\n"
+ "\n[4 rows x 4 columns]"
+ )
+ assert repr(df) == expected
+
+ df.index = ["あああ", "いいいい", "う", "aaa"]
+ expected = (
+ " a ... ああああ\n"
+ "あああ あああああ ... さ\n"
+ "... ... ... ...\n"
+ "aaa えええ ... せ\n"
+ "\n[4 rows x 4 columns]"
+ )
+ assert repr(df) == expected
+
+ # ambiguous unicode
+ df = DataFrame(
+ {
+ "b": ["あ", "いいい", "¡¡", "ええええええ"],
+ "あああああ": [1, 222, 33333, 4],
+ },
+ index=["a", "bb", "c", "¡¡¡"],
+ )
+ expected = (
+ " b あああああ\n"
+ "a あ 1\n"
+ "bb いいい 222\n"
+ "c ¡¡ 33333\n"
+ "¡¡¡ ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ def test_to_string_buffer_all_unicode(self):
+ buf = StringIO()
+
+ empty = DataFrame({"c/\u03c3": Series(dtype=object)})
+ nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
+
+ print(empty, file=buf)
+ print(nonempty, file=buf)
+
+ # this should work
+ buf.getvalue()
+
+ @pytest.mark.parametrize(
+ "index_scalar",
+ [
+ "a" * 10,
+ 1,
+ Timestamp(2020, 1, 1),
+ pd.Period("2020-01-01"),
+ ],
+ )
+ @pytest.mark.parametrize("h", [10, 20])
+ @pytest.mark.parametrize("w", [10, 20])
+ def test_to_string_truncate_indices(self, index_scalar, h, w):
+ with option_context("display.expand_frame_repr", False):
+ df = DataFrame(
+ index=[index_scalar] * h, columns=[str(i) * 10 for i in range(w)]
+ )
+ with option_context("display.max_rows", 15):
+ if h == 20:
+ assert has_vertically_truncated_repr(df)
+ else:
+ assert not has_vertically_truncated_repr(df)
+ with option_context("display.max_columns", 15):
+ if w == 20:
+ assert has_horizontally_truncated_repr(df)
+ else:
+ assert not has_horizontally_truncated_repr(df)
+ with option_context("display.max_rows", 15, "display.max_columns", 15):
+ if h == 20 and w == 20:
+ assert has_doubly_truncated_repr(df)
+ else:
+ assert not has_doubly_truncated_repr(df)
+
+ def test_to_string_truncate_multilevel(self):
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ df = DataFrame(index=arrays, columns=arrays)
+ with option_context("display.max_rows", 7, "display.max_columns", 7):
+ assert has_doubly_truncated_repr(df)
+
+ @pytest.mark.parametrize("dtype", ["object", "datetime64[us]"])
+ def test_truncate_with_different_dtypes(self, dtype):
+ # GH#11594, GH#12045
+ # when truncated, the dtypes of the split sections can differ
+
+ # 11594
+ ser = Series(
+ [datetime(2012, 1, 1)] * 10
+ + [datetime(1012, 1, 2)]
+ + [datetime(2012, 1, 3)] * 10,
+ dtype=dtype,
+ )
+
+ with option_context("display.max_rows", 8):
+ result = str(ser)
+ assert dtype in result
+
+ def test_truncate_with_different_dtypes2(self):
+ # 12045
+ df = DataFrame({"text": ["some words"] + [None] * 9}, dtype=object)
+
+ with option_context("display.max_rows", 8, "display.max_columns", 3):
+ result = str(df)
+ assert "None" in result
+ assert "NaN" not in result
+
+ def test_truncate_with_different_dtypes_multiindex(self):
+ # GH#13000
+ df = DataFrame({"Vals": range(100)})
+ frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
+ result = repr(frame)
+
+ result2 = repr(frame.iloc[:5])
+ assert result.startswith(result2)
+
+ def test_datetimelike_frame(self):
+ # GH 12211
+ df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
+
+ with option_context("display.max_rows", 5):
+ result = str(df)
+ assert "2013-01-01 00:00:00+00:00" in result
+ assert "NaT" in result
+ assert "..." in result
+ assert "[6 rows x 1 columns]" in result
+
+ dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
+ df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+ with option_context("display.max_rows", 5):
+ expected = (
+ " dt x\n"
+ "0 2011-01-01 00:00:00-05:00 1\n"
+ "1 2011-01-01 00:00:00-05:00 2\n"
+ ".. ... ..\n"
+ "8 NaT 9\n"
+ "9 NaT 10\n\n"
+ "[10 rows x 2 columns]"
+ )
+ assert repr(df) == expected
+
+ dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
+ df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+ with option_context("display.max_rows", 5):
+ expected = (
+ " dt x\n"
+ "0 NaT 1\n"
+ "1 NaT 2\n"
+ ".. ... ..\n"
+ "8 2011-01-01 00:00:00-05:00 9\n"
+ "9 2011-01-01 00:00:00-05:00 10\n\n"
+ "[10 rows x 2 columns]"
+ )
+ assert repr(df) == expected
+
+ dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
+ Timestamp("2011-01-01", tz="US/Eastern")
+ ] * 5
+ df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+ with option_context("display.max_rows", 5):
+ expected = (
+ " dt x\n"
+ "0 2011-01-01 00:00:00+09:00 1\n"
+ "1 2011-01-01 00:00:00+09:00 2\n"
+ ".. ... ..\n"
+ "8 2011-01-01 00:00:00-05:00 9\n"
+ "9 2011-01-01 00:00:00-05:00 10\n\n"
+ "[10 rows x 2 columns]"
+ )
+ assert repr(df) == expected
+
+ @pytest.mark.parametrize(
+ "start_date",
+ [
+ "2017-01-01 23:59:59.999999999",
+ "2017-01-01 23:59:59.99999999",
+ "2017-01-01 23:59:59.9999999",
+ "2017-01-01 23:59:59.999999",
+ "2017-01-01 23:59:59.99999",
+ "2017-01-01 23:59:59.9999",
+ ],
+ )
+ def test_datetimeindex_highprecision(self, start_date):
+ # GH19030
+ # Check that high-precision time values for the end of day are
+ # included in repr for DatetimeIndex
+ df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
+ result = str(df)
+ assert start_date in result
+
+ dti = date_range(start=start_date, freq="D", periods=5)
+ df = DataFrame({"A": range(5)}, index=dti)
+ result = str(df.index)
+ assert start_date in result
+
+ def test_string_repr_encoding(self, datapath):
+ filepath = datapath("io", "parser", "data", "unicode_series.csv")
+ df = read_csv(filepath, header=None, encoding="latin1")
+ repr(df)
+ repr(df[1])
+
+ def test_repr_corner(self):
+ # representing infs poses no problems
+ df = DataFrame({"foo": [-np.inf, np.inf]})
+ repr(df)
+
+ def test_frame_info_encoding(self):
+ index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
+ with option_context("display.max_rows", 1):
+ df = DataFrame(columns=["a", "b", "c"], index=index)
+ repr(df)
+ repr(df.T)
+
+ def test_wide_repr(self):
+ with option_context(
+ "mode.sim_interactive",
+ True,
+ "display.show_dimensions",
+ True,
+ "display.max_columns",
+ 20,
+ ):
+ max_cols = get_option("display.max_columns")
+ df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+
+ assert f"10 rows x {max_cols - 1} columns" in rep_str
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 120):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ def test_wide_repr_wide_columns(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 3)),
+ columns=["a" * 90, "b" * 90, "c" * 90],
+ )
+ rep_str = repr(df)
+
+ assert len(rep_str.splitlines()) == 20
+
+ def test_wide_repr_named(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ max_cols = get_option("display.max_columns")
+ df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
+ df.index.name = "DataFrame Index"
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ for line in wide_repr.splitlines()[1::13]:
+ assert "DataFrame Index" in line
+
+ def test_wide_repr_multiindex(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2)
+ max_cols = get_option("display.max_columns")
+ df = DataFrame([["a" * 25] * (max_cols - 1)] * 10, index=midx)
+ df.index.names = ["Level 0", "Level 1"]
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ for line in wide_repr.splitlines()[1::13]:
+ assert "Level 0 Level 1" in line
+
+ def test_wide_repr_multiindex_cols(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ max_cols = get_option("display.max_columns")
+ midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2)
+ mcols = MultiIndex.from_arrays([["b" * 3] * (max_cols - 1)] * 2)
+ df = DataFrame(
+ [["c" * 25] * (max_cols - 1)] * 10, index=midx, columns=mcols
+ )
+ df.index.names = ["Level 0", "Level 1"]
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150, "display.max_columns", 20):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ def test_wide_repr_unicode(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ max_cols = 20
+ df = DataFrame([["a" * 25] * 10] * (max_cols - 1))
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ def test_wide_repr_wide_long_columns(self):
+ with option_context("mode.sim_interactive", True):
+ df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
+
+ result = repr(df)
+ assert "ccccc" in result
+ assert "ddddd" in result
+
+ def test_long_series(self):
+ n = 1000
+ s = Series(
+ np.random.default_rng(2).integers(-50, 50, n),
+ index=[f"s{x:04d}" for x in range(n)],
+ dtype="int64",
+ )
+
+ str_rep = str(s)
+ nmatches = len(re.findall("dtype", str_rep))
+ assert nmatches == 1
+
+ def test_to_string_ascii_error(self):
+ data = [
+ (
+ "0 ",
+ " .gitignore ",
+ " 5 ",
+ " \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
+ )
+ ]
+ df = DataFrame(data)
+
+ # it works!
+ repr(df)
+
+ def test_show_dimensions(self):
+ df = DataFrame(123, index=range(10, 15), columns=range(30))
+
+ with option_context(
+ "display.max_rows",
+ 10,
+ "display.max_columns",
+ 40,
+ "display.width",
+ 500,
+ "display.expand_frame_repr",
+ "info",
+ "display.show_dimensions",
+ True,
+ ):
+ assert "5 rows" in str(df)
+ assert "5 rows" in df._repr_html_()
+ with option_context(
+ "display.max_rows",
+ 10,
+ "display.max_columns",
+ 40,
+ "display.width",
+ 500,
+ "display.expand_frame_repr",
+ "info",
+ "display.show_dimensions",
+ False,
+ ):
+ assert "5 rows" not in str(df)
+ assert "5 rows" not in df._repr_html_()
+ with option_context(
+ "display.max_rows",
+ 2,
+ "display.max_columns",
+ 2,
+ "display.width",
+ 500,
+ "display.expand_frame_repr",
+ "info",
+ "display.show_dimensions",
+ "truncate",
+ ):
+ assert "5 rows" in str(df)
+ assert "5 rows" in df._repr_html_()
+ with option_context(
+ "display.max_rows",
+ 10,
+ "display.max_columns",
+ 40,
+ "display.width",
+ 500,
+ "display.expand_frame_repr",
+ "info",
+ "display.show_dimensions",
+ "truncate",
+ ):
+ assert "5 rows" not in str(df)
+ assert "5 rows" not in df._repr_html_()
+
+ def test_info_repr(self):
+ # GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
+ # the terminal size to ensure that we try to print something "too big"
+ term_width, term_height = get_terminal_size()
+
+ max_rows = 60
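+ # the effective column cutoff scales with the detected terminal width, so
+ # derive it here rather than hard-coding 20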
+ max_cols = 20 + (max(term_width, 80) - 80) // 4
+ # Long
+ h, w = max_rows + 1, max_cols - 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert has_vertically_truncated_repr(df)
+ with option_context("display.large_repr", "info"):
+ assert has_info_repr(df)
+
+ # Wide
+ h, w = max_rows - 1, max_cols + 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert has_horizontally_truncated_repr(df)
+ with option_context(
+ "display.large_repr", "info", "display.max_columns", max_cols
+ ):
+ assert has_info_repr(df)
+
+ def test_info_repr_max_cols(self):
+ # GH #6939
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
+ with option_context(
+ "display.large_repr",
+ "info",
+ "display.max_columns",
+ 1,
+ "display.max_info_columns",
+ 4,
+ ):
+ assert has_non_verbose_info_repr(df)
+
+ with option_context(
+ "display.large_repr",
+ "info",
+ "display.max_columns",
+ 1,
+ "display.max_info_columns",
+ 5,
+ ):
+ assert not has_non_verbose_info_repr(df)
+
+ # FIXME: don't leave commented-out
+ # test verbose overrides
+ # set_option('display.max_info_columns', 4) # exceeded
+
+ def test_pprint_pathological_object(self):
+ """
+ Even if this test fails, it should at least fail fast rather than hang.
+ """
+
+ class A:
+ def __getitem__(self, key):
+ return 3 # obviously simplified
+
+ df = DataFrame([A()])
+ repr(df) # just don't die
+
+ def test_float_trim_zeros(self):
+ vals = [
+ 2.08430917305e10,
+ 3.52205017305e10,
+ 2.30674817305e10,
+ 2.03954217305e10,
+ 5.59897817305e10,
+ ]
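+ # the first repr line is the column header, which never contains an
+ # exponent; the 'skip' flag lets that line pass the first check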
+ skip = True
+ for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
+ if line.startswith("dtype:"):
+ continue
+ if _three_digit_exp():
+ assert ("+010" in line) or skip
+ else:
+ assert ("+10" in line) or skip
+ skip = False
+
+ @pytest.mark.parametrize(
+ "data, expected",
+ [
+ (["3.50"], "0 3.50\ndtype: object"),
+ ([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
+ ([np.nan], "0 NaN\ndtype: float64"),
+ ([None], "0 None\ndtype: object"),
+ (["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
+ ([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
+ ([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
+ ([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
+ ],
+ )
+ def test_repr_str_float_truncation(self, data, expected, using_infer_string):
+ # GH#38708
+ series = Series(data, dtype=object if "3.50" in data else None)
+ result = repr(series)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "float_format,expected",
+ [
+ ("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
+ ("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
+ ],
+ )
+ def test_repr_float_format_in_object_col(self, float_format, expected):
+ # GH#40024
+ ser = Series([1000.0, "test"])
+ with option_context("display.float_format", float_format):
+ result = repr(ser)
+
+ assert result == expected
+
+ def test_period(self):
+ # GH 12615
+ df = DataFrame(
+ {
+ "A": pd.period_range("2013-01", periods=4, freq="M"),
+ "B": [
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2011-02-01", freq="D"),
+ pd.Period("2011-03-01 09:00", freq="h"),
+ pd.Period("2011-04", freq="M"),
+ ],
+ "C": list("abcd"),
+ }
+ )
+ exp = (
+ " A B C\n"
+ "0 2013-01 2011-01 a\n"
+ "1 2013-02 2011-02-01 b\n"
+ "2 2013-03 2011-03-01 09:00 c\n"
+ "3 2013-04 2011-04 d"
+ )
+ assert str(df) == exp
+
+ @pytest.mark.parametrize(
+ "length, max_rows, min_rows, expected",
+ [
+ (10, 10, 10, 10),
+ (10, 10, None, 10),
+ (10, 8, None, 8),
+ (20, 30, 10, 30), # max_rows > len(frame), hence max_rows
+ (50, 30, 10, 10), # max_rows < len(frame), hence min_rows
+ (100, 60, 10, 10), # same
+ (60, 60, 10, 60), # edge case
+ (61, 60, 10, 10), # edge case
+ ],
+ )
+ def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
+ """Check that display logic is correct.
+
+ GH #37359
+
+ See description here:
+ https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
+ """
+ formatter = fmt.DataFrameFormatter(
+ DataFrame(np.random.default_rng(2).random((length, 3))),
+ max_rows=max_rows,
+ min_rows=min_rows,
+ )
+ result = formatter.max_rows_fitted
+ assert result == expected
+
+
+def gen_series_formatting():
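+ # series of uniform one- and two-letter values plus ascending/descending
+ # value widths, shared by the formatting tests below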
+ s1 = Series(["a"] * 100)
+ s2 = Series(["ab"] * 100)
+ s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
+ s4 = s3[::-1]
+ test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
+ return test_sers
+
+
+class TestSeriesFormatting:
+ def test_freq_name_separation(self):
+ s = Series(
+ np.random.default_rng(2).standard_normal(10),
+ index=date_range("1/1/2000", periods=10),
+ name=0,
+ )
+
+ result = repr(s)
+ assert "Freq: D, Name: 0" in result
+
+ def test_unicode_name_in_footer(self):
+ s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
+ sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
+ sf._get_footer() # should not raise exception
+
+ @pytest.mark.xfail(
+ using_pyarrow_string_dtype(), reason="Fixup when arrow is default"
+ )
+ def test_east_asian_unicode_series(self):
+ # without the east_asian_width option, the repr is not aligned properly
+
+ # unicode index
+ s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
+ expected = "".join(
+ [
+ "あ a\n",
+ "いい bb\n",
+ "ううう CCC\n",
+ "ええええ D\ndtype: object",
+ ]
+ )
+ assert repr(s) == expected
+
+ # unicode values
+ s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
+ expected = "".join(
+ [
+ "a あ\n",
+ "bb いい\n",
+ "c ううう\n",
+ "ddd ええええ\n",
+ "dtype: object",
+ ]
+ )
+
+ assert repr(s) == expected
+
+ # both
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["ああ", "いいいい", "う", "えええ"],
+ )
+ expected = "".join(
+ [
+ "ああ あ\n",
+ "いいいい いい\n",
+ "う ううう\n",
+ "えええ ええええ\n",
+ "dtype: object",
+ ]
+ )
+
+ assert repr(s) == expected
+
+ # unicode footer
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["ああ", "いいいい", "う", "えええ"],
+ name="おおおおおおお",
+ )
+ expected = (
+ "ああ あ\nいいいい いい\nう ううう\n"
+ "えええ ええええ\nName: おおおおおおお, dtype: object"
+ )
+ assert repr(s) == expected
+
+ # MultiIndex
+ idx = MultiIndex.from_tuples(
+ [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
+ )
+ s = Series([1, 22, 3333, 44444], index=idx)
+ expected = (
+ "あ いい 1\n"
+ "う え 22\n"
+ "おおお かかかか 3333\n"
+ "き くく 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # object dtype, shorter than unicode repr
+ s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
+ expected = (
+ "1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # object dtype, longer than unicode repr
+ s = Series(
+ [1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
+ )
+ expected = (
+ "1 1\n"
+ "AB 22\n"
+ "2011-01-01 00:00:00 3333\n"
+ "あああ 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # truncate
+ with option_context("display.max_rows", 3):
+ s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
+
+ expected = (
+ "0 あ\n ... \n"
+ "3 ええええ\n"
+ "Name: おおおおおおお, Length: 4, dtype: object"
+ )
+ assert repr(s) == expected
+
+ s.index = ["ああ", "いいいい", "う", "えええ"]
+ expected = (
+ "ああ あ\n ... \n"
+ "えええ ええええ\n"
+ "Name: おおおおおおお, Length: 4, dtype: object"
+ )
+ assert repr(s) == expected
+
+ # Enable Unicode option -----------------------------------------
+ with option_context("display.unicode.east_asian_width", True):
+ # unicode index
+ s = Series(
+ ["a", "bb", "CCC", "D"],
+ index=["あ", "いい", "ううう", "ええええ"],
+ )
+ expected = (
+ "あ a\nいい bb\nううう CCC\n"
+ "ええええ D\ndtype: object"
+ )
+ assert repr(s) == expected
+
+ # unicode values
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ "a あ\nbb いい\nc ううう\n"
+ "ddd ええええ\ndtype: object"
+ )
+ assert repr(s) == expected
+ # both
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["ああ", "いいいい", "う", "えええ"],
+ )
+ expected = (
+ "ああ あ\n"
+ "いいいい いい\n"
+ "う ううう\n"
+ "えええ ええええ\ndtype: object"
+ )
+ assert repr(s) == expected
+
+ # unicode footer
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["ああ", "いいいい", "う", "えええ"],
+ name="おおおおおおお",
+ )
+ expected = (
+ "ああ あ\n"
+ "いいいい いい\n"
+ "う ううう\n"
+ "えええ ええええ\n"
+ "Name: おおおおおおお, dtype: object"
+ )
+ assert repr(s) == expected
+
+ # MultiIndex
+ idx = MultiIndex.from_tuples(
+ [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
+ )
+ s = Series([1, 22, 3333, 44444], index=idx)
+ expected = (
+ "あ いい 1\n"
+ "う え 22\n"
+ "おおお かかかか 3333\n"
+ "き くく 44444\n"
+ "dtype: int64"
+ )
+ assert repr(s) == expected
+
+ # object dtype, shorter than unicode repr
+ s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
+ expected = (
+ "1 1\nAB 22\nNaN 3333\n"
+ "あああ 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # object dtype, longer than unicode repr
+ s = Series(
+ [1, 22, 3333, 44444],
+ index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
+ )
+ expected = (
+ "1 1\n"
+ "AB 22\n"
+ "2011-01-01 00:00:00 3333\n"
+ "あああ 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # truncate
+ with option_context("display.max_rows", 3):
+ s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
+ expected = (
+ "0 あ\n ... \n"
+ "3 ええええ\n"
+ "Name: おおおおおおお, Length: 4, dtype: object"
+ )
+ assert repr(s) == expected
+
+ s.index = ["ああ", "いいいい", "う", "えええ"]
+ expected = (
+ "ああ あ\n"
+ " ... \n"
+ "えええ ええええ\n"
+ "Name: おおおおおおお, Length: 4, dtype: object"
+ )
+ assert repr(s) == expected
+
+ # ambiguous unicode
+ s = Series(
+ ["¡¡", "い¡¡", "ううう", "ええええ"],
+ index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"],
+ )
+ expected = (
+ "ああ ¡¡\n"
+ "¡¡¡¡いい い¡¡\n"
+ "¡¡ ううう\n"
+ "えええ ええええ\ndtype: object"
+ )
+ assert repr(s) == expected
+
+ def test_float_trim_zeros(self):
+ vals = [
+ 2.08430917305e10,
+ 3.52205017305e10,
+ 2.30674817305e10,
+ 2.03954217305e10,
+ 5.59897817305e10,
+ ]
+ for line in repr(Series(vals)).split("\n"):
+ if line.startswith("dtype:"):
+ continue
+ if _three_digit_exp():
+ assert "+010" in line
+ else:
+ assert "+10" in line
+
+ @pytest.mark.parametrize(
+ "start_date",
+ [
+ "2017-01-01 23:59:59.999999999",
+ "2017-01-01 23:59:59.99999999",
+ "2017-01-01 23:59:59.9999999",
+ "2017-01-01 23:59:59.999999",
+ "2017-01-01 23:59:59.99999",
+ "2017-01-01 23:59:59.9999",
+ ],
+ )
+ def test_datetimeindex_highprecision(self, start_date):
+ # GH19030
+ # Check that high-precision time values for the end of day are
+ # included in repr for DatetimeIndex
+ s1 = Series(date_range(start=start_date, freq="D", periods=5))
+ result = str(s1)
+ assert start_date in result
+
+ dti = date_range(start=start_date, freq="D", periods=5)
+ s2 = Series(3, index=dti)
+ result = str(s2.index)
+ assert start_date in result
+
+ def test_mixed_datetime64(self):
+ df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
+ df["B"] = pd.to_datetime(df.B)
+
+ result = repr(df.loc[0])
+ assert "2012-01-01" in result
+
+ def test_period(self):
+ # GH 12615
+ index = pd.period_range("2013-01", periods=6, freq="M")
+ s = Series(np.arange(6, dtype="int64"), index=index)
+ exp = (
+ "2013-01 0\n"
+ "2013-02 1\n"
+ "2013-03 2\n"
+ "2013-04 3\n"
+ "2013-05 4\n"
+ "2013-06 5\n"
+ "Freq: M, dtype: int64"
+ )
+ assert str(s) == exp
+
+ s = Series(index)
+ exp = (
+ "0 2013-01\n"
+ "1 2013-02\n"
+ "2 2013-03\n"
+ "3 2013-04\n"
+ "4 2013-05\n"
+ "5 2013-06\n"
+ "dtype: period[M]"
+ )
+ assert str(s) == exp
+
+ # periods with mixed freq
+ s = Series(
+ [
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2011-02-01", freq="D"),
+ pd.Period("2011-03-01 09:00", freq="h"),
+ ]
+ )
+ exp = (
+ "0 2011-01\n1 2011-02-01\n"
+ "2 2011-03-01 09:00\ndtype: object"
+ )
+ assert str(s) == exp
+
+ def test_max_multi_index_display(self):
+ # GH 7101
+
+ # doc example (indexing.rst)
+
+ # multi-index
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ tuples = list(zip(*arrays))
+ index = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ s = Series(np.random.default_rng(2).standard_normal(8), index=index)
+
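+ # expected line counts include the index-name header row and the trailing
+ # dtype (or Length) line; the plain-index series further below has no
+ # header row, hence one line fewer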
+ with option_context("display.max_rows", 10):
+ assert len(str(s).split("\n")) == 10
+ with option_context("display.max_rows", 3):
+ assert len(str(s).split("\n")) == 5
+ with option_context("display.max_rows", 2):
+ assert len(str(s).split("\n")) == 5
+ with option_context("display.max_rows", 1):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 0):
+ assert len(str(s).split("\n")) == 10
+
+ # index
+ s = Series(np.random.default_rng(2).standard_normal(8), None)
+
+ with option_context("display.max_rows", 10):
+ assert len(str(s).split("\n")) == 9
+ with option_context("display.max_rows", 3):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 2):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 1):
+ assert len(str(s).split("\n")) == 3
+ with option_context("display.max_rows", 0):
+ assert len(str(s).split("\n")) == 9
+
+ # Make sure #8532 is fixed
+ def test_consistent_format(self):
+ s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
+ with option_context("display.max_rows", 10, "display.show_dimensions", False):
+ res = repr(s)
+ exp = (
+ "0 1.0000\n1 1.0000\n2 1.0000\n3 "
+ "1.0000\n4 1.0000\n ... \n125 "
+ "1.0000\n126 1.0000\n127 0.9999\n128 "
+ "1.0000\n129 1.0000\ndtype: float64"
+ )
+ assert res == exp
+
+ def check_ncols(self, s):
+ lines = [
+ line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
+ ][:-1]
+ ncolsizes = len({len(line.strip()) for line in lines})
+ assert ncolsizes == 1
+
+ @pytest.mark.xfail(
+ using_pyarrow_string_dtype(), reason="change when arrow is default"
+ )
+ def test_format_explicit(self):
+ test_sers = gen_series_formatting()
+ with option_context("display.max_rows", 4, "display.show_dimensions", False):
+ res = repr(test_sers["onel"])
+ exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
+ assert exp == res
+ res = repr(test_sers["twol"])
+ exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
+ assert exp == res
+ res = repr(test_sers["asc"])
+ exp = (
+ "0 a\n1 ab\n ... \n4 abcde\n5 "
+ "abcdef\ndtype: object"
+ )
+ assert exp == res
+ res = repr(test_sers["desc"])
+ exp = (
+ "5 abcdef\n4 abcde\n ... \n1 ab\n0 "
+ "a\ndtype: object"
+ )
+ assert exp == res
+
+ def test_ncols(self):
+ test_sers = gen_series_formatting()
+ for s in test_sers.values():
+ self.check_ncols(s)
+
+ def test_max_rows_eq_one(self):
+ s = Series(range(10), dtype="int64")
+ with option_context("display.max_rows", 1):
+ strrepr = repr(s).split("\n")
+ exp1 = ["0", "0"]
+ res1 = strrepr[0].split()
+ assert exp1 == res1
+ exp2 = [".."]
+ res2 = strrepr[1].split()
+ assert exp2 == res2
+
+ def test_truncate_ndots(self):
+ def getndots(s):
+ return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
+
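+ # the truncation marker tracks the value column width: '..' for the
+ # narrow single-digit values below, '...' for the wider ones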
+ s = Series([0, 2, 3, 6])
+ with option_context("display.max_rows", 2):
+ strrepr = repr(s).replace("\n", "")
+ assert getndots(strrepr) == 2
+
+ s = Series([0, 100, 200, 400])
+ with option_context("display.max_rows", 2):
+ strrepr = repr(s).replace("\n", "")
+ assert getndots(strrepr) == 3
+
+ def test_show_dimensions(self):
+ # gh-7117
+ s = Series(range(5))
+
+ assert "Length" not in repr(s)
+
+ with option_context("display.max_rows", 4):
+ assert "Length" in repr(s)
+
+ with option_context("display.show_dimensions", True):
+ assert "Length" in repr(s)
+
+ with option_context("display.max_rows", 4, "display.show_dimensions", False):
+ assert "Length" not in repr(s)
+
+ def test_repr_min_rows(self):
+ s = Series(range(20))
+
+ # with the default settings, no truncation even though length exceeds min_rows
+ assert ".." not in repr(s)
+
+ s = Series(range(61))
+
+ # the default max_rows of 60 triggers truncation once length exceeds it
+ assert ".." in repr(s)
+
+ with option_context("display.max_rows", 10, "display.min_rows", 4):
+ # truncated after first two rows
+ assert ".." in repr(s)
+ assert "2 " not in repr(s)
+
+ with option_context("display.max_rows", 12, "display.min_rows", None):
+ # when min_rows is None, follow the value of max_rows
+ assert "5 5" in repr(s)
+
+ with option_context("display.max_rows", 10, "display.min_rows", 12):
+ # when min_rows is set higher than max_rows, the smaller max_rows wins
+ assert "5 5" not in repr(s)
+
+ with option_context("display.max_rows", None, "display.min_rows", 12):
+ # max_rows of None -> never truncate
+ assert ".." not in repr(s)
+
+
+class TestGenericArrayFormatter:
+ def test_1d_array(self):
+ # _GenericArrayFormatter is used on types for which there isn't a dedicated
+ # formatter. np.bool_ is one of those types.
+ obj = fmt._GenericArrayFormatter(np.array([True, False]))
+ res = obj.get_result()
+ assert len(res) == 2
+ # Results should be right-justified.
+ assert res[0] == " True"
+ assert res[1] == " False"
+
+ def test_2d_array(self):
+ obj = fmt._GenericArrayFormatter(np.array([[True, False], [False, True]]))
+ res = obj.get_result()
+ assert len(res) == 2
+ assert res[0] == " [True, False]"
+ assert res[1] == " [False, True]"
+
+ def test_3d_array(self):
+ obj = fmt._GenericArrayFormatter(
+ np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
+ )
+ res = obj.get_result()
+ assert len(res) == 2
+ assert res[0] == " [[True, True], [False, False]]"
+ assert res[1] == " [[False, True], [True, False]]"
+
+ def test_2d_extension_type(self):
+ # GH 33770
+
+ # Define a stub extension type with just enough code to run Series.__repr__()
+ class DtypeStub(pd.api.extensions.ExtensionDtype):
+ @property
+ def type(self):
+ return np.ndarray
+
+ @property
+ def name(self):
+ return "DtypeStub"
+
+ class ExtTypeStub(pd.api.extensions.ExtensionArray):
+ def __len__(self) -> int:
+ return 2
+
+ def __getitem__(self, ix):
+ return [ix == 1, ix == 0]
+
+ @property
+ def dtype(self):
+ return DtypeStub()
+
+ series = Series(ExtTypeStub(), copy=False)
+ res = repr(series) # This line crashed before #33770 was fixed.
+ expected = "\n".join(
+ ["0 [False True]", "1 [True False]", "dtype: DtypeStub"]
+ )
+ assert res == expected
+
+
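+# On platforms whose C runtime prints three-digit exponents (older MSVC
+# builds), floats format like '1.7e+008'; this helper detects that so the
+# expected strings can adapt.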
+def _three_digit_exp():
+ return f"{1.7e8:.4g}" == "1.7e+008"
+
+
+class TestFloatArrayFormatter:
+ def test_misc(self):
+ obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
+ result = obj.get_result()
+ assert len(result) == 0
+
+ def test_format(self):
+ obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
+ result = obj.get_result()
+ assert result[0] == " 12.0"
+ assert result[1] == " 0.0"
+
+ def test_output_display_precision_trailing_zeroes(self):
+ # Issue #20359: zeros must not be trimmed when there is no decimal point
+
+ # Happens when display precision is set to zero
+ with option_context("display.precision", 0):
+ s = Series([840.0, 4200.0])
+ expected_output = "0 840\n1 4200\ndtype: float64"
+ assert str(s) == expected_output
+
+ @pytest.mark.parametrize(
+ "value,expected",
+ [
+ ([9.4444], " 0\n0 9"),
+ ([0.49], " 0\n0 5e-01"),
+ ([10.9999], " 0\n0 11"),
+ ([9.5444, 9.6], " 0\n0 10\n1 10"),
+ ([0.46, 0.78, -9.9999], " 0\n0 5e-01\n1 8e-01\n2 -1e+01"),
+ ],
+ )
+ def test_set_option_precision(self, value, expected):
+ # Issue #30122
+ # Precision was incorrectly shown
+
+ with option_context("display.precision", 0):
+ df_value = DataFrame(value)
+ assert str(df_value) == expected
+
+ def test_output_significant_digits(self):
+ # Issue #9764
+
+ # In case default display precision changes:
+ with option_context("display.precision", 6):
+ # DataFrame example from issue #9764
+ d = DataFrame(
+ {
+ "col1": [
+ 9.999e-8,
+ 1e-7,
+ 1.0001e-7,
+ 2e-7,
+ 4.999e-7,
+ 5e-7,
+ 5.0001e-7,
+ 6e-7,
+ 9.999e-7,
+ 1e-6,
+ 1.0001e-6,
+ 2e-6,
+ 4.999e-6,
+ 5e-6,
+ 5.0001e-6,
+ 6e-6,
+ ]
+ }
+ )
+
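+ # keys are (start, stop) row slices of d; each value is the expected
+ # repr of d[start:stop]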
+ expected_output = {
+ (0, 6): " col1\n"
+ "0 9.999000e-08\n"
+ "1 1.000000e-07\n"
+ "2 1.000100e-07\n"
+ "3 2.000000e-07\n"
+ "4 4.999000e-07\n"
+ "5 5.000000e-07",
+ (1, 6): " col1\n"
+ "1 1.000000e-07\n"
+ "2 1.000100e-07\n"
+ "3 2.000000e-07\n"
+ "4 4.999000e-07\n"
+ "5 5.000000e-07",
+ (1, 8): " col1\n"
+ "1 1.000000e-07\n"
+ "2 1.000100e-07\n"
+ "3 2.000000e-07\n"
+ "4 4.999000e-07\n"
+ "5 5.000000e-07\n"
+ "6 5.000100e-07\n"
+ "7 6.000000e-07",
+ (8, 16): " col1\n"
+ "8 9.999000e-07\n"
+ "9 1.000000e-06\n"
+ "10 1.000100e-06\n"
+ "11 2.000000e-06\n"
+ "12 4.999000e-06\n"
+ "13 5.000000e-06\n"
+ "14 5.000100e-06\n"
+ "15 6.000000e-06",
+ (9, 16): " col1\n"
+ "9 0.000001\n"
+ "10 0.000001\n"
+ "11 0.000002\n"
+ "12 0.000005\n"
+ "13 0.000005\n"
+ "14 0.000005\n"
+ "15 0.000006",
+ }
+
+ for (start, stop), v in expected_output.items():
+ assert str(d[start:stop]) == v
+
+ def test_too_long(self):
+ # GH 10451
+ with option_context("display.precision", 4):
+ # need both a number > 1e6 and something that normally formats to
+ # having length > display.precision + 6
+ df = DataFrame({"x": [12345.6789]})
+ assert str(df) == " x\n0 12345.6789"
+ df = DataFrame({"x": [2e6]})
+ assert str(df) == " x\n0 2000000.0"
+ df = DataFrame({"x": [12345.6789, 2e6]})
+ assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
+
+
+class TestTimedelta64Formatter:
+ def test_days(self):
+ x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+ assert result[1].strip() == "1 days"
+
+ result = fmt._Timedelta64Formatter(x[1:2]).get_result()
+ assert result[0].strip() == "1 days"
+
+ def test_days_neg(self):
+ x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(-x).get_result()
+ assert result[0].strip() == "0 days"
+ assert result[1].strip() == "-1 days"
+
+ def test_subdays(self):
+ y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")._values
+ result = fmt._Timedelta64Formatter(y).get_result()
+ assert result[0].strip() == "0 days 00:00:00"
+ assert result[1].strip() == "0 days 00:00:01"
+
+ def test_subdays_neg(self):
+ y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")._values
+ result = fmt._Timedelta64Formatter(-y).get_result()
+ assert result[0].strip() == "0 days 00:00:00"
+ assert result[1].strip() == "-1 days +23:59:59"
+
+ def test_zero(self):
+ x = pd.to_timedelta(list(range(1)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+
+ x = pd.to_timedelta(list(range(1)), unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+
+
+class TestDatetime64Formatter:
+ def test_mixed(self):
+ x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), NaT])._values
+ result = fmt._Datetime64Formatter(x).get_result()
+ assert result[0].strip() == "2013-01-01 00:00:00"
+ assert result[1].strip() == "2013-01-01 12:00:00"
+
+ def test_dates(self):
+ x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), NaT])._values
+ result = fmt._Datetime64Formatter(x).get_result()
+ assert result[0].strip() == "2013-01-01"
+ assert result[1].strip() == "2013-01-02"
+
+ def test_date_nanos(self):
+ x = Series([Timestamp(200)])._values
+ result = fmt._Datetime64Formatter(x).get_result()
+ assert result[0].strip() == "1970-01-01 00:00:00.000000200"
+
+ def test_dates_display(self):
+ # GH#10170
+ # make sure that we display date formatting consistently
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="D"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-05 09:00:00"
+
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="s"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-01 09:00:04"
+
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="ms"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00.000"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-01 09:00:00.004"
+
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="us"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00.000000"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-01 09:00:00.000004"
+
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="ns"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00.000000000"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-01 09:00:00.000000004"
+
+ def test_datetime64formatter_yearmonth(self):
+ x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])._values
+
+ def format_func(x):
+ return x.strftime("%Y-%m")
+
+ formatter = fmt._Datetime64Formatter(x, formatter=format_func)
+ result = formatter.get_result()
+ assert result == ["2016-01", "2016-02"]
+
+ def test_datetime64formatter_hoursecond(self):
+ x = Series(
+ pd.to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")
+ )._values
+
+ def format_func(x):
+ return x.strftime("%H:%M")
+
+ formatter = fmt._Datetime64Formatter(x, formatter=format_func)
+ result = formatter.get_result()
+ assert result == ["10:10", "12:12"]
+
+ def test_datetime64formatter_tz_ms(self):
+ x = (
+ Series(
+ np.array(["2999-01-01", "2999-01-02", "NaT"], dtype="datetime64[ms]")
+ )
+ .dt.tz_localize("US/Pacific")
+ ._values
+ )
+ result = fmt._Datetime64TZFormatter(x).get_result()
+ assert result[0].strip() == "2999-01-01 00:00:00-08:00"
+ assert result[1].strip() == "2999-01-02 00:00:00-08:00"
+
+
+class TestFormatPercentiles:
+ @pytest.mark.parametrize(
+ "percentiles, expected",
+ [
+ (
+ [0.01999, 0.02001, 0.5, 0.666666, 0.9999],
+ ["1.999%", "2.001%", "50%", "66.667%", "99.99%"],
+ ),
+ (
+ [0, 0.5, 0.02001, 0.5, 0.666666, 0.9999],
+ ["0%", "50%", "2.0%", "50%", "66.67%", "99.99%"],
+ ),
+ ([0.281, 0.29, 0.57, 0.58], ["28.1%", "29%", "57%", "58%"]),
+ ([0.28, 0.29, 0.57, 0.58], ["28%", "29%", "57%", "58%"]),
+ (
+ [0.9, 0.99, 0.999, 0.9999, 0.99999],
+ ["90%", "99%", "99.9%", "99.99%", "99.999%"],
+ ),
+ ],
+ )
+ def test_format_percentiles(self, percentiles, expected):
+ result = fmt.format_percentiles(percentiles)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "percentiles",
+ [
+ ([0.1, np.nan, 0.5]),
+ ([-0.001, 0.1, 0.5]),
+ ([2, 0.1, 0.5]),
+ ([0.1, 0.5, "a"]),
+ ],
+ )
+ def test_error_format_percentiles(self, percentiles):
+ msg = r"percentiles should all be in the interval \[0,1\]"
+ with pytest.raises(ValueError, match=msg):
+ fmt.format_percentiles(percentiles)
+
+ def test_format_percentiles_integer_idx(self):
+ # Issue #26660
+ result = fmt.format_percentiles(np.linspace(0, 1, 10 + 1))
+ expected = [
+ "0%",
+ "10%",
+ "20%",
+ "30%",
+ "40%",
+ "50%",
+ "60%",
+ "70%",
+ "80%",
+ "90%",
+ "100%",
+ ]
+ assert result == expected
+
+
+@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+@pytest.mark.parametrize(
+ "encoding, data",
+ [(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")],
+)
+def test_filepath_or_buffer_arg(
+ method,
+ filepath_or_buffer,
+ assert_filepath_or_buffer_equals,
+ encoding,
+ data,
+ filepath_or_buffer_id,
+):
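+ # filepath_or_buffer, assert_filepath_or_buffer_equals and
+ # filepath_or_buffer_id are fixtures supplied by this package's conftest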
+ df = DataFrame([data])
+ if method in ["to_latex"]: # uses styler implementation
+ pytest.importorskip("jinja2")
+
+ if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
+ with pytest.raises(
+ ValueError, match="buf is not a file name and encoding is specified."
+ ):
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ elif encoding == "foo":
+ with pytest.raises(LookupError, match="unknown encoding"):
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ else:
+ expected = getattr(df, method)()
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ assert_filepath_or_buffer_equals(expected)
+
+
+@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
+ if method in ["to_latex"]: # uses styler implementation
+ pytest.importorskip("jinja2")
+ msg = "buf is not a file name and it has no write method"
+ with pytest.raises(TypeError, match=msg):
+ getattr(float_frame, method)(buf=object())
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..8512f41396906de1f59bbb23d4b535f82c546132
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py
@@ -0,0 +1,90 @@
+import numpy as np
+
+import pandas._config.config as cf
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+)
+
+
+class TestTableSchemaRepr:
+ def test_publishes(self, ip):
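+ # the 'ip' fixture provides a configured IPython InteractiveShell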
+ ipython = ip.instance(config=ip.config)
+ df = DataFrame({"A": [1, 2]})
+ objects = [df["A"], df] # dataframe / series
+ expected_keys = [
+ {"text/plain", "application/vnd.dataresource+json"},
+ {"text/plain", "text/html", "application/vnd.dataresource+json"},
+ ]
+
+ opt = cf.option_context("display.html.table_schema", True)
+ last_obj = None
+ for obj, expected in zip(objects, expected_keys):
+ last_obj = obj
+ with opt:
+ formatted = ipython.display_formatter.format(obj)
+ assert set(formatted[0].keys()) == expected
+
+ with_latex = cf.option_context("styler.render.repr", "latex")
+
+ with opt, with_latex:
+ formatted = ipython.display_formatter.format(last_obj)
+
+ expected = {
+ "text/plain",
+ "text/html",
+ "text/latex",
+ "application/vnd.dataresource+json",
+ }
+ assert set(formatted[0].keys()) == expected
+
+ def test_publishes_not_implemented(self, ip):
+ # column MultiIndex
+ # GH#15996
+ midx = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, len(midx))), columns=midx
+ )
+
+ opt = cf.option_context("display.html.table_schema", True)
+
+ with opt:
+ formatted = ip.instance(config=ip.config).display_formatter.format(df)
+
+ expected = {"text/plain", "text/html"}
+ assert set(formatted[0].keys()) == expected
+
+ def test_config_on(self):
+ df = DataFrame({"A": [1, 2]})
+ with cf.option_context("display.html.table_schema", True):
+ result = df._repr_data_resource_()
+
+ assert result is not None
+
+ def test_config_default_off(self):
+ df = DataFrame({"A": [1, 2]})
+ with cf.option_context("display.html.table_schema", False):
+ result = df._repr_data_resource_()
+
+ assert result is None
+
+ def test_enable_data_resource_formatter(self, ip):
+ # GH#10491
+ formatters = ip.instance(config=ip.config).display_formatter.formatters
+ mimetype = "application/vnd.dataresource+json"
+
+ with cf.option_context("display.html.table_schema", True):
+ assert "application/vnd.dataresource+json" in formatters
+ assert formatters[mimetype].enabled
+
+ # still there, just disabled
+ assert "application/vnd.dataresource+json" in formatters
+ assert not formatters[mimetype].enabled
+
+ # able to re-set
+ with cf.option_context("display.html.table_schema", True):
+ assert "application/vnd.dataresource+json" in formatters
+ assert formatters[mimetype].enabled
+ # smoke test that it works
+ ip.instance(config=ip.config).display_formatter.format(cf)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py
new file mode 100644
index 0000000000000000000000000000000000000000..acf2bc72c687d44dd1769468d21fba1bb04443b0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py
@@ -0,0 +1,129 @@
+# Note! This file is aimed specifically at pandas.io.formats.printing utility
+# functions, not the general printing of pandas objects.
+import string
+
+import pandas._config.config as cf
+
+from pandas.io.formats import printing
+
+
+def test_adjoin():
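+ # adjoin(space, *lists) joins the lists column-wise, padding columns with
+ # `space` blanks between them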
+ data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]]
+ expected = "a dd ggg\nb ee hhh\nc ff iii"
+
+ adjoined = printing.adjoin(2, *data)
+
+ assert adjoined == expected
+
+
+class TestPPrintThing:
+ def test_repr_binary_type(self):
+ letters = string.ascii_letters
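+ # bytes() from a str requires an encoding on Python 3; the TypeError
+ # fallback below is a Python 2 leftover and is never taken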
+ try:
+ raw = bytes(letters, encoding=cf.get_option("display.encoding"))
+ except TypeError:
+ raw = bytes(letters)
+ b = str(raw.decode("utf-8"))
+ res = printing.pprint_thing(b, quote_strings=True)
+ assert res == repr(b)
+ res = printing.pprint_thing(b, quote_strings=False)
+ assert res == b
+
+ def test_repr_obeys_max_seq_limit(self):
+ with cf.option_context("display.max_seq_items", 2000):
+ assert len(printing.pprint_thing(list(range(1000)))) > 1000
+
+ with cf.option_context("display.max_seq_items", 5):
+ assert len(printing.pprint_thing(list(range(1000)))) < 100
+
+ with cf.option_context("display.max_seq_items", 1):
+ assert len(printing.pprint_thing(list(range(1000)))) < 9
+
+ def test_repr_set(self):
+ assert printing.pprint_thing({1}) == "{1}"
+
+
+class TestFormatBase:
+ def test_adjoin(self):
+ data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]]
+ expected = "a dd ggg\nb ee hhh\nc ff iii"
+
+ adjoined = printing.adjoin(2, *data)
+
+ assert adjoined == expected
+
+ def test_adjoin_unicode(self):
+ data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "hhh", "いいい"]]
+ expected = "あ dd ggg\nb ええ hhh\nc ff いいい"
+ adjoined = printing.adjoin(2, *data)
+ assert adjoined == expected
+
+ adj = printing._EastAsianTextAdjustment()
+
+ expected = """あ dd ggg
+b ええ hhh
+c ff いいい"""
+
+ adjoined = adj.adjoin(2, *data)
+ assert adjoined == expected
+ cols = adjoined.split("\n")
+ assert adj.len(cols[0]) == 13
+ assert adj.len(cols[1]) == 13
+ assert adj.len(cols[2]) == 16
+
+ expected = """あ dd ggg
+b ええ hhh
+c ff いいい"""
+
+ adjoined = adj.adjoin(7, *data)
+ assert adjoined == expected
+ cols = adjoined.split("\n")
+ assert adj.len(cols[0]) == 23
+ assert adj.len(cols[1]) == 23
+ assert adj.len(cols[2]) == 26
+
+ def test_justify(self):
+ adj = printing._EastAsianTextAdjustment()
+
+ def just(x, *args, **kwargs):
+ # wrapper to test single str
+ return adj.justify([x], *args, **kwargs)[0]
+
+ assert just("abc", 5, mode="left") == "abc "
+ assert just("abc", 5, mode="center") == " abc "
+ assert just("abc", 5, mode="right") == " abc"
+ assert just("abc", 5, mode="left") == "abc "
+ assert just("abc", 5, mode="center") == " abc "
+ assert just("abc", 5, mode="right") == " abc"
+
+ assert just("パンダ", 5, mode="left") == "パンダ"
+ assert just("パンダ", 5, mode="center") == "パンダ"
+ assert just("パンダ", 5, mode="right") == "パンダ"
+
+ assert just("パンダ", 10, mode="left") == "パンダ "
+ assert just("パンダ", 10, mode="center") == " パンダ "
+ assert just("パンダ", 10, mode="right") == " パンダ"
+
+ def test_east_asian_len(self):
+ adj = printing._EastAsianTextAdjustment()
+
+ assert adj.len("abc") == 3
+ assert adj.len("abc") == 3
+
+ assert adj.len("パンダ") == 6
+ assert adj.len("パンダ") == 5
+ assert adj.len("パンダpanda") == 11
+ assert adj.len("パンダpanda") == 10
+
+ def test_ambiguous_width(self):
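+ # '¡' (U+00A1) has East Asian Ambiguous width: one cell by default, two
+ # cells once ambiguous_as_wide is enabled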
+ adj = printing._EastAsianTextAdjustment()
+ assert adj.len("¡¡ab") == 4
+
+ with cf.option_context("display.unicode.ambiguous_as_wide", True):
+ adj = printing._EastAsianTextAdjustment()
+ assert adj.len("¡¡ab") == 6
+
+ data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "¡¡ab", "いいい"]]
+ expected = "あ dd ggg \nb ええ ¡¡ab\nc ff いいい"
+ adjoined = adj.adjoin(2, *data)
+ assert adjoined == expected
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..0db49a73621eab7fa59a76827a50b862fad41dca
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py
@@ -0,0 +1,758 @@
+import io
+import os
+import sys
+from zipfile import ZipFile
+
+from _csv import Error
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ compat,
+)
+import pandas._testing as tm
+
+
+class TestToCSV:
+ def test_to_csv_with_single_column(self):
+ # see gh-18676, https://bugs.python.org/issue32255
+ #
+ # Python's CSV library adds an extraneous '""'
+ # before the newline when the NaN value is in
+ # the first row. Otherwise, only the newline
+ # character is added. This behavior is inconsistent
+ # and was patched in https://bugs.python.org/pull_request4672.
+ df1 = DataFrame([None, 1])
+ expected1 = """\
+""
+1.0
+"""
+ with tm.ensure_clean("test.csv") as path:
+ df1.to_csv(path, header=None, index=None)
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected1
+
+ df2 = DataFrame([1, None])
+ expected2 = """\
+1.0
+""
+"""
+ with tm.ensure_clean("test.csv") as path:
+ df2.to_csv(path, header=None, index=None)
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected2
+
+ def test_to_csv_default_encoding(self):
+ # GH17097
+ df = DataFrame({"col": ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
+
+ with tm.ensure_clean("test.csv") as path:
+ # the default to_csv encoding is utf-8.
+ df.to_csv(path)
+ tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
+
+ def test_to_csv_quotechar(self):
+ df = DataFrame({"col": [1, 2]})
+ expected = """\
+"","col"
+"0","1"
+"1","2"
+"""
+
+ with tm.ensure_clean("test.csv") as path:
+ df.to_csv(path, quoting=1) # 1=QUOTE_ALL
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ expected = """\
+$$,$col$
+$0$,$1$
+$1$,$2$
+"""
+
+ with tm.ensure_clean("test.csv") as path:
+ df.to_csv(path, quoting=1, quotechar="$")
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ with tm.ensure_clean("test.csv") as path:
+ with pytest.raises(TypeError, match="quotechar"):
+ df.to_csv(path, quoting=1, quotechar=None)
+
+ def test_to_csv_doublequote(self):
+ df = DataFrame({"col": ['a"a', '"bb"']})
+ expected = '''\
+"","col"
+"0","a""a"
+"1","""bb"""
+'''
+
+ with tm.ensure_clean("test.csv") as path:
+ df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ with tm.ensure_clean("test.csv") as path:
+ with pytest.raises(Error, match="escapechar"):
+ df.to_csv(path, doublequote=False) # no escapechar set
+
+ def test_to_csv_escapechar(self):
+ df = DataFrame({"col": ['a"a', '"bb"']})
+ expected = """\
+"","col"
+"0","a\\"a"
+"1","\\"bb\\""
+"""
+
+ with tm.ensure_clean("test.csv") as path: # QUOTE_ALL
+ df.to_csv(path, quoting=1, doublequote=False, escapechar="\\")
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ df = DataFrame({"col": ["a,a", ",bb,"]})
+ expected = """\
+,col
+0,a\\,a
+1,\\,bb\\,
+"""
+
+ with tm.ensure_clean("test.csv") as path:
+ df.to_csv(path, quoting=3, escapechar="\\") # QUOTE_NONE
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ def test_csv_to_string(self):
+ df = DataFrame({"col": [1, 2]})
+ expected_rows = [",col", "0,1", "1,2"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv() == expected
+
+ def test_to_csv_decimal(self):
+ # see gh-781
+ df = DataFrame({"col1": [1], "col2": ["a"], "col3": [10.1]})
+
+ expected_rows = [",col1,col2,col3", "0,1,a,10.1"]
+ expected_default = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv() == expected_default
+
+ expected_rows = [";col1;col2;col3", "0;1;a;10,1"]
+ expected_european_excel = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv(decimal=",", sep=";") == expected_european_excel
+
+ expected_rows = [",col1,col2,col3", "0,1,a,10.10"]
+ expected_float_format_default = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv(float_format="%.2f") == expected_float_format_default
+
+ expected_rows = [";col1;col2;col3", "0;1;a;10,10"]
+ expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert (
+ df.to_csv(decimal=",", sep=";", float_format="%.2f")
+ == expected_float_format
+ )
+
+ # see gh-11553: testing if decimal is taken into account for '0.0'
+ df = DataFrame({"a": [0, 1.1], "b": [2.2, 3.3], "c": 1})
+
+ expected_rows = ["a,b,c", "0^0,2^2,1", "1^1,3^3,1"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv(index=False, decimal="^") == expected
+
+ # same but for an index
+ assert df.set_index("a").to_csv(decimal="^") == expected
+
+ # same for a multi-index
+ assert df.set_index(["a", "b"]).to_csv(decimal="^") == expected
+
+ def test_to_csv_float_format(self):
+ # testing if float_format is taken into account for the index
+ # GH 11553
+ df = DataFrame({"a": [0, 1], "b": [2.2, 3.3], "c": 1})
+
+ expected_rows = ["a,b,c", "0,2.20,1", "1,3.30,1"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.set_index("a").to_csv(float_format="%.2f") == expected
+
+ # same for a multi-index
+ assert df.set_index(["a", "b"]).to_csv(float_format="%.2f") == expected
+
+ def test_to_csv_na_rep(self):
+ # see gh-11553
+ #
+ # Testing if NaN values are correctly represented in the index.
+ df = DataFrame({"a": [0, np.nan], "b": [0, 1], "c": [2, 3]})
+ expected_rows = ["a,b,c", "0.0,0,2", "_,1,3"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ assert df.set_index("a").to_csv(na_rep="_") == expected
+ assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
+
+ # now with an index containing only NaNs
+ df = DataFrame({"a": np.nan, "b": [0, 1], "c": [2, 3]})
+ expected_rows = ["a,b,c", "_,0,2", "_,1,3"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ assert df.set_index("a").to_csv(na_rep="_") == expected
+ assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
+
+ # check that na_rep does not break anything when there are no NaNs
+ df = DataFrame({"a": 0, "b": [0, 1], "c": [2, 3]})
+ expected_rows = ["a,b,c", "0,0,2", "0,1,3"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ assert df.set_index("a").to_csv(na_rep="_") == expected
+ assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
+
+ csv = pd.Series(["a", pd.NA, "c"]).to_csv(na_rep="ZZZZZ")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,a", "1,ZZZZZ", "2,c"])
+ assert expected == csv
+
+ def test_to_csv_na_rep_nullable_string(self, nullable_string_dtype):
+ # GH 29975
+ # Make sure full na_rep shows up when a dtype is provided
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,a", "1,ZZZZZ", "2,c"])
+ csv = pd.Series(["a", pd.NA, "c"], dtype=nullable_string_dtype).to_csv(
+ na_rep="ZZZZZ"
+ )
+ assert expected == csv
+
+ def test_to_csv_date_format(self):
+ # GH 10209
+ df_sec = DataFrame({"A": pd.date_range("20130101", periods=5, freq="s")})
+ df_day = DataFrame({"A": pd.date_range("20130101", periods=5, freq="d")})
+
+ expected_rows = [
+ ",A",
+ "0,2013-01-01 00:00:00",
+ "1,2013-01-01 00:00:01",
+ "2,2013-01-01 00:00:02",
+ "3,2013-01-01 00:00:03",
+ "4,2013-01-01 00:00:04",
+ ]
+ expected_default_sec = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df_sec.to_csv() == expected_default_sec
+
+ expected_rows = [
+ ",A",
+ "0,2013-01-01 00:00:00",
+ "1,2013-01-02 00:00:00",
+ "2,2013-01-03 00:00:00",
+ "3,2013-01-04 00:00:00",
+ "4,2013-01-05 00:00:00",
+ ]
+ expected_ymdhms_day = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df_day.to_csv(date_format="%Y-%m-%d %H:%M:%S") == expected_ymdhms_day
+
+ expected_rows = [
+ ",A",
+ "0,2013-01-01",
+ "1,2013-01-01",
+ "2,2013-01-01",
+ "3,2013-01-01",
+ "4,2013-01-01",
+ ]
+ expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df_sec.to_csv(date_format="%Y-%m-%d") == expected_ymd_sec
+
+ expected_rows = [
+ ",A",
+ "0,2013-01-01",
+ "1,2013-01-02",
+ "2,2013-01-03",
+ "3,2013-01-04",
+ "4,2013-01-05",
+ ]
+ expected_default_day = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df_day.to_csv() == expected_default_day
+ assert df_day.to_csv(date_format="%Y-%m-%d") == expected_default_day
+
+ # see gh-7791
+ #
+ # Testing if date_format parameter is taken into account
+ # for multi-indexed DataFrames.
+ df_sec["B"] = 0
+ df_sec["C"] = 1
+
+ expected_rows = ["A,B,C", "2013-01-01,0,1.0"]
+ expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ df_sec_grouped = df_sec.groupby([pd.Grouper(key="A", freq="1h"), "B"])
+ assert df_sec_grouped.mean().to_csv(date_format="%Y-%m-%d") == expected_ymd_sec
+
+ def test_to_csv_different_datetime_formats(self):
+ # GH#21734
+ df = DataFrame(
+ {
+ "date": pd.to_datetime("1970-01-01"),
+ "datetime": pd.date_range("1970-01-01", periods=2, freq="h"),
+ }
+ )
+ expected_rows = [
+ "date,datetime",
+ "1970-01-01,1970-01-01 00:00:00",
+ "1970-01-01,1970-01-01 01:00:00",
+ ]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv(index=False) == expected
+
+ def test_to_csv_date_format_in_categorical(self):
+ # GH#40754
+ ser = pd.Series(pd.to_datetime(["2021-03-27", pd.NaT], format="%Y-%m-%d"))
+ ser = ser.astype("category")
+ expected = tm.convert_rows_list_to_csv_str(["0", "2021-03-27", '""'])
+ assert ser.to_csv(index=False) == expected
+
+ ser = pd.Series(
+ pd.date_range(
+ start="2021-03-27", freq="D", periods=1, tz="Europe/Berlin"
+ ).append(pd.DatetimeIndex([pd.NaT]))
+ )
+ ser = ser.astype("category")
+ assert ser.to_csv(index=False, date_format="%Y-%m-%d") == expected
+
+ def test_to_csv_float_ea_float_format(self):
+ # GH#45991
+ df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"})
+ df["a"] = df["a"].astype("Float64")
+ result = df.to_csv(index=False, float_format="%.5f")
+ expected = tm.convert_rows_list_to_csv_str(
+ ["a,b", "1.10000,c", "2.02000,c", ",c", "6.00001,c"]
+ )
+ assert result == expected
+
+ def test_to_csv_float_ea_no_float_format(self):
+ # GH#45991
+ df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"})
+ df["a"] = df["a"].astype("Float64")
+ result = df.to_csv(index=False)
+ expected = tm.convert_rows_list_to_csv_str(
+ ["a,b", "1.1,c", "2.02,c", ",c", "6.000006,c"]
+ )
+ assert result == expected
+
+ def test_to_csv_multi_index(self):
+ # see gh-6618
+ df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
+
+ exp_rows = [",1", ",2", "0,1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv() == exp
+
+ exp_rows = ["1", "2", "1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv(index=False) == exp
+
+ df = DataFrame(
+ [1],
+ columns=pd.MultiIndex.from_arrays([[1], [2]]),
+ index=pd.MultiIndex.from_arrays([[1], [2]]),
+ )
+
+ exp_rows = [",,1", ",,2", "1,2,1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv() == exp
+
+ exp_rows = ["1", "2", "1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv(index=False) == exp
+
+ df = DataFrame([1], columns=pd.MultiIndex.from_arrays([["foo"], ["bar"]]))
+
+ exp_rows = [",foo", ",bar", "0,1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv() == exp
+
+ exp_rows = ["foo", "bar", "1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv(index=False) == exp
+
+ @pytest.mark.parametrize(
+ "ind,expected",
+ [
+ (
+ pd.MultiIndex(levels=[[1.0]], codes=[[0]], names=["x"]),
+ "x,data\n1.0,1\n",
+ ),
+ (
+ pd.MultiIndex(
+ levels=[[1.0], [2.0]], codes=[[0], [0]], names=["x", "y"]
+ ),
+ "x,y,data\n1.0,2.0,1\n",
+ ),
+ ],
+ )
+ def test_to_csv_single_level_multi_index(self, ind, expected, frame_or_series):
+ # see gh-19589
+ obj = frame_or_series(pd.Series([1], ind, name="data"))
+
+ result = obj.to_csv(lineterminator="\n", header=True)
+ assert result == expected
+
+ def test_to_csv_string_array_ascii(self):
+ # GH 10813
+ str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
+ df = DataFrame(str_array)
+ expected_ascii = """\
+,names
+0,"['foo', 'bar']"
+1,"['baz', 'qux']"
+"""
+ with tm.ensure_clean("str_test.csv") as path:
+ df.to_csv(path, encoding="ascii")
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected_ascii
+
+ def test_to_csv_string_array_utf8(self):
+ # GH 10813
+ str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
+ df = DataFrame(str_array)
+ expected_utf8 = """\
+,names
+0,"['foo', 'bar']"
+1,"['baz', 'qux']"
+"""
+ with tm.ensure_clean("unicode_test.csv") as path:
+ df.to_csv(path, encoding="utf-8")
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected_utf8
+
+ def test_to_csv_string_with_lf(self):
+ # GH 20353
+ data = {"int": [1, 2, 3], "str_lf": ["abc", "d\nef", "g\nh\n\ni"]}
+ df = DataFrame(data)
+ with tm.ensure_clean("lf_test.csv") as path:
+ # case 1: the default line terminator (os.linesep; see PR 21406)
+ os_linesep = os.linesep.encode("utf-8")
+ expected_noarg = (
+ b"int,str_lf"
+ + os_linesep
+ + b"1,abc"
+ + os_linesep
+ + b'2,"d\nef"'
+ + os_linesep
+ + b'3,"g\nh\n\ni"'
+ + os_linesep
+ )
+ df.to_csv(path, index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_noarg
+ with tm.ensure_clean("lf_test.csv") as path:
+ # case 2: LF as line terminator
+ expected_lf = b'int,str_lf\n1,abc\n2,"d\nef"\n3,"g\nh\n\ni"\n'
+ df.to_csv(path, lineterminator="\n", index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_lf
+ with tm.ensure_clean("lf_test.csv") as path:
+ # case 3: CRLF as line terminator
+ # 'lineterminator' should not alter newlines embedded in quoted values
+ expected_crlf = b'int,str_lf\r\n1,abc\r\n2,"d\nef"\r\n3,"g\nh\n\ni"\r\n'
+ df.to_csv(path, lineterminator="\r\n", index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_crlf
+
+ def test_to_csv_string_with_crlf(self):
+ # GH 20353
+ data = {"int": [1, 2, 3], "str_crlf": ["abc", "d\r\nef", "g\r\nh\r\n\r\ni"]}
+ df = DataFrame(data)
+ with tm.ensure_clean("crlf_test.csv") as path:
+ # case 1: the default line terminator (os.linesep; see PR 21406)
+ os_linesep = os.linesep.encode("utf-8")
+ expected_noarg = (
+ b"int,str_crlf"
+ + os_linesep
+ + b"1,abc"
+ + os_linesep
+ + b'2,"d\r\nef"'
+ + os_linesep
+ + b'3,"g\r\nh\r\n\r\ni"'
+ + os_linesep
+ )
+ df.to_csv(path, index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_noarg
+ with tm.ensure_clean("crlf_test.csv") as path:
+ # case 2: LF as line terminator
+ expected_lf = b'int,str_crlf\n1,abc\n2,"d\r\nef"\n3,"g\r\nh\r\n\r\ni"\n'
+ df.to_csv(path, lineterminator="\n", index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_lf
+ with tm.ensure_clean("crlf_test.csv") as path:
+ # case 3: CRLF as line terminator
+ # 'lineterminator' should not alter newlines embedded in quoted values
+ expected_crlf = (
+ b"int,str_crlf\r\n"
+ b"1,abc\r\n"
+ b'2,"d\r\nef"\r\n'
+ b'3,"g\r\nh\r\n\r\ni"\r\n'
+ )
+ df.to_csv(path, lineterminator="\r\n", index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_crlf
+
+ def test_to_csv_stdout_file(self, capsys):
+ # GH 21561
+ df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["name_1", "name_2"])
+ expected_rows = [",name_1,name_2", "0,foo,bar", "1,baz,qux"]
+ expected_ascii = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ df.to_csv(sys.stdout, encoding="ascii")
+ captured = capsys.readouterr()
+
+ assert captured.out == expected_ascii
+ assert not sys.stdout.closed
+
+ @pytest.mark.xfail(
+ compat.is_platform_windows(),
+ reason=(
+ "Especially on Windows, the file stream should not be passed "
+ "to the csv writer without the newline='' option "
+ "(https://docs.python.org/3/library/csv.html#csv.writer)"
+ ),
+ )
+ def test_to_csv_write_to_open_file(self):
+ # GH 21696
+ df = DataFrame({"a": ["x", "y", "z"]})
+ expected = """\
+manual header
+x
+y
+z
+"""
+ with tm.ensure_clean("test.txt") as path:
+ with open(path, "w", encoding="utf-8") as f:
+ f.write("manual header\n")
+ df.to_csv(f, header=None, index=None)
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ def test_to_csv_write_to_open_file_with_newline_py3(self):
+ # see gh-21696
+ # see gh-20353
+ df = DataFrame({"a": ["x", "y", "z"]})
+ expected_rows = ["x", "y", "z"]
+ expected = "manual header\n" + tm.convert_rows_list_to_csv_str(expected_rows)
+ with tm.ensure_clean("test.txt") as path:
+ with open(path, "w", newline="", encoding="utf-8") as f:
+ f.write("manual header\n")
+ df.to_csv(f, header=None, index=None)
+
+ with open(path, "rb") as f:
+ assert f.read() == bytes(expected, "utf-8")
+
+ @pytest.mark.parametrize("to_infer", [True, False])
+ @pytest.mark.parametrize("read_infer", [True, False])
+ def test_to_csv_compression(
+ self, compression_only, read_infer, to_infer, compression_to_extension
+ ):
+ # see gh-15008
+ compression = compression_only
+
+ # The file extension is appended below, based on the compression method.
+ filename = "test."
+ filename += compression_to_extension[compression]
+
+ df = DataFrame({"A": [1]})
+
+ to_compression = "infer" if to_infer else compression
+ read_compression = "infer" if read_infer else compression
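+ # "infer" derives the compression codec from the file extension chosen
+ # above, on whichever side (write or read) it is used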
+
+ with tm.ensure_clean(filename) as path:
+ df.to_csv(path, compression=to_compression)
+ result = pd.read_csv(path, index_col=0, compression=read_compression)
+ tm.assert_frame_equal(result, df)
+
+ def test_to_csv_compression_dict(self, compression_only):
+ # GH 26023
+ method = compression_only
+ df = DataFrame({"ABC": [1]})
+ filename = "to_csv_compress_as_dict."
+ extension = {
+ "gzip": "gz",
+ "zstd": "zst",
+ }.get(method, method)
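+ # gzip and zstd use file extensions that differ from the method name;
+ # every other method matches its extension one-to-one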
+ filename += extension
+ with tm.ensure_clean(filename) as path:
+ df.to_csv(path, compression={"method": method})
+ read_df = pd.read_csv(path, index_col=0)
+ tm.assert_frame_equal(read_df, df)
+
+ def test_to_csv_compression_dict_no_method_raises(self):
+ # GH 26023
+ df = DataFrame({"ABC": [1]})
+ compression = {"some_option": True}
+ msg = "must have key 'method'"
+
+ with tm.ensure_clean("out.zip") as path:
+ with pytest.raises(ValueError, match=msg):
+ df.to_csv(path, compression=compression)
+
+ @pytest.mark.parametrize("compression", ["zip", "infer"])
+ @pytest.mark.parametrize("archive_name", ["test_to_csv.csv", "test_to_csv.zip"])
+ def test_to_csv_zip_arguments(self, compression, archive_name):
+ # GH 26023
+ df = DataFrame({"ABC": [1]})
+ with tm.ensure_clean("to_csv_archive_name.zip") as path:
+ df.to_csv(
+ path, compression={"method": compression, "archive_name": archive_name}
+ )
+ with ZipFile(path) as zp:
+ assert len(zp.filelist) == 1
+ archived_file = zp.filelist[0].filename
+ assert archived_file == archive_name
+
+ @pytest.mark.parametrize(
+ "filename,expected_arcname",
+ [
+ ("archive.csv", "archive.csv"),
+ ("archive.tsv", "archive.tsv"),
+ ("archive.csv.zip", "archive.csv"),
+ ("archive.tsv.zip", "archive.tsv"),
+ ("archive.zip", "archive"),
+ ],
+ )
+ def test_to_csv_zip_infer_name(self, tmp_path, filename, expected_arcname):
+ # GH 39465
+ df = DataFrame({"ABC": [1]})
+ path = tmp_path / filename
+ df.to_csv(path, compression="zip")
+ with ZipFile(path) as zp:
+ assert len(zp.filelist) == 1
+ archived_file = zp.filelist[0].filename
+ assert archived_file == expected_arcname
+
+ @pytest.mark.parametrize("df_new_type", ["Int64"])
+ def test_to_csv_na_rep_long_string(self, df_new_type):
+ # see gh-25099
+ df = DataFrame({"c": [float("nan")] * 3})
+ df = df.astype(df_new_type)
+ expected_rows = ["c", "mynull", "mynull", "mynull"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ result = df.to_csv(index=False, na_rep="mynull", encoding="ascii")
+
+ assert expected == result
+
+ def test_to_csv_timedelta_precision(self):
+ # GH 6783
+ s = pd.Series([1, 1]).astype("timedelta64[ns]")
+ buf = io.StringIO()
+ s.to_csv(buf)
+ result = buf.getvalue()
+ expected_rows = [
+ ",0",
+ "0,0 days 00:00:00.000000001",
+ "1,0 days 00:00:00.000000001",
+ ]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert result == expected
+
+ def test_na_rep_truncated(self):
+ # https://github.com/pandas-dev/pandas/issues/31447
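+ # na_rep replaces missing values only; with no NaN present the original
+ # values must come through unchanged, however long or short na_rep is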
+ result = pd.Series(range(8, 12)).to_csv(na_rep="-")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,8", "1,9", "2,10", "3,11"])
+ assert result == expected
+
+ result = pd.Series([True, False]).to_csv(na_rep="nan")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,True", "1,False"])
+ assert result == expected
+
+ result = pd.Series([1.1, 2.2]).to_csv(na_rep=".")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,1.1", "1,2.2"])
+ assert result == expected
+
+ @pytest.mark.parametrize("errors", ["surrogatepass", "ignore", "replace"])
+ def test_to_csv_errors(self, errors):
+ # GH 22610
+ data = ["\ud800foo"]
+ ser = pd.Series(data, index=Index(data, dtype=object), dtype=object)
+ with tm.ensure_clean("test.csv") as path:
+ ser.to_csv(path, errors=errors)
+ # No use in reading back the data as it is not the same anymore
+ # due to the error handling
+
+ @pytest.mark.parametrize("mode", ["wb", "w"])
+ def test_to_csv_binary_handle(self, mode):
+ """
+ Binary file objects should work if 'mode' contains a 'b', and in most
+ cases even without it.
+
+ GH 35058 and GH 19827
+ """
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD")),
+ index=Index([f"i-{i}" for i in range(30)]),
+ )
+ with tm.ensure_clean() as path:
+ with open(path, mode="w+b") as handle:
+ df.to_csv(handle, mode=mode)
+ tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
+
+ @pytest.mark.parametrize("mode", ["wb", "w"])
+ def test_to_csv_encoding_binary_handle(self, mode):
+ """
+ Binary file objects should honor a specified encoding.
+
+ GH 23854 and GH 13068 with binary handles
+ """
+ # example from GH 23854
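+ # "utf-8-sig" prepends a UTF-8 BOM (b"\xef\xbb\xbf"); writing back with
+ # the same encoding should reproduce the original bytes, BOM included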
+ content = "a, b, 🐟".encode("utf-8-sig")
+ buffer = io.BytesIO(content)
+ df = pd.read_csv(buffer, encoding="utf-8-sig")
+
+ buffer = io.BytesIO()
+ df.to_csv(buffer, mode=mode, encoding="utf-8-sig", index=False)
+ buffer.seek(0) # tests whether file handle wasn't closed
+ assert buffer.getvalue().startswith(content)
+
+ # example from GH 13068
+ with tm.ensure_clean() as path:
+ with open(path, "w+b") as handle:
+ DataFrame().to_csv(handle, mode=mode, encoding="utf-8-sig")
+
+ handle.seek(0)
+ assert handle.read().startswith(b'\xef\xbb\xbf""')
+
+
+def test_to_csv_iterative_compression_name(compression):
+ # GH 38714
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD")),
+ index=Index([f"i-{i}" for i in range(30)]),
+ )
+ with tm.ensure_clean() as path:
+ df.to_csv(path, compression=compression, chunksize=1)
+ tm.assert_frame_equal(
+ pd.read_csv(path, compression=compression, index_col=0), df
+ )
+
+
+def test_to_csv_iterative_compression_buffer(compression):
+ # GH 38714
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD")),
+ index=Index([f"i-{i}" for i in range(30)]),
+ )
+ with io.BytesIO() as buffer:
+ df.to_csv(buffer, compression=compression, chunksize=1)
+ buffer.seek(0)
+ tm.assert_frame_equal(
+ pd.read_csv(buffer, compression=compression, index_col=0), df
+ )
+ assert not buffer.closed
+
+
+def test_to_csv_pos_args_deprecation():
+ # GH-54229
+ df = DataFrame({"a": [1, 2, 3]})
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_csv except for the "
+ r"argument 'path_or_buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ buffer = io.BytesIO()
+ df.to_csv(buffer, ";")
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py
new file mode 100644
index 0000000000000000000000000000000000000000..927a9f4961f6ff7ae51f74aceb0cb36dc6754c21
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py
@@ -0,0 +1,429 @@
+"""Tests formatting as writer-agnostic ExcelCells
+
+ExcelFormatter is tested implicitly in pandas/tests/io/excel
+"""
+import string
+
+import pytest
+
+from pandas.errors import CSSWarning
+
+import pandas._testing as tm
+
+from pandas.io.formats.excel import (
+ CssExcelCell,
+ CSSToExcelConverter,
+)
+
+
+@pytest.mark.parametrize(
+ "css,expected",
+ [
+ # FONT
+ # - name
+ ("font-family: foo,bar", {"font": {"name": "foo"}}),
+ ('font-family: "foo bar",baz', {"font": {"name": "foo bar"}}),
+ ("font-family: foo,\nbar", {"font": {"name": "foo"}}),
+ ("font-family: foo, bar, baz", {"font": {"name": "foo"}}),
+ ("font-family: bar, foo", {"font": {"name": "bar"}}),
+ ("font-family: 'foo bar', baz", {"font": {"name": "foo bar"}}),
+ ("font-family: 'foo \\'bar', baz", {"font": {"name": "foo 'bar"}}),
+ ('font-family: "foo \\"bar", baz', {"font": {"name": 'foo "bar'}}),
+ ('font-family: "foo ,bar", baz', {"font": {"name": "foo ,bar"}}),
+ # - family
+ ("font-family: serif", {"font": {"name": "serif", "family": 1}}),
+ ("font-family: Serif", {"font": {"name": "serif", "family": 1}}),
+ ("font-family: roman, serif", {"font": {"name": "roman", "family": 1}}),
+ ("font-family: roman, sans-serif", {"font": {"name": "roman", "family": 2}}),
+ ("font-family: roman, sans serif", {"font": {"name": "roman"}}),
+ ("font-family: roman, sansserif", {"font": {"name": "roman"}}),
+ ("font-family: roman, cursive", {"font": {"name": "roman", "family": 4}}),
+ ("font-family: roman, fantasy", {"font": {"name": "roman", "family": 5}}),
+ # - size
+ ("font-size: 1em", {"font": {"size": 12}}),
+ ("font-size: xx-small", {"font": {"size": 6}}),
+ ("font-size: x-small", {"font": {"size": 7.5}}),
+ ("font-size: small", {"font": {"size": 9.6}}),
+ ("font-size: medium", {"font": {"size": 12}}),
+ ("font-size: large", {"font": {"size": 13.5}}),
+ ("font-size: x-large", {"font": {"size": 18}}),
+ ("font-size: xx-large", {"font": {"size": 24}}),
+ ("font-size: 50%", {"font": {"size": 6}}),
+ # - bold
+ ("font-weight: 100", {"font": {"bold": False}}),
+ ("font-weight: 200", {"font": {"bold": False}}),
+ ("font-weight: 300", {"font": {"bold": False}}),
+ ("font-weight: 400", {"font": {"bold": False}}),
+ ("font-weight: normal", {"font": {"bold": False}}),
+ ("font-weight: lighter", {"font": {"bold": False}}),
+ ("font-weight: bold", {"font": {"bold": True}}),
+ ("font-weight: bolder", {"font": {"bold": True}}),
+ ("font-weight: 700", {"font": {"bold": True}}),
+ ("font-weight: 800", {"font": {"bold": True}}),
+ ("font-weight: 900", {"font": {"bold": True}}),
+ # - italic
+ ("font-style: italic", {"font": {"italic": True}}),
+ ("font-style: oblique", {"font": {"italic": True}}),
+ # - underline
+ ("text-decoration: underline", {"font": {"underline": "single"}}),
+ ("text-decoration: overline", {}),
+ ("text-decoration: none", {}),
+ # - strike
+ ("text-decoration: line-through", {"font": {"strike": True}}),
+ (
+ "text-decoration: underline line-through",
+ {"font": {"strike": True, "underline": "single"}},
+ ),
+ (
+ "text-decoration: underline; text-decoration: line-through",
+ {"font": {"strike": True}},
+ ),
+ # - color
+ ("color: red", {"font": {"color": "FF0000"}}),
+ ("color: #ff0000", {"font": {"color": "FF0000"}}),
+ ("color: #f0a", {"font": {"color": "FF00AA"}}),
+ # - shadow
+ ("text-shadow: none", {"font": {"shadow": False}}),
+ ("text-shadow: 0px -0em 0px #CCC", {"font": {"shadow": False}}),
+ ("text-shadow: 0px -0em 0px #999", {"font": {"shadow": False}}),
+ ("text-shadow: 0px -0em 0px", {"font": {"shadow": False}}),
+ ("text-shadow: 2px -0em 0px #CCC", {"font": {"shadow": True}}),
+ ("text-shadow: 0px -2em 0px #CCC", {"font": {"shadow": True}}),
+ ("text-shadow: 0px -0em 2px #CCC", {"font": {"shadow": True}}),
+ ("text-shadow: 0px -0em 2px", {"font": {"shadow": True}}),
+ ("text-shadow: 0px -2em", {"font": {"shadow": True}}),
+ # FILL
+ # - color, fillType
+ (
+ "background-color: red",
+ {"fill": {"fgColor": "FF0000", "patternType": "solid"}},
+ ),
+ (
+ "background-color: #ff0000",
+ {"fill": {"fgColor": "FF0000", "patternType": "solid"}},
+ ),
+ (
+ "background-color: #f0a",
+ {"fill": {"fgColor": "FF00AA", "patternType": "solid"}},
+ ),
+ # BORDER
+ # - style
+ (
+ "border-style: solid",
+ {
+ "border": {
+ "top": {"style": "medium"},
+ "bottom": {"style": "medium"},
+ "left": {"style": "medium"},
+ "right": {"style": "medium"},
+ }
+ },
+ ),
+ (
+ "border-style: solid; border-width: thin",
+ {
+ "border": {
+ "top": {"style": "thin"},
+ "bottom": {"style": "thin"},
+ "left": {"style": "thin"},
+ "right": {"style": "thin"},
+ }
+ },
+ ),
+ (
+ "border-top-style: solid; border-top-width: thin",
+ {"border": {"top": {"style": "thin"}}},
+ ),
+ (
+ "border-top-style: solid; border-top-width: 1pt",
+ {"border": {"top": {"style": "thin"}}},
+ ),
+ ("border-top-style: solid", {"border": {"top": {"style": "medium"}}}),
+ (
+ "border-top-style: solid; border-top-width: medium",
+ {"border": {"top": {"style": "medium"}}},
+ ),
+ (
+ "border-top-style: solid; border-top-width: 2pt",
+ {"border": {"top": {"style": "medium"}}},
+ ),
+ (
+ "border-top-style: solid; border-top-width: thick",
+ {"border": {"top": {"style": "thick"}}},
+ ),
+ (
+ "border-top-style: solid; border-top-width: 4pt",
+ {"border": {"top": {"style": "thick"}}},
+ ),
+ (
+ "border-top-style: dotted",
+ {"border": {"top": {"style": "mediumDashDotDot"}}},
+ ),
+ (
+ "border-top-style: dotted; border-top-width: thin",
+ {"border": {"top": {"style": "dotted"}}},
+ ),
+ ("border-top-style: dashed", {"border": {"top": {"style": "mediumDashed"}}}),
+ (
+ "border-top-style: dashed; border-top-width: thin",
+ {"border": {"top": {"style": "dashed"}}},
+ ),
+ ("border-top-style: double", {"border": {"top": {"style": "double"}}}),
+ # - color
+ (
+ "border-style: solid; border-color: #0000ff",
+ {
+ "border": {
+ "top": {"style": "medium", "color": "0000FF"},
+ "right": {"style": "medium", "color": "0000FF"},
+ "bottom": {"style": "medium", "color": "0000FF"},
+ "left": {"style": "medium", "color": "0000FF"},
+ }
+ },
+ ),
+ (
+ "border-top-style: double; border-top-color: blue",
+ {"border": {"top": {"style": "double", "color": "0000FF"}}},
+ ),
+ (
+ "border-top-style: solid; border-top-color: #06c",
+ {"border": {"top": {"style": "medium", "color": "0066CC"}}},
+ ),
+ (
+ "border-top-color: blue",
+ {"border": {"top": {"color": "0000FF", "style": "none"}}},
+ ),
+ # ALIGNMENT
+ # - horizontal
+ ("text-align: center", {"alignment": {"horizontal": "center"}}),
+ ("text-align: left", {"alignment": {"horizontal": "left"}}),
+ ("text-align: right", {"alignment": {"horizontal": "right"}}),
+ ("text-align: justify", {"alignment": {"horizontal": "justify"}}),
+ # - vertical
+ ("vertical-align: top", {"alignment": {"vertical": "top"}}),
+ ("vertical-align: text-top", {"alignment": {"vertical": "top"}}),
+ ("vertical-align: middle", {"alignment": {"vertical": "center"}}),
+ ("vertical-align: bottom", {"alignment": {"vertical": "bottom"}}),
+ ("vertical-align: text-bottom", {"alignment": {"vertical": "bottom"}}),
+ # - wrap_text
+ ("white-space: nowrap", {"alignment": {"wrap_text": False}}),
+ ("white-space: pre", {"alignment": {"wrap_text": False}}),
+ ("white-space: pre-line", {"alignment": {"wrap_text": False}}),
+ ("white-space: normal", {"alignment": {"wrap_text": True}}),
+ # NUMBER FORMAT
+ ("number-format: 0%", {"number_format": {"format_code": "0%"}}),
+ (
+ "number-format: 0§[Red](0)§-§@;",
+ {"number_format": {"format_code": "0;[red](0);-;@"}}, # GH 46152
+ ),
+ ],
+)
+def test_css_to_excel(css, expected):
+ convert = CSSToExcelConverter()
+ assert expected == convert(css)
+
+
+def test_css_to_excel_multiple():
+ convert = CSSToExcelConverter()
+ actual = convert(
+ """
+ font-weight: bold;
+ text-decoration: underline;
+ color: red;
+ border-width: thin;
+ text-align: center;
+ vertical-align: top;
+ unused: something;
+ """
+ )
+ assert {
+ "font": {"bold": True, "underline": "single", "color": "FF0000"},
+ "border": {
+ "top": {"style": "thin"},
+ "right": {"style": "thin"},
+ "bottom": {"style": "thin"},
+ "left": {"style": "thin"},
+ },
+ "alignment": {"horizontal": "center", "vertical": "top"},
+ } == actual
+
+
+@pytest.mark.parametrize(
+ "css,inherited,expected",
+ [
+ ("font-weight: bold", "", {"font": {"bold": True}}),
+ ("", "font-weight: bold", {"font": {"bold": True}}),
+ (
+ "font-weight: bold",
+ "font-style: italic",
+ {"font": {"bold": True, "italic": True}},
+ ),
+ ("font-style: normal", "font-style: italic", {"font": {"italic": False}}),
+ ("font-style: inherit", "", {}),
+ (
+ "font-style: normal; font-style: inherit",
+ "font-style: italic",
+ {"font": {"italic": True}},
+ ),
+ ],
+)
+def test_css_to_excel_inherited(css, inherited, expected):
+ convert = CSSToExcelConverter(inherited)
+ assert expected == convert(css)
+
+
+@pytest.mark.parametrize(
+ "input_color,output_color",
+ (
+ list(CSSToExcelConverter.NAMED_COLORS.items())
+ + [("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()]
+ + [("#F0F", "FF00FF"), ("#ABC", "AABBCC")]
+ ),
+)
+def test_css_to_excel_good_colors(input_color, output_color):
+ # see gh-18392
+ css = (
+ f"border-top-color: {input_color}; "
+ f"border-right-color: {input_color}; "
+ f"border-bottom-color: {input_color}; "
+ f"border-left-color: {input_color}; "
+ f"background-color: {input_color}; "
+ f"color: {input_color}"
+ )
+
+ expected = {}
+
+ expected["fill"] = {"patternType": "solid", "fgColor": output_color}
+
+ expected["font"] = {"color": output_color}
+
+ expected["border"] = {
+ k: {"color": output_color, "style": "none"}
+ for k in ("top", "right", "bottom", "left")
+ }
+
+ with tm.assert_produces_warning(None):
+ convert = CSSToExcelConverter()
+ assert expected == convert(css)
+
+
+@pytest.mark.parametrize("input_color", [None, "not-a-color"])
+def test_css_to_excel_bad_colors(input_color):
+ # see gh-18392
+ css = (
+ f"border-top-color: {input_color}; "
+ f"border-right-color: {input_color}; "
+ f"border-bottom-color: {input_color}; "
+ f"border-left-color: {input_color}; "
+ f"background-color: {input_color}; "
+ f"color: {input_color}"
+ )
+
+ expected = {}
+
+ if input_color is not None:
+ expected["fill"] = {"patternType": "solid"}
+
+ with tm.assert_produces_warning(CSSWarning):
+ convert = CSSToExcelConverter()
+ assert expected == convert(css)
+
+
+def tests_css_named_colors_valid():
+ upper_hexs = set(map(str.upper, string.hexdigits))
+ for color in CSSToExcelConverter.NAMED_COLORS.values():
+ assert len(color) == 6 and all(c in upper_hexs for c in color)
+
+
+def test_css_named_colors_from_mpl_present():
+ mpl_colors = pytest.importorskip("matplotlib.colors")
+
+ pd_colors = CSSToExcelConverter.NAMED_COLORS
+ for name, color in mpl_colors.CSS4_COLORS.items():
+ assert name in pd_colors and pd_colors[name] == color[1:]
+
+
+@pytest.mark.parametrize(
+ "styles,expected",
+ [
+ ([("color", "green"), ("color", "red")], "color: red;"),
+ ([("font-weight", "bold"), ("font-weight", "normal")], "font-weight: normal;"),
+ ([("text-align", "center"), ("TEXT-ALIGN", "right")], "text-align: right;"),
+ ],
+)
+def test_css_excel_cell_precedence(styles, expected):
+ """It favors later declarations over earlier ones"""
+ # See GH 47371
+ converter = CSSToExcelConverter()
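+ # start from an empty conversion cache so this cell's style cannot be
+ # served from an earlier test's entries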
+ converter._call_cached.cache_clear()
+ css_styles = {(0, 0): styles}
+ cell = CssExcelCell(
+ row=0,
+ col=0,
+ val="",
+ style=None,
+ css_styles=css_styles,
+ css_row=0,
+ css_col=0,
+ css_converter=converter,
+ )
+ converter._call_cached.cache_clear()
+
+ assert cell.style == converter(expected)
+
+
+@pytest.mark.parametrize(
+ "styles,cache_hits,cache_misses",
+ [
+ ([[("color", "green"), ("color", "red"), ("color", "green")]], 0, 1),
+ (
+ [
+ [("font-weight", "bold")],
+ [("font-weight", "normal"), ("font-weight", "bold")],
+ ],
+ 1,
+ 1,
+ ),
+ ([[("text-align", "center")], [("TEXT-ALIGN", "center")]], 1, 1),
+ (
+ [
+ [("font-weight", "bold"), ("text-align", "center")],
+ [("font-weight", "bold"), ("text-align", "left")],
+ ],
+ 0,
+ 2,
+ ),
+ (
+ [
+ [("font-weight", "bold"), ("text-align", "center")],
+ [("font-weight", "bold"), ("text-align", "left")],
+ [("font-weight", "bold"), ("text-align", "center")],
+ ],
+ 1,
+ 2,
+ ),
+ ],
+)
+def test_css_excel_cell_cache(styles, cache_hits, cache_misses):
+ """It caches unique cell styles"""
+ # See GH 47371
+ converter = CSSToExcelConverter()
+ converter._call_cached.cache_clear()
+
+ css_styles = {(0, i): _style for i, _style in enumerate(styles)}
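+ # one cell per style list, all in row 0; constructing each CssExcelCell
+ # converts its CSS and either hits or misses the cache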
+ for css_row, css_col in css_styles:
+ CssExcelCell(
+ row=0,
+ col=0,
+ val="",
+ style=None,
+ css_styles=css_styles,
+ css_row=css_row,
+ css_col=css_col,
+ css_converter=converter,
+ )
+ cache_info = converter._call_cached.cache_info()
+ converter._call_cached.cache_clear()
+
+ assert cache_info.hits == cache_hits
+ assert cache_info.misses == cache_misses
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py
new file mode 100644
index 0000000000000000000000000000000000000000..790ba92f70c40095af3f40396135be2842b33229
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py
@@ -0,0 +1,1177 @@
+from datetime import datetime
+from io import StringIO
+import itertools
+import re
+import textwrap
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ get_option,
+ option_context,
+)
+import pandas._testing as tm
+
+import pandas.io.formats.format as fmt
+
+lorem_ipsum = (
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod "
+ "tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim "
+ "veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex "
+ "ea commodo consequat. Duis aute irure dolor in reprehenderit in "
+ "voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur "
+ "sint occaecat cupidatat non proident, sunt in culpa qui officia "
+ "deserunt mollit anim id est laborum."
+)
+
+
+def expected_html(datapath, name):
+ """
+ Read HTML file from formats data directory.
+
+ Parameters
+ ----------
+ datapath : pytest fixture
+ The datapath fixture injected into a test by pytest.
+ name : str
+ The name of the HTML file without the suffix.
+
+ Returns
+ -------
+ str : contents of HTML file.
+ """
+ filename = ".".join([name, "html"])
+ filepath = datapath("io", "formats", "data", "html", filename)
+ with open(filepath, encoding="utf-8") as f:
+ html = f.read()
+ return html.rstrip()
+
+
+@pytest.fixture(params=["mixed", "empty"])
+def biggie_df_fixture(request):
+ """Fixture for a big mixed Dataframe and an empty Dataframe"""
+ if request.param == "mixed":
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).standard_normal(200),
+ "B": Index([f"{i}?!" for i in range(200)]),
+ },
+ index=np.arange(200),
+ )
+ df.loc[:20, "A"] = np.nan
+ df.loc[:20, "B"] = np.nan
+ return df
+ elif request.param == "empty":
+ df = DataFrame(index=np.arange(200))
+ return df
+
+
+@pytest.fixture(params=fmt.VALID_JUSTIFY_PARAMETERS)
+def justify(request):
+ return request.param
+
+
+@pytest.mark.parametrize("col_space", [30, 50])
+def test_to_html_with_col_space(col_space):
+ df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
+ # check that col_space affects HTML generation
+ # and be very brittle about it.
+ result = df.to_html(col_space=col_space)
+ hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)]
+ assert len(hdrs) > 0
+ for h in hdrs:
+ assert "min-width" in h
+ assert str(col_space) in h
+
+
+def test_to_html_with_column_specific_col_space_raises():
+ df = DataFrame(
+ np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
+ )
+
+ msg = (
+ "Col_space length\\(\\d+\\) should match "
+ "DataFrame number of columns\\(\\d+\\)"
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(col_space=[30, 40])
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(col_space=[30, 40, 50, 60])
+
+ msg = "unknown column"
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(col_space={"a": "foo", "b": 23, "d": 34})
+
+
+def test_to_html_with_column_specific_col_space():
+ df = DataFrame(
+ np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
+ )
+
+ result = df.to_html(col_space={"a": "2em", "b": 23})
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
+ assert 'min-width: 2em;">a</th>' in hdrs[1]
+ assert 'min-width: 23px;">b</th>' in hdrs[2]
+ assert "<th>c</th>" in hdrs[3]
+
+ result = df.to_html(col_space=["1em", 2, 3])
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
+ assert 'min-width: 1em;">a</th>' in hdrs[1]
+ assert 'min-width: 2px;">b</th>' in hdrs[2]
+ assert 'min-width: 3px;">c</th>' in hdrs[3]
+
+
+def test_to_html_with_empty_string_label():
+ # GH 3547, to_html regards empty string labels as repeated labels
+ data = {"c1": ["a", "b"], "c2": ["a", ""], "data": [1, 2]}
+ df = DataFrame(data).set_index(["c1", "c2"])
+ result = df.to_html()
+ assert "rowspan" not in result
+
+
+@pytest.mark.parametrize(
+ "df,expected",
+ [
+ (DataFrame({"\u03c3": np.arange(10.0)}), "unicode_1"),
+ (DataFrame({"A": ["\u03c3"]}), "unicode_2"),
+ ],
+)
+def test_to_html_unicode(df, expected, datapath):
+ expected = expected_html(datapath, expected)
+ result = df.to_html()
+ assert result == expected
+
+
+def test_to_html_encoding(float_frame, tmp_path):
+ # GH 28663
+ path = tmp_path / "test.html"
+ float_frame.to_html(path, encoding="gbk")
+ with open(str(path), encoding="gbk") as f:
+ assert float_frame.to_html() == f.read()
+
+
+def test_to_html_decimal(datapath):
+ # GH 12031
+ df = DataFrame({"A": [6.0, 3.1, 2.2]})
+ result = df.to_html(decimal=",")
+ expected = expected_html(datapath, "gh12031_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "kwargs,string,expected",
+ [
+ ({}, "<type 'str'>", "escaped"),
+ ({"escape": False}, "<b>bold</b>", "escape_disabled"),
+ ],
+)
+def test_to_html_escaped(kwargs, string, expected, datapath):
+ a = "str<ing1 &amp;"
+ b = "stri>ng2 &amp;"
+ test_dict = {"co<l1": {a: string, b: string}, "co>l2": {a: string, b: string}}
+ result = DataFrame(test_dict).to_html(**kwargs)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize("index_is_named", [True, False])
+def test_to_html_multiindex_index_false(index_is_named, datapath):
+ # GH 8452
+ df = DataFrame(
+ {"a": range(2), "b": range(3, 5), "c": range(5, 7), "d": range(3, 5)}
+ )
+ df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ if index_is_named:
+ df.index = Index(df.index.values, name="idx")
+ result = df.to_html(index=False)
+ expected = expected_html(datapath, "gh8452_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "multi_sparse,expected",
+ [
+ (False, "multiindex_sparsify_false_multi_sparse_1"),
+ (False, "multiindex_sparsify_false_multi_sparse_2"),
+ (True, "multiindex_sparsify_1"),
+ (True, "multiindex_sparsify_2"),
+ ],
+)
+def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
+ index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], names=["foo", None])
+ df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
+ if expected.endswith("2"):
+ df.columns = index[::2]
+ with option_context("display.multi_sparse", multi_sparse):
+ result = df.to_html()
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "max_rows,expected",
+ [
+ (60, "gh14882_expected_output_1"),
+ # Test that ... appears in a middle level
+ (56, "gh14882_expected_output_2"),
+ ],
+)
+def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
+ # GH 14882 - Issue on truncation with odd length DataFrame
+ index = MultiIndex.from_product(
+ [[100, 200, 300], [10, 20, 30], [1, 2, 3, 4, 5, 6, 7]], names=["a", "b", "c"]
+ )
+ df = DataFrame({"n": range(len(index))}, index=index)
+ result = df.to_html(max_rows=max_rows)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "df,formatters,expected",
+ [
+ (
+ DataFrame(
+ [[0, 1], [2, 3], [4, 5], [6, 7]],
+ columns=Index(["foo", None], dtype=object),
+ index=np.arange(4),
+ ),
+ {"__index__": lambda x: "abcd"[x]},
+ "index_formatter",
+ ),
+ (
+ DataFrame({"months": [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
+ {"months": lambda x: x.strftime("%Y-%m")},
+ "datetime64_monthformatter",
+ ),
+ (
+ DataFrame(
+ {
+ "hod": pd.to_datetime(
+ ["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f"
+ )
+ }
+ ),
+ {"hod": lambda x: x.strftime("%H:%M")},
+ "datetime64_hourformatter",
+ ),
+ (
+ DataFrame(
+ {
+ "i": pd.Series([1, 2], dtype="int64"),
+ "f": pd.Series([1, 2], dtype="float64"),
+ "I": pd.Series([1, 2], dtype="Int64"),
+ "s": pd.Series([1, 2], dtype="string"),
+ "b": pd.Series([True, False], dtype="boolean"),
+ "c": pd.Series(["a", "b"], dtype=pd.CategoricalDtype(["a", "b"])),
+ "o": pd.Series([1, "2"], dtype=object),
+ }
+ ),
+ [lambda x: "formatted"] * 7,
+ "various_dtypes_formatted",
+ ),
+ ],
+)
+def test_to_html_formatters(df, formatters, expected, datapath):
+ expected = expected_html(datapath, expected)
+ result = df.to_html(formatters=formatters)
+ assert result == expected
+
+
+def test_to_html_regression_GH6098():
+ df = DataFrame(
+ {
+ "clé1": ["a", "a", "b", "b", "a"],
+ "clé2": ["1er", "2ème", "1er", "2ème", "1er"],
+ "données1": np.random.default_rng(2).standard_normal(5),
+ "données2": np.random.default_rng(2).standard_normal(5),
+ }
+ )
+
+ # it works
+ df.pivot_table(index=["clé1"], columns=["clé2"])._repr_html_()
+
+
+def test_to_html_truncate(datapath):
+ index = pd.date_range(start="20010101", freq="D", periods=20)
+ df = DataFrame(index=index, columns=range(20))
+ result = df.to_html(max_rows=8, max_cols=4)
+ expected = expected_html(datapath, "truncate")
+ assert result == expected
+
+
+@pytest.mark.parametrize("size", [1, 5])
+def test_html_invalid_formatters_arg_raises(size):
+ # issue-28469
+ df = DataFrame(columns=["a", "b", "c"])
+ msg = "Formatters length({}) should match DataFrame number of columns(3)"
+ with pytest.raises(ValueError, match=re.escape(msg.format(size))):
+ df.to_html(formatters=["{}".format] * size)
+
+
+def test_to_html_truncate_formatter(datapath):
+ # issue-25955
+ data = [
+ {"A": 1, "B": 2, "C": 3, "D": 4},
+ {"A": 5, "B": 6, "C": 7, "D": 8},
+ {"A": 9, "B": 10, "C": 11, "D": 12},
+ {"A": 13, "B": 14, "C": 15, "D": 16},
+ ]
+
+ df = DataFrame(data)
+ fmt = lambda x: str(x) + "_mod"
+ formatters = [fmt, fmt, None, None]
+ result = df.to_html(formatters=formatters, max_cols=3)
+ expected = expected_html(datapath, "truncate_formatter")
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "sparsify,expected",
+ [(True, "truncate_multi_index"), (False, "truncate_multi_index_sparse_off")],
+)
+def test_to_html_truncate_multi_index(sparsify, expected, datapath):
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ df = DataFrame(index=arrays, columns=arrays)
+ result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "option,result,expected",
+ [
+ (None, lambda df: df.to_html(), "1"),
+ (None, lambda df: df.to_html(border=2), "2"),
+ (2, lambda df: df.to_html(), "2"),
+ (2, lambda df: df._repr_html_(), "2"),
+ ],
+)
+def test_to_html_border(option, result, expected):
+ df = DataFrame({"A": [1, 2]})
+ if option is None:
+ result = result(df)
+ else:
+ with option_context("display.html.border", option):
+ result = result(df)
+ expected = f'border="{expected}"'
+ assert expected in result
+
+
+@pytest.mark.parametrize("biggie_df_fixture", ["mixed"], indirect=True)
+def test_to_html(biggie_df_fixture):
+ # TODO: split this test
+ df = biggie_df_fixture
+ s = df.to_html()
+
+ buf = StringIO()
+ retval = df.to_html(buf=buf)
+ assert retval is None
+ assert buf.getvalue() == s
+
+ assert isinstance(s, str)
+
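+ # the remaining calls are smoke tests: they only need to run without error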
+ df.to_html(columns=["B", "A"], col_space=17)
+ df.to_html(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
+
+ df.to_html(columns=["B", "A"], float_format=str)
+ df.to_html(columns=["B", "A"], col_space=12, float_format=str)
+
+
+@pytest.mark.parametrize("biggie_df_fixture", ["empty"], indirect=True)
+def test_to_html_empty_dataframe(biggie_df_fixture):
+ df = biggie_df_fixture
+ df.to_html()
+
+
+def test_to_html_filename(biggie_df_fixture, tmpdir):
+ df = biggie_df_fixture
+ expected = df.to_html()
+ path = tmpdir.join("test.html")
+ df.to_html(path)
+ result = path.read()
+ assert result == expected
+
+
+def test_to_html_with_no_bold():
+ df = DataFrame({"x": np.random.default_rng(2).standard_normal(5)})
+ html = df.to_html(bold_rows=False)
+ result = html[html.find("</thead>")]
+ assert "<strong>" not in result
+
+
+@pytest.mark.parametrize(
+ "columns,justify,expected",
+ [
+ (
+ MultiIndex.from_arrays(
+ [np.arange(2).repeat(2), np.mod(range(4), 2)],
+ names=["CL0", "CL1"],
+ ),
+ "left",
+ "multiindex_1",
+ ),
+ (
+ MultiIndex.from_arrays([np.arange(4), np.mod(range(4), 2)]),
+ "right",
+ "multiindex_2",
+ ),
+ ],
+)
+def test_to_html_multiindex(columns, justify, expected, datapath):
+ df = DataFrame([list("abcd"), list("efgh")], columns=columns)
+ result = df.to_html(justify=justify)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+def test_to_html_justify(justify, datapath):
+ df = DataFrame(
+ {"A": [6, 30000, 2], "B": [1, 2, 70000], "C": [223442, 0, 1]},
+ columns=["A", "B", "C"],
+ )
+ result = df.to_html(justify=justify)
+ expected = expected_html(datapath, "justify").format(justify=justify)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "justify", ["super-right", "small-left", "noinherit", "tiny", "pandas"]
+)
+def test_to_html_invalid_justify(justify):
+ # GH 17527
+ df = DataFrame()
+ msg = "Invalid value for justify parameter"
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(justify=justify)
+
+
+class TestHTMLIndex:
+ @pytest.fixture
+ def df(self):
+ index = ["foo", "bar", "baz"]
+ df = DataFrame(
+ {"A": [1, 2, 3], "B": [1.2, 3.4, 5.6], "C": ["one", "two", np.nan]},
+ columns=["A", "B", "C"],
+ index=index,
+ )
+ return df
+
+ @pytest.fixture
+ def expected_without_index(self, datapath):
+ return expected_html(datapath, "index_2")
+
+ def test_to_html_flat_index_without_name(
+ self, datapath, df, expected_without_index
+ ):
+ expected_with_index = expected_html(datapath, "index_1")
+ assert df.to_html() == expected_with_index
+
+ result = df.to_html(index=False)
+ for i in df.index:
+ assert i not in result
+ assert result == expected_without_index
+
+ def test_to_html_flat_index_with_name(self, datapath, df, expected_without_index):
+ df.index = Index(["foo", "bar", "baz"], name="idx")
+ expected_with_index = expected_html(datapath, "index_3")
+ assert df.to_html() == expected_with_index
+ assert df.to_html(index=False) == expected_without_index
+
+ def test_to_html_multiindex_without_names(
+ self, datapath, df, expected_without_index
+ ):
+ tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
+ df.index = MultiIndex.from_tuples(tuples)
+
+ expected_with_index = expected_html(datapath, "index_4")
+ assert df.to_html() == expected_with_index
+
+ result = df.to_html(index=False)
+ for i in ["foo", "bar", "car", "bike"]:
+ assert i not in result
+ # must be the same result as normal index
+ assert result == expected_without_index
+
+ def test_to_html_multiindex_with_names(self, datapath, df, expected_without_index):
+ tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
+ df.index = MultiIndex.from_tuples(tuples, names=["idx1", "idx2"])
+ expected_with_index = expected_html(datapath, "index_5")
+ assert df.to_html() == expected_with_index
+ assert df.to_html(index=False) == expected_without_index
+
+
+@pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
+def test_to_html_with_classes(classes, datapath):
+ df = DataFrame()
+ expected = expected_html(datapath, "with_classes")
+ result = df.to_html(classes=classes)
+ assert result == expected
+
+
+def test_to_html_no_index_max_rows(datapath):
+ # GH 14998
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ result = df.to_html(index=False, max_rows=1)
+ expected = expected_html(datapath, "gh14998_expected_output")
+ assert result == expected
+
+
+def test_to_html_multiindex_max_cols(datapath):
+ # GH 6131
+ index = MultiIndex(
+ levels=[["ba", "bb", "bc"], ["ca", "cb", "cc"]],
+ codes=[[0, 1, 2], [0, 1, 2]],
+ names=["b", "c"],
+ )
+ columns = MultiIndex(
+ levels=[["d"], ["aa", "ab", "ac"]],
+ codes=[[0, 0, 0], [0, 1, 2]],
+ names=[None, "a"],
+ )
+ data = np.array(
+ [[1.0, np.nan, np.nan], [np.nan, 2.0, np.nan], [np.nan, np.nan, 3.0]]
+ )
+ df = DataFrame(data, index, columns)
+ result = df.to_html(max_cols=2)
+ expected = expected_html(datapath, "gh6131_expected_output")
+ assert result == expected
+
+
+def test_to_html_multi_indexes_index_false(datapath):
+ # GH 22579
+ df = DataFrame(
+ {"a": range(10), "b": range(10, 20), "c": range(10, 20), "d": range(10, 20)}
+ )
+ df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ df.index = MultiIndex.from_product([["a", "b"], ["c", "d", "e", "f", "g"]])
+ result = df.to_html(index=False)
+ expected = expected_html(datapath, "gh22579_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize("index_names", [True, False])
+@pytest.mark.parametrize("header", [True, False])
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize(
+ "column_index, column_type",
+ [
+ (Index([0, 1]), "unnamed_standard"),
+ (Index([0, 1], name="columns.name"), "named_standard"),
+ (MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
+ (
+ MultiIndex.from_product(
+ [["a"], ["b", "c"]], names=["columns.name.0", "columns.name.1"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "row_index, row_type",
+ [
+ (Index([0, 1]), "unnamed_standard"),
+ (Index([0, 1], name="index.name"), "named_standard"),
+ (MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
+ (
+ MultiIndex.from_product(
+ [["a"], ["b", "c"]], names=["index.name.0", "index.name.1"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+def test_to_html_basic_alignment(
+ datapath, row_index, row_type, column_index, column_type, index, header, index_names
+):
+ # GH 22747, GH 22579
+ df = DataFrame(np.zeros((2, 2), dtype=int), index=row_index, columns=column_index)
+ result = df.to_html(index=index, header=header, index_names=index_names)
+
+ if not index:
+ row_type = "none"
+ elif not index_names and row_type.startswith("named"):
+ row_type = "un" + row_type
+
+ if not header:
+ column_type = "none"
+ elif not index_names and column_type.startswith("named"):
+ column_type = "un" + column_type
+
+ filename = "index_" + row_type + "_columns_" + column_type
+ expected = expected_html(datapath, filename)
+ assert result == expected
+
+
+@pytest.mark.parametrize("index_names", [True, False])
+@pytest.mark.parametrize("header", [True, False])
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize(
+ "column_index, column_type",
+ [
+ (Index(np.arange(8)), "unnamed_standard"),
+ (Index(np.arange(8), name="columns.name"), "named_standard"),
+ (
+ MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
+ "unnamed_multi",
+ ),
+ (
+ MultiIndex.from_product(
+ [["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "row_index, row_type",
+ [
+ (Index(np.arange(8)), "unnamed_standard"),
+ (Index(np.arange(8), name="index.name"), "named_standard"),
+ (
+ MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
+ "unnamed_multi",
+ ),
+ (
+ MultiIndex.from_product(
+ [["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+def test_to_html_alignment_with_truncation(
+ datapath, row_index, row_type, column_index, column_type, index, header, index_names
+):
+ # GH 22747, GH 22579
+ df = DataFrame(np.arange(64).reshape(8, 8), index=row_index, columns=column_index)
+ result = df.to_html(
+ max_rows=4, max_cols=4, index=index, header=header, index_names=index_names
+ )
+
+ if not index:
+ row_type = "none"
+ elif not index_names and row_type.startswith("named"):
+ row_type = "un" + row_type
+
+ if not header:
+ column_type = "none"
+ elif not index_names and column_type.startswith("named"):
+ column_type = "un" + column_type
+
+ filename = "trunc_df_index_" + row_type + "_columns_" + column_type
+ expected = expected_html(datapath, filename)
+ assert result == expected
+
+
+@pytest.mark.parametrize("index", [False, 0])
+def test_to_html_truncation_index_false_max_rows(datapath, index):
+ # GH 15019
+ data = [
+ [1.764052, 0.400157],
+ [0.978738, 2.240893],
+ [1.867558, -0.977278],
+ [0.950088, -0.151357],
+ [-0.103219, 0.410599],
+ ]
+ df = DataFrame(data)
+ result = df.to_html(max_rows=4, index=index)
+ expected = expected_html(datapath, "gh15019_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize("index", [False, 0])
+@pytest.mark.parametrize(
+ "col_index_named, expected_output",
+ [(False, "gh22783_expected_output"), (True, "gh22783_named_columns_index")],
+)
+def test_to_html_truncation_index_false_max_cols(
+ datapath, index, col_index_named, expected_output
+):
+ # GH 22783
+ data = [
+ [1.764052, 0.400157, 0.978738, 2.240893, 1.867558],
+ [-0.977278, 0.950088, -0.151357, -0.103219, 0.410599],
+ ]
+ df = DataFrame(data)
+ if col_index_named:
+ df.columns.rename("columns.name", inplace=True)
+ result = df.to_html(max_cols=4, index=index)
+ expected = expected_html(datapath, expected_output)
+ assert result == expected
+
+
+@pytest.mark.parametrize("notebook", [True, False])
+def test_to_html_notebook_has_style(notebook):
+ df = DataFrame({"A": [1, 2, 3]})
+ result = df.to_html(notebook=notebook)
+
+ if notebook:
+ assert "tbody tr th:only-of-type" in result
+ assert "vertical-align: middle;" in result
+ assert "thead th" in result
+ else:
+ assert "tbody tr th:only-of-type" not in result
+ assert "vertical-align: middle;" not in result
+ assert "thead th" not in result
+
+
+def test_to_html_with_index_names_false():
+ # GH 16493
+ df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
+ result = df.to_html(index_names=False)
+ assert "myindexname" not in result
+
+
+def test_to_html_with_id():
+ # GH 8496
+ df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
+ result = df.to_html(index_names=False, table_id="TEST_ID")
+ assert ' id="TEST_ID"' in result
+
+
+@pytest.mark.parametrize(
+ "value,float_format,expected",
+ [
+ (0.19999, "%.3f", "gh21625_expected_output"),
+ (100.0, "%.0f", "gh22270_expected_output"),
+ ],
+)
+def test_to_html_float_format_no_fixed_width(value, float_format, expected, datapath):
+ # GH 21625, GH 22270
+ df = DataFrame({"x": [value]})
+ expected = expected_html(datapath, expected)
+ result = df.to_html(float_format=float_format)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "render_links,expected",
+ [(True, "render_links_true"), (False, "render_links_false")],
+)
+def test_to_html_render_links(render_links, expected, datapath):
+ # GH 2679
+ data = [
+ [0, "https://pandas.pydata.org/?q1=a&q2=b", "pydata.org"],
+ [0, "www.pydata.org", "pydata.org"],
+ ]
+ df = DataFrame(data, columns=Index(["foo", "bar", None], dtype=object))
+
+ result = df.to_html(render_links=render_links)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "method,expected",
+ [
+ ("to_html", lambda x: lorem_ipsum),
+ ("_repr_html_", lambda x: lorem_ipsum[: x - 4] + "..."), # regression case
+ ],
+)
+@pytest.mark.parametrize("max_colwidth", [10, 20, 50, 100])
+def test_ignore_display_max_colwidth(method, expected, max_colwidth):
+ # see gh-17004
+ df = DataFrame([lorem_ipsum])
+ with option_context("display.max_colwidth", max_colwidth):
+ result = getattr(df, method)()
+ expected = expected(max_colwidth)
+ assert expected in result
+
+
+@pytest.mark.parametrize("classes", [True, 0])
+def test_to_html_invalid_classes_type(classes):
+ # GH 25608
+ df = DataFrame()
+ msg = "classes must be a string, list, or tuple"
+
+ with pytest.raises(TypeError, match=msg):
+ df.to_html(classes=classes)
+
+
+def test_to_html_round_column_headers():
+ # GH 17280
+ df = DataFrame([1], columns=[0.55555])
+ with option_context("display.precision", 3):
+ html = df.to_html(notebook=False)
+ notebook = df.to_html(notebook=True)
+ assert "0.55555" in html
+ assert "0.556" in notebook
+
+
+@pytest.mark.parametrize("unit", ["100px", "10%", "5em", 150])
+def test_to_html_with_col_space_units(unit):
+ # GH 25941
+ df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
+ result = df.to_html(col_space=unit)
+ result = result.split("tbody")[0]
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
+ if isinstance(unit, int):
+ unit = str(unit) + "px"
+ for h in hdrs:
+ expected = f'<th style="min-width: {unit};">'
+ assert expected in h
+
+
+class TestReprHTML:
+ def test_html_repr_min_rows_default(self, datapath):
+ # gh-27991
+
+ # default setting no truncation even if above min_rows
+ df = DataFrame({"a": range(20)})
+ result = df._repr_html_()
+ expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation")
+ assert result == expected
+
+ # default of max_rows 60 triggers truncation if above
+ df = DataFrame({"a": range(61)})
+ result = df._repr_html_()
+ expected = expected_html(datapath, "html_repr_min_rows_default_truncated")
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "max_rows,min_rows,expected",
+ [
+ # truncated after first two rows
+ (10, 4, "html_repr_max_rows_10_min_rows_4"),
+ # when set to None, follow value of max_rows
+ (12, None, "html_repr_max_rows_12_min_rows_None"),
+ # when set value higher as max_rows, use the minimum
+ (10, 12, "html_repr_max_rows_10_min_rows_12"),
+ # max_rows of None -> never truncate
+ (None, 12, "html_repr_max_rows_None_min_rows_12"),
+ ],
+ )
+ def test_html_repr_min_rows(self, datapath, max_rows, min_rows, expected):
+ # gh-27991
+
+ df = DataFrame({"a": range(61)})
+ expected = expected_html(datapath, expected)
+ with option_context("display.max_rows", max_rows, "display.min_rows", min_rows):
+ result = df._repr_html_()
+ assert result == expected
+
+ def test_repr_html_ipython_config(self, ip):
+ code = textwrap.dedent(
+ """\
+ from pandas import DataFrame
+ df = DataFrame({"A": [1, 2]})
+ df._repr_html_()
+
+ cfg = get_ipython().config
+ cfg['IPKernelApp']['parent_appname']
+ df._repr_html_()
+ """
+ )
+ result = ip.run_cell(code, silent=True)
+ assert not result.error_in_exec
+
+ def test_info_repr_html(self):
+ max_rows = 60
+ max_cols = 20
+ # Long
+ h, w = max_rows + 1, max_cols - 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert r"&lt;class" not in df._repr_html_()
+ with option_context("display.large_repr", "info"):
+ assert r"&lt;class" in df._repr_html_()
+
+ # Wide
+ h, w = max_rows - 1, max_cols + 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert r"&lt;class" not in df._repr_html_()
+ with option_context("display.large_repr", "info", "display.max_columns", max_cols):
+ assert r"&lt;class" in df._repr_html_()
+
+ def test_repr_html_float(self):
+ max_rows = 60
+ h = max_rows - 1
+ df = DataFrame(
+ {
+ "idx": np.linspace(-10, 10, h),
+ "A": np.arange(1, 1 + h),
+ "B": np.arange(41, 41 + h),
+ }
+ ).set_index("idx")
+ reg_repr = df._repr_html_()
+ assert ".." not in reg_repr
+ assert f"<td>{40 + h}</td>" in reg_repr
+
+ h = max_rows + 1
+ df = DataFrame(
+ {
+ "idx": np.linspace(-10, 10, h),
+ "A": np.arange(1, 1 + h),
+ "B": np.arange(41, 41 + h),
+ }
+ ).set_index("idx")
+ long_repr = df._repr_html_()
+ assert ".." in long_repr
+ assert "<td>31</td>" not in long_repr
+ assert f"{h} rows " in long_repr
+ assert "2 columns" in long_repr
+
+ def test_repr_html_long_multiindex(self):
+ max_rows = 60
+ max_L1 = max_rows // 2
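+ # max_L1 * 2 rows is exactly display.max_rows, the largest frame that is
+ # still rendered without truncation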
+
+ tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
+ idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((max_L1 * 2, 2)),
+ index=idx,
+ columns=["A", "B"],
+ )
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ reg_repr = df._repr_html_()
+ assert "..." not in reg_repr
+
+ tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
+ idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal(((max_L1 + 1) * 2, 2)),
+ index=idx,
+ columns=["A", "B"],
+ )
+ long_repr = df._repr_html_()
+ assert "..." in long_repr
+
+ def test_repr_html_long_and_wide(self):
+ max_cols = 20
+ max_rows = 60
+
+ h, w = max_rows - 1, max_cols - 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ assert "..." not in df._repr_html_()
+
+ h, w = max_rows + 1, max_cols + 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ assert "..." in df._repr_html_()
+
+
+def test_to_html_multilevel(multiindex_year_month_day_dataframe_random_data):
+ ymd = multiindex_year_month_day_dataframe_random_data
+
+ ymd.columns.name = "foo"
+ ymd.to_html()
+ ymd.T.to_html()
+
+
+@pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+def test_to_html_na_rep_and_float_format(na_rep, datapath):
+ # https://github.com/pandas-dev/pandas/issues/13828
+ df = DataFrame(
+ [
+ ["A", 1.2225],
+ ["A", None],
+ ],
+ columns=["Group", "Data"],
+ )
+ result = df.to_html(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = expected_html(datapath, "gh13828_expected_output")
+ expected = expected.format(na_rep=na_rep)
+ assert result == expected
+
+
+def test_to_html_na_rep_non_scalar_data(datapath):
+ # GH47103
+ df = DataFrame([{"a": 1, "b": [1, 2, 3]}])
+ result = df.to_html(na_rep="-")
+ expected = expected_html(datapath, "gh47103_expected_output")
+ assert result == expected
+
+
+def test_to_html_float_format_object_col(datapath):
+ # GH#40024
+ df = DataFrame(data={"x": [1000.0, "test"]})
+ result = df.to_html(float_format=lambda x: f"{x:,.0f}")
+ expected = expected_html(datapath, "gh40024_expected_output")
+ assert result == expected
+
+
+def test_to_html_multiindex_col_with_colspace():
+ # GH#53885
+ df = DataFrame([[1, 2]])
+ df.columns = MultiIndex.from_tuples([(1, 1), (2, 1)])
+ result = df.to_html(col_space=100)
+ expected = (
+ '<table border="1" class="dataframe">\n'
+ "  <thead>\n"
+ "    <tr>\n"
+ '      <th style="min-width: 100px;"></th>\n'
+ '      <th style="min-width: 100px;">1</th>\n'
+ '      <th style="min-width: 100px;">2</th>\n'
+ "    </tr>\n"
+ "    <tr>\n"
+ '      <th style="min-width: 100px;"></th>\n'
+ '      <th style="min-width: 100px;">1</th>\n'
+ '      <th style="min-width: 100px;">1</th>\n'
+ "    </tr>\n"
+ "  </thead>\n"
+ "  <tbody>\n"
+ "    <tr>\n"
+ "      <th>0</th>\n"
+ "      <td>1</td>\n"
+ "      <td>2</td>\n"
+ "    </tr>\n"
+ "  </tbody>\n"
+ "</table>"
+ )
+ assert result == expected
+
+
+def test_to_html_tuple_col_with_colspace():
+ # GH#53885
+ df = DataFrame({("a", "b"): [1], "b": [2]})
+ result = df.to_html(col_space=100)
+ expected = (
+ '<table border="1" class="dataframe">\n'
+ "  <thead>\n"
+ '    <tr style="text-align: right;">\n'
+ '      <th style="min-width: 100px;"></th>\n'
+ '      <th style="min-width: 100px;">(a, b)</th>\n'
+ '      <th style="min-width: 100px;">b</th>\n'
+ "    </tr>\n"
+ "  </thead>\n"
+ "  <tbody>\n"
+ "    <tr>\n"
+ "      <th>0</th>\n"
+ "      <td>1</td>\n"
+ "      <td>2</td>\n"
+ "    </tr>\n"
+ "  </tbody>\n"
+ "</table>"
+ )
+ assert result == expected
+
+
+def test_to_html_empty_complex_array():
+ # GH#54167
+ df = DataFrame({"x": np.array([], dtype="complex")})
+ result = df.to_html(col_space=100)
+ expected = (
+ '<table border="1" class="dataframe">\n'
+ "  <thead>\n"
+ '    <tr style="text-align: right;">\n'
+ '      <th style="min-width: 100px;"></th>\n'
+ '      <th style="min-width: 100px;">x</th>\n'
+ "    </tr>\n"
+ "  </thead>\n"
+ "  <tbody>\n"
+ "  </tbody>\n"
+ "</table>"
+ )
+ assert result == expected
+
+
+def test_to_html_pos_args_deprecation():
+ # GH-54229
+ df = DataFrame({"a": [1, 2, 3]})
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_html except for the "
+ r"argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_html(None, None)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fd96dff27d06dc3056b56c5f7e8eb054e98bd8f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py
@@ -0,0 +1,1425 @@
+import codecs
+from datetime import datetime
+from textwrap import dedent
+
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Series,
+)
+import pandas._testing as tm
+
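+# DataFrame.to_latex is rendered through the Styler machinery, which needs jinja2.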
+pytest.importorskip("jinja2")
+
+
+def _dedent(string):
+    """Dedent without a leading newline.
+
+    The built-in textwrap.dedent keeps the newline character at the start of
+    a multi-line string that begins with a newline.
+    This version drops that leading newline character.
+ """
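+    # e.g. _dedent("\n    a\n    b\n") == "a\nb\n", whereas dedent alone would
+    # keep the leading newline and return "\na\nb\n"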
+ return dedent(string).lstrip()
+
+
+@pytest.fixture
+def df_short():
+ """Short dataframe for testing table/tabular/longtable LaTeX env."""
+ return DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+
+
+class TestToLatex:
+ def test_to_latex_to_file(self, float_frame):
+ with tm.ensure_clean("test.tex") as path:
+ float_frame.to_latex(path)
+ with open(path, encoding="utf-8") as f:
+ assert float_frame.to_latex() == f.read()
+
+ def test_to_latex_to_file_utf8_with_encoding(self):
+ # test with utf-8 and encoding option (GH 7061)
+ df = DataFrame([["au\xdfgangen"]])
+ with tm.ensure_clean("test.tex") as path:
+ df.to_latex(path, encoding="utf-8")
+ with codecs.open(path, "r", encoding="utf-8") as f:
+ assert df.to_latex() == f.read()
+
+ def test_to_latex_to_file_utf8_without_encoding(self):
+ # test with utf-8 without encoding option
+ df = DataFrame([["au\xdfgangen"]])
+ with tm.ensure_clean("test.tex") as path:
+ df.to_latex(path)
+ with codecs.open(path, "r", encoding="utf-8") as f:
+ assert df.to_latex() == f.read()
+
+ def test_to_latex_tabular_with_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_tabular_without_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ a & b \\
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "bad_column_format",
+ [5, 1.2, ["l", "r"], ("r", "c"), {"r", "c", "l"}, {"a": "r", "b": "l"}],
+ )
+ def test_to_latex_bad_column_format(self, bad_column_format):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ msg = r"`column_format` must be str or unicode"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(column_format=bad_column_format)
+
+ def test_to_latex_column_format_just_works(self, float_frame):
+ # GH Bug #9402
+ float_frame.to_latex(column_format="lcr")
+
+ def test_to_latex_column_format(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(column_format="lcr")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lcr}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_object_col(self):
+ # GH#40024
+ ser = Series([1000.0, "test"])
+ result = ser.to_latex(float_format="{:,.0f}".format)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & 1,000 \\
+ 1 & test \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_empty_tabular(self):
+ df = DataFrame()
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{l}
+ \toprule
+ \midrule
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_series(self):
+ s = Series(["a", "b", "c"])
+ result = s.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & a \\
+ 1 & b \\
+ 2 & c \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_midrule_location(self):
+ # GH 18326
+ df = DataFrame({"a": [1, 2]})
+ df.index.name = "foo"
+ result = df.to_latex(index_names=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & a \\
+ \midrule
+ 0 & 1 \\
+ 1 & 2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_pos_args_deprecation(self):
+ # GH-54229
+ df = DataFrame(
+ {
+ "name": ["Raphael", "Donatello"],
+ "age": [26, 45],
+ "height": [181.23, 177.65],
+ }
+ )
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_latex except for "
+ r"the argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_latex(None, None)
+
+
+class TestToLatexLongtable:
+ def test_to_latex_empty_longtable(self):
+ df = DataFrame()
+ result = df.to_latex(longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{l}
+ \toprule
+ \midrule
+ \endfirsthead
+ \toprule
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{0}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_with_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_without_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False, longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{rl}
+ \toprule
+ a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{2}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 1 & b1 \\
+ 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "df, expected_number",
+ [
+ (DataFrame({"a": [1, 2]}), 1),
+ (DataFrame({"a": [1, 2], "b": [3, 4]}), 2),
+ (DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}), 3),
+ ],
+ )
+ def test_to_latex_longtable_continued_on_next_page(self, df, expected_number):
+ result = df.to_latex(index=False, longtable=True)
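+        # the "Continued on next page" footer spans every printed column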
+ assert rf"\multicolumn{{{expected_number}}}" in result
+
+
+class TestToLatexHeader:
+ def test_to_latex_no_header_with_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_no_header_without_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False, header=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_with_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["AA", "BB"])
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & AA & BB \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_without_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["AA", "BB"], index=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ AA & BB \\
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "header, num_aliases",
+ [
+ (["A"], 1),
+ (("B",), 1),
+ (("Col1", "Col2", "Col3"), 3),
+ (("Col1", "Col2", "Col3", "Col4"), 4),
+ ],
+ )
+ def test_to_latex_number_of_items_in_header_missmatch_raises(
+ self,
+ header,
+ num_aliases,
+ ):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ msg = f"Writing 2 cols but got {num_aliases} aliases"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(header=header)
+
+ def test_to_latex_decimal(self):
+ # GH 12031
+ df = DataFrame({"a": [1.0, 2.1], "b": ["b1", "b2"]})
+ result = df.to_latex(decimal=",")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1,000000 & b1 \\
+ 1 & 2,100000 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexBold:
+ def test_to_latex_bold_rows(self):
+ # GH 16707
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(bold_rows=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \textbf{0} & 1 & b1 \\
+ \textbf{1} & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_no_bold_rows(self):
+ # GH 16707
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(bold_rows=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexCaptionLabel:
+ @pytest.fixture
+ def caption_table(self):
+ """Caption for table/tabular LaTeX environment."""
+ return "a table in a \\texttt{table/tabular} environment"
+
+ @pytest.fixture
+ def short_caption(self):
+ """Short caption for testing \\caption[short_caption]{full_caption}."""
+ return "a table"
+
+ @pytest.fixture
+ def label_table(self):
+ """Label for table/tabular LaTeX environment."""
+ return "tab:table_tabular"
+
+ @pytest.fixture
+ def caption_longtable(self):
+ """Caption for longtable LaTeX environment."""
+ return "a table in a \\texttt{longtable} environment"
+
+ @pytest.fixture
+ def label_longtable(self):
+ """Label for longtable LaTeX environment."""
+ return "tab:longtable"
+
+ def test_to_latex_caption_only(self, df_short, caption_table):
+ # GH 25436
+ result = df_short.to_latex(caption=caption_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{a table in a \texttt{table/tabular} environment}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_label_only(self, df_short, label_table):
+ # GH 25436
+ result = df_short.to_latex(label=label_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_label(self, df_short, caption_table, label_table):
+ # GH 25436
+ result = df_short.to_latex(caption=caption_table, label=label_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{a table in a \texttt{table/tabular} environment}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_shortcaption(
+ self,
+ df_short,
+ caption_table,
+ short_caption,
+ ):
+ result = df_short.to_latex(caption=(caption_table, short_caption))
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption[a table]{a table in a \texttt{table/tabular} environment}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_shortcaption_list_is_ok(self, df_short):
+ caption = ("Long-long-caption", "Short")
+ result_tuple = df_short.to_latex(caption=caption)
+ result_list = df_short.to_latex(caption=list(caption))
+ assert result_tuple == result_list
+
+ def test_to_latex_caption_shortcaption_and_label(
+ self,
+ df_short,
+ caption_table,
+ short_caption,
+ label_table,
+ ):
+ # test when the short_caption is provided alongside caption and label
+ result = df_short.to_latex(
+ caption=(caption_table, short_caption),
+ label=label_table,
+ )
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption[a table]{a table in a \texttt{table/tabular} environment}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "bad_caption",
+ [
+ ("full_caption", "short_caption", "extra_string"),
+ ("full_caption", "short_caption", 1),
+ ("full_caption", "short_caption", None),
+ ("full_caption",),
+ (None,),
+ ],
+ )
+ def test_to_latex_bad_caption_raises(self, bad_caption):
+        # test that a caption with the wrong number of elements raises ValueError
+ df = DataFrame({"a": [1]})
+ msg = "`caption` must be either a string or 2-tuple of strings"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(caption=bad_caption)
+
+ def test_to_latex_two_chars_caption(self, df_short):
+        # test that a two-character caption is handled correctly:
+        # it must not be unpacked into (long_caption, short_caption)
+ result = df_short.to_latex(caption="xy")
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{xy}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_only(self, df_short, caption_longtable):
+ # GH 25436
+        # the case where neither caption nor label is provided is covered by
+        # the longtable tests in TestToLatexLongtable above
+ result = df_short.to_latex(longtable=True, caption=caption_longtable)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_label_only(self, df_short, label_longtable):
+ # GH 25436
+ result = df_short.to_latex(longtable=True, label=label_longtable)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \label{tab:longtable} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_and_label(
+ self,
+ df_short,
+ caption_longtable,
+ label_longtable,
+ ):
+ # GH 25436
+ result = df_short.to_latex(
+ longtable=True,
+ caption=caption_longtable,
+ label=label_longtable,
+ )
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_shortcaption_and_label(
+ self,
+ df_short,
+ caption_longtable,
+ short_caption,
+ label_longtable,
+ ):
+ # test when the caption, the short_caption and the label are provided
+ result = df_short.to_latex(
+ longtable=True,
+ caption=(caption_longtable, short_caption),
+ label=label_longtable,
+ )
+ expected = _dedent(
+ r"""
+\begin{longtable}{lrl}
+\caption[a table]{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
+\toprule
+ & a & b \\
+\midrule
+\endfirsthead
+\caption[]{a table in a \texttt{longtable} environment} \\
+\toprule
+ & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{Continued on next page} \\
+\midrule
+\endfoot
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ )
+ assert result == expected
+
+
+class TestToLatexEscape:
+ @pytest.fixture
+ def df_with_symbols(self):
+ """Dataframe with special characters for testing chars escaping."""
+ a = "a"
+ b = "b"
+ yield DataFrame({"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}})
+
+ def test_to_latex_escape_false(self, df_with_symbols):
+ result = df_with_symbols.to_latex(escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & co$e^x$ & co^l1 \\
+ \midrule
+ a & a & a \\
+ b & b & b \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_escape_default(self, df_with_symbols):
+ # gh50871: in v2.0 escape is False by default (styler.format.escape=None)
+ default = df_with_symbols.to_latex()
+ specified_true = df_with_symbols.to_latex(escape=True)
+ assert default != specified_true
+
+ def test_to_latex_special_escape(self):
+ df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"])
+ result = df.to_latex(escape=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & a\textbackslash b\textbackslash c \\
+ 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\
+ 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_escape_special_chars(self):
+ special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"]
+ df = DataFrame(data=special_characters)
+ result = df.to_latex(escape=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & \& \\
+ 1 & \% \\
+ 2 & \$ \\
+ 3 & \# \\
+ 4 & \_ \\
+ 5 & \{ \\
+ 6 & \} \\
+ 7 & \textasciitilde \\
+ 8 & \textasciicircum \\
+ 9 & \textbackslash \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_special_chars_without_escape(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["$A$", "$B$"], escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & $A$ & $B$ \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexPosition:
+ def test_to_latex_position(self):
+ the_position = "h"
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(position=the_position)
+ expected = _dedent(
+ r"""
+ \begin{table}[h]
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_position(self):
+ the_position = "t"
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(longtable=True, position=the_position)
+ expected = _dedent(
+ r"""
+ \begin{longtable}[t]{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexFormatters:
+ def test_to_latex_with_formatters(self):
+ df = DataFrame(
+ {
+ "datetime64": [
+ datetime(2016, 1, 1),
+ datetime(2016, 2, 5),
+ datetime(2016, 3, 3),
+ ],
+ "float": [1.0, 2.0, 3.0],
+ "int": [1, 2, 3],
+ "object": [(1, 2), True, False],
+ }
+ )
+
+ formatters = {
+ "datetime64": lambda x: x.strftime("%Y-%m"),
+ "float": lambda x: f"[{x: 4.1f}]",
+ "int": lambda x: f"0x{x:x}",
+ "object": lambda x: f"-{x!s}-",
+ "__index__": lambda x: f"index: {x}",
+ }
+ result = df.to_latex(formatters=dict(formatters))
+
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrl}
+ \toprule
+ & datetime64 & float & int & object \\
+ \midrule
+ index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
+ index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
+ index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_no_fixed_width_3decimals(self):
+ # GH 21625
+ df = DataFrame({"x": [0.19999]})
+ result = df.to_latex(float_format="%.3f")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & x \\
+ \midrule
+ 0 & 0.200 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_no_fixed_width_integer(self):
+ # GH 22270
+ df = DataFrame({"x": [100.0]})
+ result = df.to_latex(float_format="%.0f")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & x \\
+ \midrule
+ 0 & 100 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+ def test_to_latex_na_rep_and_float_format(self, na_rep):
+ df = DataFrame(
+ [
+ ["A", 1.2225],
+ ["A", None],
+ ],
+ columns=["Group", "Data"],
+ )
+ result = df.to_latex(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = _dedent(
+ rf"""
+ \begin{{tabular}}{{llr}}
+ \toprule
+ & Group & Data \\
+ \midrule
+ 0 & A & 1.22 \\
+ 1 & A & {na_rep} \\
+ \bottomrule
+ \end{{tabular}}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexMultiindex:
+ @pytest.fixture
+ def multiindex_frame(self):
+ """Multiindex dataframe for testing multirow LaTeX macros."""
+ yield DataFrame.from_dict(
+ {
+ ("c1", 0): Series({x: x for x in range(4)}),
+ ("c1", 1): Series({x: x + 4 for x in range(4)}),
+ ("c2", 0): Series({x: x for x in range(4)}),
+ ("c2", 1): Series({x: x + 4 for x in range(4)}),
+ ("c3", 0): Series({x: x for x in range(4)}),
+ }
+ ).T
+
+ @pytest.fixture
+ def multicolumn_frame(self):
+ """Multicolumn dataframe for testing multicolumn LaTeX macros."""
+ yield DataFrame(
+ {
+ ("c1", 0): {x: x for x in range(5)},
+ ("c1", 1): {x: x + 5 for x in range(5)},
+ ("c2", 0): {x: x for x in range(5)},
+ ("c2", 1): {x: x + 5 for x in range(5)},
+ ("c3", 0): {x: x for x in range(5)},
+ }
+ )
+
+ def test_to_latex_multindex_header(self):
+ # GH 16718
+ df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]})
+ df = df.set_index(["a", "b"])
+ observed = df.to_latex(header=["r1", "r2"], multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrr}
+ \toprule
+ & & r1 & r2 \\
+ a & b & & \\
+ \midrule
+ 0 & 1 & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert observed == expected
+
+ def test_to_latex_multiindex_empty_name(self):
+ # GH 18669
+ mi = pd.MultiIndex.from_product([[1, 2]], names=[""])
+ df = DataFrame(-1, index=mi, columns=range(4))
+ observed = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrr}
+ \toprule
+ & 0 & 1 & 2 & 3 \\
+ & & & & \\
+ \midrule
+ 1 & -1 & -1 & -1 & -1 \\
+ 2 & -1 & -1 & -1 & -1 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert observed == expected
+
+ def test_to_latex_multiindex_column_tabular(self):
+ df = DataFrame({("x", "y"): ["a"]})
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & x \\
+ & y \\
+ \midrule
+ 0 & a \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_small_tabular(self):
+ df = DataFrame({("x", "y"): ["a"]}).T
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & & 0 \\
+ \midrule
+ x & y & a \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_tabular(self, multiindex_frame):
+ result = multiindex_frame.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrr}
+ \toprule
+ & & 0 & 1 & 2 & 3 \\
+ \midrule
+ c1 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c2 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c3 & 0 & 0 & 1 & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_tabular(self, multiindex_frame):
+ # GH 14184
+ df = multiindex_frame.T
+ df.columns.names = ["a", "b"]
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ a & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\
+ b & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 4 & 0 & 4 & 0 \\
+ 1 & 1 & 5 & 1 & 5 & 1 \\
+ 2 & 2 & 6 & 2 & 6 & 2 \\
+ 3 & 3 & 7 & 3 & 7 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_index_has_name_tabular(self):
+ # GH 10660
+ df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
+ result = df.set_index(["a", "b"]).to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & c \\
+ a & b & \\
+ \midrule
+ 0 & a & 1 \\
+ & b & 2 \\
+ 1 & a & 3 \\
+ & b & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_groupby_tabular(self):
+ # GH 10660
+ df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
+ result = (
+ df.groupby("a")
+ .describe()
+ .to_latex(float_format="{:.1f}".format, escape=True)
+ )
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrrrrr}
+ \toprule
+ & \multicolumn{8}{r}{c} \\
+ & count & mean & std & min & 25\% & 50\% & 75\% & max \\
+ a & & & & & & & & \\
+ \midrule
+ 0 & 2.0 & 1.5 & 0.7 & 1.0 & 1.2 & 1.5 & 1.8 & 2.0 \\
+ 1 & 2.0 & 3.5 & 0.7 & 3.0 & 3.2 & 3.5 & 3.8 & 4.0 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_dupe_level(self):
+ # see gh-14484
+ #
+ # If an index is repeated in subsequent rows, it should be
+ # replaced with a blank in the created table. This should
+ # ONLY happen if all higher order indices (to the left) are
+ # equal too. In this test, 'c' has to be printed both times
+ # because the higher order index 'A' != 'B'.
+ df = DataFrame(
+ index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"]
+ )
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & & col \\
+ \midrule
+ A & c & NaN \\
+ B & c & NaN \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_default(self, multicolumn_frame):
+ result = multicolumn_frame.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\
+ & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_false(self, multicolumn_frame):
+ result = multicolumn_frame.to_latex(multicolumn=False, multicolumn_format="l")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ & c1 & & c2 & & c3 \\
+ & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multirow_true(self, multicolumn_frame):
+ result = multicolumn_frame.T.to_latex(multirow=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrrr}
+ \toprule
+ & & 0 & 1 & 2 & 3 & 4 \\
+ \midrule
+ \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+ \cline{1-7}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumnrow_with_multicol_format(self, multicolumn_frame):
+ multicolumn_frame.index = multicolumn_frame.T.index
+ result = multicolumn_frame.T.to_latex(
+ multirow=True,
+ multicolumn=True,
+ multicolumn_format="c",
+ )
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrrr}
+ \toprule
+ & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
+ & & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+ \cline{1-7}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize("name0", [None, "named0"])
+ @pytest.mark.parametrize("name1", [None, "named1"])
+ @pytest.mark.parametrize("axes", [[0], [1], [0, 1]])
+ def test_to_latex_multiindex_names(self, name0, name1, axes):
+ # GH 18667
+ names = [name0, name1]
+ mi = pd.MultiIndex.from_product([[1, 2], [3, 4]])
+ df = DataFrame(-1, index=mi.copy(), columns=mi.copy())
+ for idx in axes:
+ df.axes[idx].names = names
+
+ idx_names = tuple(n or "" for n in names)
+ idx_names_row = (
+ f"{idx_names[0]} & {idx_names[1]} & & & & \\\\\n"
+ if (0 in axes and any(names))
+ else ""
+ )
+ col_names = [n if (bool(n) and 1 in axes) else "" for n in names]
+ observed = df.to_latex(multirow=False)
+ # pylint: disable-next=consider-using-f-string
+ expected = r"""\begin{tabular}{llrrrr}
+\toprule
+ & %s & \multicolumn{2}{r}{1} & \multicolumn{2}{r}{2} \\
+ & %s & 3 & 4 & 3 & 4 \\
+%s\midrule
+1 & 3 & -1 & -1 & -1 & -1 \\
+ & 4 & -1 & -1 & -1 & -1 \\
+2 & 3 & -1 & -1 & -1 & -1 \\
+ & 4 & -1 & -1 & -1 & -1 \\
+\bottomrule
+\end{tabular}
+""" % tuple(
+ list(col_names) + [idx_names_row]
+ )
+ assert observed == expected
+
+ @pytest.mark.parametrize("one_row", [True, False])
+ def test_to_latex_multiindex_nans(self, one_row):
+ # GH 14249
+ df = DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]})
+ if one_row:
+ df = df.iloc[[0]]
+ observed = df.set_index(["a", "b"]).to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & c \\
+ a & b & \\
+ \midrule
+ NaN & 2 & 4 \\
+ """
+ )
+ if not one_row:
+ expected += r"""1.000000 & 3 & 5 \\
+"""
+ expected += r"""\bottomrule
+\end{tabular}
+"""
+ assert observed == expected
+
+ def test_to_latex_non_string_index(self):
+ # GH 19981
+ df = DataFrame([[1, 2, 3]] * 2).set_index([0, 1])
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & 2 \\
+ 0 & 1 & \\
+ \midrule
+ 1 & 2 & 3 \\
+ & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_multirow(self):
+ # GH 16719
+ mi = pd.MultiIndex.from_product(
+ [[0.0, 1.0], [3.0, 2.0, 1.0], ["0", "1"]], names=["i", "val0", "val1"]
+ )
+ df = DataFrame(index=mi)
+ result = df.to_latex(multirow=True, escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ i & val0 & val1 \\
+ \midrule
+ \multirow[t]{6}{*}{0.000000} & \multirow[t]{2}{*}{3.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{2.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{1.000000} & 0 \\
+ & & 1 \\
+ \cline{1-3} \cline{2-3}
+ \multirow[t]{6}{*}{1.000000} & \multirow[t]{2}{*}{3.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{2.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{1.000000} & 0 \\
+ & & 1 \\
+ \cline{1-3} \cline{2-3}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py
new file mode 100644
index 0000000000000000000000000000000000000000..85eca834ff0d43ca30eb4043ed9f97fd3807899b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py
@@ -0,0 +1,106 @@
+from io import (
+ BytesIO,
+ StringIO,
+)
+
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
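+# to_markdown delegates the actual table rendering to the optional tabulate
+# package, so skip these tests when it is not installed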
+pytest.importorskip("tabulate")
+
+
+def test_simple():
+ buf = StringIO()
+ df = pd.DataFrame([1, 2, 3])
+ df.to_markdown(buf=buf)
+ result = buf.getvalue()
+ assert (
+ result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
+ )
+
+
+def test_empty_frame():
+ buf = StringIO()
+ df = pd.DataFrame({"id": [], "first_name": [], "last_name": []}).set_index("id")
+ df.to_markdown(buf=buf)
+ result = buf.getvalue()
+ assert result == (
+ "| id | first_name | last_name |\n"
+ "|------|--------------|-------------|"
+ )
+
+
+def test_other_tablefmt():
+ buf = StringIO()
+ df = pd.DataFrame([1, 2, 3])
+ df.to_markdown(buf=buf, tablefmt="jira")
+ result = buf.getvalue()
+ assert result == "|| || 0 ||\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
+
+
+def test_other_headers():
+ buf = StringIO()
+ df = pd.DataFrame([1, 2, 3])
+ df.to_markdown(buf=buf, headers=["foo", "bar"])
+ result = buf.getvalue()
+ assert result == (
+ "| foo | bar |\n|------:|------:|\n| 0 "
+ "| 1 |\n| 1 | 2 |\n| 2 | 3 |"
+ )
+
+
+def test_series():
+ buf = StringIO()
+ s = pd.Series([1, 2, 3], name="foo")
+ s.to_markdown(buf=buf)
+ result = buf.getvalue()
+ assert result == (
+ "| | foo |\n|---:|------:|\n| 0 | 1 "
+ "|\n| 1 | 2 |\n| 2 | 3 |"
+ )
+
+
+def test_no_buf():
+ df = pd.DataFrame([1, 2, 3])
+ result = df.to_markdown()
+ assert (
+ result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
+ )
+
+
+@pytest.mark.parametrize("index", [True, False])
+def test_index(index):
+ # GH 32667
+
+ df = pd.DataFrame([1, 2, 3])
+
+ result = df.to_markdown(index=index)
+
+ if index:
+ expected = (
+ "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
+ )
+ else:
+ expected = "| 0 |\n|----:|\n| 1 |\n| 2 |\n| 3 |"
+ assert result == expected
+
+
+def test_showindex_disallowed_in_kwargs():
+ # GH 32667; disallowing showindex in kwargs enforced in 2.0
+ df = pd.DataFrame([1, 2, 3])
+ with pytest.raises(ValueError, match="Pass 'index' instead of 'showindex"):
+ df.to_markdown(index=True, showindex=True)
+
+
+def test_markdown_pos_args_deprecatation():
+ # GH-54229
+ df = pd.DataFrame({"a": [1, 2, 3]})
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_markdown except for the "
+ r"argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ buffer = BytesIO()
+ df.to_markdown(buffer, "grid")
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e5a5005cb0761c104ed6de26cd5a6ef730c08d5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py
@@ -0,0 +1,1216 @@
+from datetime import (
+ datetime,
+ timedelta,
+)
+from io import StringIO
+import re
+import sys
+from textwrap import dedent
+
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas import (
+ CategoricalIndex,
+ DataFrame,
+ Index,
+ NaT,
+ Series,
+ Timestamp,
+ concat,
+ date_range,
+ get_option,
+ option_context,
+ read_csv,
+ timedelta_range,
+ to_datetime,
+)
+import pandas._testing as tm
+
+
+def _three_digit_exp():
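+    # some platforms (notably older Windows C runtimes) format exponents with
+    # three digits, e.g. "1.7e+008"; tests below branch on this probe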
+ return f"{1.7e8:.4g}" == "1.7e+008"
+
+
+class TestDataFrameToStringFormatters:
+ def test_to_string_masked_ea_with_formatter(self):
+ # GH#39336
+ df = DataFrame(
+ {
+ "a": Series([0.123456789, 1.123456789], dtype="Float64"),
+ "b": Series([1, 2], dtype="Int64"),
+ }
+ )
+ result = df.to_string(formatters=["{:.2f}".format, "{:.2f}".format])
+ expected = dedent(
+ """\
+ a b
+ 0 0.12 1.00
+ 1 1.12 2.00"""
+ )
+ assert result == expected
+
+ def test_to_string_with_formatters(self):
+ df = DataFrame(
+ {
+ "int": [1, 2, 3],
+ "float": [1.0, 2.0, 3.0],
+ "object": [(1, 2), True, False],
+ },
+ columns=["int", "float", "object"],
+ )
+
+ formatters = [
+ ("int", lambda x: f"0x{x:x}"),
+ ("float", lambda x: f"[{x: 4.1f}]"),
+ ("object", lambda x: f"-{x!s}-"),
+ ]
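+        # to_string accepts formatters as a column->callable mapping or a positional list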
+ result = df.to_string(formatters=dict(formatters))
+ result2 = df.to_string(formatters=list(zip(*formatters))[1])
+ assert result == (
+ " int float object\n"
+ "0 0x1 [ 1.0] -(1, 2)-\n"
+ "1 0x2 [ 2.0] -True-\n"
+ "2 0x3 [ 3.0] -False-"
+ )
+ assert result == result2
+
+ def test_to_string_with_datetime64_monthformatter(self):
+ months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
+ x = DataFrame({"months": months})
+
+ def format_func(x):
+ return x.strftime("%Y-%m")
+
+ result = x.to_string(formatters={"months": format_func})
+ expected = dedent(
+ """\
+ months
+ 0 2016-01
+ 1 2016-02"""
+ )
+ assert result.strip() == expected
+
+ def test_to_string_with_datetime64_hourformatter(self):
+ x = DataFrame(
+ {"hod": to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")}
+ )
+
+ def format_func(x):
+ return x.strftime("%H:%M")
+
+ result = x.to_string(formatters={"hod": format_func})
+ expected = dedent(
+ """\
+ hod
+ 0 10:10
+ 1 12:12"""
+ )
+ assert result.strip() == expected
+
+ def test_to_string_with_formatters_unicode(self):
+ df = DataFrame({"c/\u03c3": [1, 2, 3]})
+ result = df.to_string(formatters={"c/\u03c3": str})
+ expected = dedent(
+ """\
+ c/\u03c3
+ 0 1
+ 1 2
+ 2 3"""
+ )
+ assert result == expected
+
+ def test_to_string_index_formatter(self):
+ df = DataFrame([range(5), range(5, 10), range(10, 15)])
+
+ rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
+
+ xp = dedent(
+ """\
+ 0 1 2 3 4
+ a 0 1 2 3 4
+ b 5 6 7 8 9
+ c 10 11 12 13 14\
+ """
+ )
+ assert rs == xp
+
+ def test_no_extra_space(self):
+ # GH#52690: Check that no extra space is given
+ col1 = "TEST"
+ col2 = "PANDAS"
+ col3 = "to_string"
+ expected = f"{col1:<6s} {col2:<7s} {col3:<10s}"
+ df = DataFrame([{"col1": "TEST", "col2": "PANDAS", "col3": "to_string"}])
+ d = {"col1": "{:<6s}".format, "col2": "{:<7s}".format, "col3": "{:<10s}".format}
+ result = df.to_string(index=False, header=False, formatters=d)
+ assert result == expected
+
+
+class TestDataFrameToStringColSpace:
+ def test_to_string_with_column_specific_col_space_raises(self):
+ df = DataFrame(
+ np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
+ )
+
+ msg = (
+ "Col_space length\\(\\d+\\) should match "
+ "DataFrame number of columns\\(\\d+\\)"
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_string(col_space=[30, 40])
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_string(col_space=[30, 40, 50, 60])
+
+ msg = "unknown column"
+ with pytest.raises(ValueError, match=msg):
+ df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
+
+ def test_to_string_with_column_specific_col_space(self):
+ df = DataFrame(
+ np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
+ )
+
+ result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
+        # 3 separating spaces + index column width + col_space for (a, b, c)
+ assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
+
+ result = df.to_string(col_space=[10, 11, 12])
+ assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
+
+ def test_to_string_with_col_space(self):
+ df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
+ c10 = len(df.to_string(col_space=10).split("\n")[1])
+ c20 = len(df.to_string(col_space=20).split("\n")[1])
+ c30 = len(df.to_string(col_space=30).split("\n")[1])
+ assert c10 < c20 < c30
+
+ # GH#8230
+ # col_space wasn't being applied with header=False
+ with_header = df.to_string(col_space=20)
+ with_header_row1 = with_header.splitlines()[1]
+ no_header = df.to_string(col_space=20, header=False)
+ assert len(with_header_row1) == len(no_header)
+
+ def test_to_string_repr_tuples(self):
+ buf = StringIO()
+
+ df = DataFrame({"tups": list(zip(range(10), range(10)))})
+ repr(df)
+ df.to_string(col_space=10, buf=buf)
+
+
+class TestDataFrameToStringHeader:
+ def test_to_string_header_false(self):
+ # GH#49230
+ df = DataFrame([1, 2])
+ df.index.name = "a"
+ s = df.to_string(header=False)
+ expected = "a \n0 1\n1 2"
+ assert s == expected
+
+ df = DataFrame([[1, 2], [3, 4]])
+ df.index.name = "a"
+ s = df.to_string(header=False)
+ expected = "a \n0 1 2\n1 3 4"
+ assert s == expected
+
+ def test_to_string_multindex_header(self):
+ # GH#16718
+ df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(["a", "b"])
+ res = df.to_string(header=["r1", "r2"])
+ exp = " r1 r2\na b \n0 1 2 3"
+ assert res == exp
+
+ def test_to_string_no_header(self):
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(header=False)
+ expected = "0 1 4\n1 2 5\n2 3 6"
+
+ assert df_s == expected
+
+ def test_to_string_specified_header(self):
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(header=["X", "Y"])
+ expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
+
+ assert df_s == expected
+
+ msg = "Writing 2 cols but got 1 aliases"
+ with pytest.raises(ValueError, match=msg):
+ df.to_string(header=["X"])
+
+
+class TestDataFrameToStringLineWidth:
+ def test_to_string_line_width(self):
+ df = DataFrame(123, index=range(10, 15), columns=range(30))
+ lines = df.to_string(line_width=80)
+ assert max(len(line) for line in lines.split("\n")) == 80
+
+ def test_to_string_line_width_no_index(self):
+ # GH#13998, GH#22505
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, index=False)
+ expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, index=False)
+ expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
+
+ df_s = df.to_string(line_width=1, index=False)
+ expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
+
+ assert df_s == expected
+
+ def test_to_string_line_width_no_header(self):
+ # GH#53054
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, header=False)
+ expected = "0 1 \\\n1 2 \n2 3 \n\n0 4 \n1 5 \n2 6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, header=False)
+ expected = "0 11 \\\n1 22 \n2 33 \n\n0 4 \n1 5 \n2 6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
+
+ df_s = df.to_string(line_width=1, header=False)
+ expected = "0 11 \\\n1 22 \n2 -33 \n\n0 4 \n1 5 \n2 -6 "
+
+ assert df_s == expected
+
+ def test_to_string_line_width_with_both_index_and_header(self):
+ # GH#53054
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1)
+ expected = (
+ " x \\\n0 1 \n1 2 \n2 3 \n\n y \n0 4 \n1 5 \n2 6 "
+ )
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1)
+ expected = (
+ " x \\\n0 11 \n1 22 \n2 33 \n\n y \n0 4 \n1 5 \n2 6 "
+ )
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
+
+ df_s = df.to_string(line_width=1)
+ expected = (
+ " x \\\n0 11 \n1 22 \n2 -33 \n\n y \n0 4 \n1 5 \n2 -6 "
+ )
+
+ assert df_s == expected
+
+ def test_to_string_line_width_no_index_no_header(self):
+ # GH#53054
+ df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, index=False, header=False)
+ expected = "1 \\\n2 \n3 \n\n4 \n5 \n6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
+
+ df_s = df.to_string(line_width=1, index=False, header=False)
+ expected = "11 \\\n22 \n33 \n\n4 \n5 \n6 "
+
+ assert df_s == expected
+
+ df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
+
+ df_s = df.to_string(line_width=1, index=False, header=False)
+ expected = " 11 \\\n 22 \n-33 \n\n 4 \n 5 \n-6 "
+
+ assert df_s == expected
+
+
+class TestToStringNumericFormatting:
+ def test_to_string_float_format_no_fixed_width(self):
+ # GH#21625
+ df = DataFrame({"x": [0.19999]})
+ expected = " x\n0 0.200"
+ assert df.to_string(float_format="%.3f") == expected
+
+ # GH#22270
+ df = DataFrame({"x": [100.0]})
+ expected = " x\n0 100"
+ assert df.to_string(float_format="%.0f") == expected
+
+ def test_to_string_small_float_values(self):
+ df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
+
+ result = df.to_string()
+        # exponent width is platform-dependent; see _three_digit_exp above
+ if _three_digit_exp():
+ expected = (
+ " a\n"
+ "0 1.500000e+000\n"
+ "1 1.000000e-017\n"
+ "2 -5.500000e-007"
+ )
+ else:
+ expected = (
+ " a\n"
+ "0 1.500000e+00\n"
+ "1 1.000000e-17\n"
+ "2 -5.500000e-07"
+ )
+ assert result == expected
+
+ # but not all exactly zero
+ df = df * 0
+ result = df.to_string()
+ expected = " 0\n0 0\n1 0\n2 -0"
+ # TODO: assert that these match??
+
+ def test_to_string_complex_float_formatting(self):
+ # GH #25514, 25745
+ with option_context("display.precision", 5):
+ df = DataFrame(
+ {
+ "x": [
+ (0.4467846931321966 + 0.0715185102060818j),
+ (0.2739442392974528 + 0.23515228785438969j),
+ (0.26974928742135185 + 0.3250604054898979j),
+ (-1j),
+ ]
+ }
+ )
+ result = df.to_string()
+ expected = (
+ " x\n0 0.44678+0.07152j\n"
+ "1 0.27394+0.23515j\n"
+ "2 0.26975+0.32506j\n"
+ "3 -0.00000-1.00000j"
+ )
+ assert result == expected
+
+ def test_to_string_format_inf(self):
+ # GH#24861
+ df = DataFrame(
+ {
+ "A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
+ "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
+ }
+ )
+ result = df.to_string()
+
+ expected = (
+ " A B\n"
+ "0 -inf -inf\n"
+ "1 inf inf\n"
+ "2 -1.0000 foo\n"
+ "3 -2.1234 foooo\n"
+ "4 3.0000 fooooo\n"
+ "5 4.0000 bar"
+ )
+ assert result == expected
+
+ df = DataFrame(
+ {
+ "A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
+ "B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
+ }
+ )
+ result = df.to_string()
+
+ expected = (
+ " A B\n"
+ "0 -inf -inf\n"
+ "1 inf inf\n"
+ "2 -1.0 foo\n"
+ "3 -2.0 foooo\n"
+ "4 3.0 fooooo\n"
+ "5 4.0 bar"
+ )
+ assert result == expected
+
+ def test_to_string_int_formatting(self):
+ df = DataFrame({"x": [-15, 20, 25, -35]})
+ assert issubclass(df["x"].dtype.type, np.integer)
+
+ output = df.to_string()
+ expected = " x\n0 -15\n1 20\n2 25\n3 -35"
+ assert output == expected
+
+ def test_to_string_float_formatting(self):
+ with option_context(
+ "display.precision",
+ 5,
+ "display.notebook_repr_html",
+ False,
+ ):
+ df = DataFrame(
+ {"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
+ )
+
+ df_s = df.to_string()
+
+ if _three_digit_exp():
+ expected = (
+ " x\n0 0.00000e+000\n1 2.50000e-001\n"
+ "2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
+ "5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
+ "8 -1.00000e+006"
+ )
+ else:
+ expected = (
+ " x\n0 0.00000e+00\n1 2.50000e-01\n"
+ "2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
+ "5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
+ "8 -1.00000e+06"
+ )
+ assert df_s == expected
+
+ df = DataFrame({"x": [3234, 0.253]})
+ df_s = df.to_string()
+
+ expected = " x\n0 3234.000\n1 0.253"
+ assert df_s == expected
+
+ assert get_option("display.precision") == 6
+
+ df = DataFrame({"x": [1e9, 0.2512]})
+ df_s = df.to_string()
+
+ if _three_digit_exp():
+ expected = " x\n0 1.000000e+009\n1 2.512000e-001"
+ else:
+ expected = " x\n0 1.000000e+09\n1 2.512000e-01"
+ assert df_s == expected
+
+
+class TestDataFrameToString:
+ def test_to_string_decimal(self):
+ # GH#23614
+ df = DataFrame({"A": [6.0, 3.1, 2.2]})
+ expected = " A\n0 6,0\n1 3,1\n2 2,2"
+ assert df.to_string(decimal=",") == expected
+
+ def test_to_string_left_justify_cols(self):
+ df = DataFrame({"x": [3234, 0.253]})
+ df_s = df.to_string(justify="left")
+ expected = " x \n0 3234.000\n1 0.253"
+ assert df_s == expected
+
+ def test_to_string_format_na(self):
+ df = DataFrame(
+ {
+ "A": [np.nan, -1, -2.1234, 3, 4],
+ "B": [np.nan, "foo", "foooo", "fooooo", "bar"],
+ }
+ )
+ result = df.to_string()
+
+ expected = (
+ " A B\n"
+ "0 NaN NaN\n"
+ "1 -1.0000 foo\n"
+ "2 -2.1234 foooo\n"
+ "3 3.0000 fooooo\n"
+ "4 4.0000 bar"
+ )
+ assert result == expected
+
+ df = DataFrame(
+ {
+ "A": [np.nan, -1.0, -2.0, 3.0, 4.0],
+ "B": [np.nan, "foo", "foooo", "fooooo", "bar"],
+ }
+ )
+ result = df.to_string()
+
+ expected = (
+ " A B\n"
+ "0 NaN NaN\n"
+ "1 -1.0 foo\n"
+ "2 -2.0 foooo\n"
+ "3 3.0 fooooo\n"
+ "4 4.0 bar"
+ )
+ assert result == expected
+
+ def test_to_string_with_dict_entries(self):
+ df = DataFrame({"A": [{"a": 1, "b": 2}]})
+
+ val = df.to_string()
+ assert "'a': 1" in val
+ assert "'b': 2" in val
+
+ def test_to_string_with_categorical_columns(self):
+ # GH#35439
+ data = [[4, 2], [3, 2], [4, 3]]
+ cols = ["aaaaaaaaa", "b"]
+ df = DataFrame(data, columns=cols)
+ df_cat_cols = DataFrame(data, columns=CategoricalIndex(cols))
+
+ assert df.to_string() == df_cat_cols.to_string()
+
+ def test_repr_embedded_ndarray(self):
+ arr = np.empty(10, dtype=[("err", object)])
+ for i in range(len(arr)):
+ arr["err"][i] = np.random.default_rng(2).standard_normal(i)
+
+ df = DataFrame(arr)
+ repr(df["err"])
+ repr(df)
+ df.to_string()
+
+ def test_to_string_truncate(self):
+        # GH 9784 - don't truncate when calling DataFrame.to_string
+ df = DataFrame(
+ [
+ {
+ "a": "foo",
+ "b": "bar",
+ "c": "let's make this a very VERY long line that is longer "
+ "than the default 50 character limit",
+ "d": 1,
+ },
+ {"a": "foo", "b": "bar", "c": "stuff", "d": 1},
+ ]
+ )
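+        # NB: the set_index result is not assigned, so df keeps its RangeIndex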
+ df.set_index(["a", "b", "c"])
+ assert df.to_string() == (
+ " a b "
+ " c d\n"
+ "0 foo bar let's make this a very VERY long line t"
+ "hat is longer than the default 50 character limit 1\n"
+ "1 foo bar "
+ " stuff 1"
+ )
+ with option_context("max_colwidth", 20):
+ # the display option has no effect on the to_string method
+ assert df.to_string() == (
+ " a b "
+ " c d\n"
+ "0 foo bar let's make this a very VERY long line t"
+ "hat is longer than the default 50 character limit 1\n"
+ "1 foo bar "
+ " stuff 1"
+ )
+ assert df.to_string(max_colwidth=20) == (
+ " a b c d\n"
+ "0 foo bar let's make this ... 1\n"
+ "1 foo bar stuff 1"
+ )
+
+ @pytest.mark.parametrize(
+ "input_array, expected",
+ [
+ ({"A": ["a"]}, "A\na"),
+ ({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"),
+ ({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"),
+ ],
+ )
+ def test_format_remove_leading_space_dataframe(self, input_array, expected):
+ # GH#24980
+ df = DataFrame(input_array).to_string(index=False)
+ assert df == expected
+
+ @pytest.mark.parametrize(
+ "data,expected",
+ [
+ (
+ {"col1": [1, 2], "col2": [3, 4]},
+ " col1 col2\n0 1 3\n1 2 4",
+ ),
+ (
+ {"col1": ["Abc", 0.756], "col2": [np.nan, 4.5435]},
+ " col1 col2\n0 Abc NaN\n1 0.756 4.5435",
+ ),
+ (
+ {"col1": [np.nan, "a"], "col2": [0.009, 3.543], "col3": ["Abc", 23]},
+ " col1 col2 col3\n0 NaN 0.009 Abc\n1 a 3.543 23",
+ ),
+ ],
+ )
+ def test_to_string_max_rows_zero(self, data, expected):
+ # GH#35394
+ result = DataFrame(data=data).to_string(max_rows=0)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "max_cols, max_rows, expected",
+ [
+ (
+ 10,
+ None,
+ " 0 1 2 3 4 ... 6 7 8 9 10\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0",
+ ),
+ (
+ None,
+ 2,
+ " 0 1 2 3 4 5 6 7 8 9 10\n"
+ " 0 0 0 0 0 0 0 0 0 0 0\n"
+ " .. .. .. .. .. .. .. .. .. .. ..\n"
+ " 0 0 0 0 0 0 0 0 0 0 0",
+ ),
+ (
+ 10,
+ 2,
+ " 0 1 2 3 4 ... 6 7 8 9 10\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
+ " .. .. .. .. .. ... .. .. .. .. ..\n"
+ " 0 0 0 0 0 ... 0 0 0 0 0",
+ ),
+ (
+ 9,
+ 2,
+ " 0 1 2 3 ... 7 8 9 10\n"
+ " 0 0 0 0 ... 0 0 0 0\n"
+ " .. .. .. .. ... .. .. .. ..\n"
+ " 0 0 0 0 ... 0 0 0 0",
+ ),
+ (
+ 1,
+ 1,
+ " 0 ...\n 0 ...\n.. ...",
+ ),
+ ],
+ )
+ def test_truncation_no_index(self, max_cols, max_rows, expected):
+ df = DataFrame([[0] * 11] * 4)
+ assert (
+ df.to_string(index=False, max_cols=max_cols, max_rows=max_rows) == expected
+ )
+
+ def test_to_string_no_index(self):
+ # GH#16839, GH#13032
+ df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
+
+ df_s = df.to_string(index=False)
+ # Leading space is expected for positive numbers.
+ expected = " x y z\n11 33 AAA\n22 -44 "
+ assert df_s == expected
+
+ df_s = df[["y", "x", "z"]].to_string(index=False)
+ expected = " y x z\n 33 11 AAA\n-44 22 "
+ assert df_s == expected
+
+ def test_to_string_unicode_columns(self, float_frame):
+ df = DataFrame({"\u03c3": np.arange(10.0)})
+
+ buf = StringIO()
+ df.to_string(buf=buf)
+ buf.getvalue()
+
+ buf = StringIO()
+ df.info(buf=buf)
+ buf.getvalue()
+
+ result = float_frame.to_string()
+ assert isinstance(result, str)
+
+ @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+ def test_to_string_na_rep_and_float_format(self, na_rep):
+ # GH#13828
+ df = DataFrame([["A", 1.2225], ["A", None]], columns=["Group", "Data"])
+ result = df.to_string(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = dedent(
+ f"""\
+ Group Data
+ 0 A 1.22
+ 1 A {na_rep}"""
+ )
+ assert result == expected
+
+ def test_to_string_string_dtype(self):
+ # GH#50099
+ pytest.importorskip("pyarrow")
+ df = DataFrame(
+ {"x": ["foo", "bar", "baz"], "y": ["a", "b", "c"], "z": [1, 2, 3]}
+ )
+ df = df.astype(
+ {"x": "string[pyarrow]", "y": "string[python]", "z": "int64[pyarrow]"}
+ )
+ result = df.dtypes.to_string()
+ expected = dedent(
+ """\
+ x string[pyarrow]
+ y string[python]
+ z int64[pyarrow]"""
+ )
+ assert result == expected
+
+ def test_to_string_pos_args_deprecation(self):
+ # GH#54229
+ df = DataFrame({"a": [1, 2, 3]})
+ msg = (
+ "Starting with pandas version 3.0 all arguments of to_string "
+ "except for the "
+ "argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ buf = StringIO()
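+            # positional args map to buf, columns, col_space, header, index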
+ df.to_string(buf, None, None, True, True)
+
+ def test_to_string_utf8_columns(self):
+ n = "\u05d0".encode()
+ df = DataFrame([1, 2], columns=[n])
+
+ with option_context("display.max_rows", 1):
+ repr(df)
+
+ def test_to_string_unicode_two(self):
+ dm = DataFrame({"c/\u03c3": []})
+ buf = StringIO()
+ dm.to_string(buf)
+
+ def test_to_string_unicode_three(self):
+ dm = DataFrame(["\xc2"])
+ buf = StringIO()
+ dm.to_string(buf)
+
+ def test_to_string_with_float_index(self):
+ index = Index([1.5, 2, 3, 4, 5])
+ df = DataFrame(np.arange(5), index=index)
+
+ result = df.to_string()
+ expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
+ assert result == expected
+
+ def test_to_string(self):
+ # big mixed
+ biggie = DataFrame(
+ {
+ "A": np.random.default_rng(2).standard_normal(200),
+ "B": Index([f"{i}?!" for i in range(200)]),
+ },
+ )
+
+ biggie.loc[:20, "A"] = np.nan
+ biggie.loc[:20, "B"] = np.nan
+ s = biggie.to_string()
+
+ buf = StringIO()
+ retval = biggie.to_string(buf=buf)
+ assert retval is None
+ assert buf.getvalue() == s
+
+ assert isinstance(s, str)
+
+ # print in right order
+ result = biggie.to_string(
+ columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
+ )
+ lines = result.split("\n")
+ header = lines[0].strip().split()
+ joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
+ recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
+ tm.assert_series_equal(recons["B"], biggie["B"])
+ assert recons["A"].count() == biggie["A"].count()
+ assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
+
+ # FIXME: don't leave commented-out
+ # expected = ['B', 'A']
+ # assert header == expected
+
+ result = biggie.to_string(columns=["A"], col_space=17)
+ header = result.split("\n")[0].strip().split()
+ expected = ["A"]
+ assert header == expected
+
+ biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
+
+ biggie.to_string(columns=["B", "A"], float_format=str)
+ biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
+
+ frame = DataFrame(index=np.arange(200))
+ frame.to_string()
+
+ # TODO: split or simplify this test?
+ @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="fix when arrow is default")
+ def test_to_string_index_with_nan(self):
+ # GH#2850
+ df = DataFrame(
+ {
+ "id1": {0: "1a3", 1: "9h4"},
+ "id2": {0: np.nan, 1: "d67"},
+ "id3": {0: "78d", 1: "79d"},
+ "value": {0: 123, 1: 64},
+ }
+ )
+
+ # multi-index
+ y = df.set_index(["id1", "id2", "id3"])
+ result = y.to_string()
+ expected = (
+ " value\nid1 id2 id3 \n"
+ "1a3 NaN 78d 123\n9h4 d67 79d 64"
+ )
+ assert result == expected
+
+ # index
+ y = df.set_index("id2")
+ result = y.to_string()
+ expected = (
+ " id1 id3 value\nid2 \n"
+ "NaN 1a3 78d 123\nd67 9h4 79d 64"
+ )
+ assert result == expected
+
+ # with append (this failed in 0.12)
+ y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
+ result = y.to_string()
+ expected = (
+ " value\nid1 id2 id3 \n"
+ "1a3 NaN 78d 123\n9h4 d67 79d 64"
+ )
+ assert result == expected
+
+ # all-nan in mi
+ df2 = df.copy()
+ df2.loc[:, "id2"] = np.nan
+ y = df2.set_index("id2")
+ result = y.to_string()
+ expected = (
+ " id1 id3 value\nid2 \n"
+ "NaN 1a3 78d 123\nNaN 9h4 79d 64"
+ )
+ assert result == expected
+
+ # partial nan in mi
+ df2 = df.copy()
+ df2.loc[:, "id2"] = np.nan
+ y = df2.set_index(["id2", "id3"])
+ result = y.to_string()
+ expected = (
+ " id1 value\nid2 id3 \n"
+ "NaN 78d 1a3 123\n 79d 9h4 64"
+ )
+ assert result == expected
+
+ df = DataFrame(
+ {
+ "id1": {0: np.nan, 1: "9h4"},
+ "id2": {0: np.nan, 1: "d67"},
+ "id3": {0: np.nan, 1: "79d"},
+ "value": {0: 123, 1: 64},
+ }
+ )
+
+ y = df.set_index(["id1", "id2", "id3"])
+ result = y.to_string()
+ expected = (
+ " value\nid1 id2 id3 \n"
+ "NaN NaN NaN 123\n9h4 d67 79d 64"
+ )
+ assert result == expected
+
+ def test_to_string_nonunicode_nonascii_alignment(self):
+ df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
+ rep_str = df.to_string()
+ lines = rep_str.split("\n")
+ assert len(lines[1]) == len(lines[2])
+
+ def test_unicode_problem_decoding_as_ascii(self):
+ df = DataFrame({"c/\u03c3": Series({"test": np.nan})})
+ str(df.to_string())
+
+ def test_to_string_repr_unicode(self):
+ buf = StringIO()
+
+ unicode_values = ["\u03c3"] * 10
+ unicode_values = np.array(unicode_values, dtype=object)
+ df = DataFrame({"unicode": unicode_values})
+ df.to_string(col_space=10, buf=buf)
+
+ # it works!
+ repr(df)
+        # it works even if sys.stdin is None
+ _stdin = sys.stdin
+ try:
+ sys.stdin = None
+ repr(df)
+ finally:
+ sys.stdin = _stdin
+
+
+class TestSeriesToString:
+ def test_to_string_without_index(self):
+ # GH#11729 Test index=False option
+ ser = Series([1, 2, 3, 4])
+ result = ser.to_string(index=False)
+ expected = "\n".join(["1", "2", "3", "4"])
+ assert result == expected
+
+ def test_to_string_name(self):
+ ser = Series(range(100), dtype="int64")
+ ser.name = "myser"
+ res = ser.to_string(max_rows=2, name=True)
+ exp = "0 0\n ..\n99 99\nName: myser"
+ assert res == exp
+ res = ser.to_string(max_rows=2, name=False)
+ exp = "0 0\n ..\n99 99"
+ assert res == exp
+
+ def test_to_string_dtype(self):
+ ser = Series(range(100), dtype="int64")
+ res = ser.to_string(max_rows=2, dtype=True)
+ exp = "0 0\n ..\n99 99\ndtype: int64"
+ assert res == exp
+ res = ser.to_string(max_rows=2, dtype=False)
+ exp = "0 0\n ..\n99 99"
+ assert res == exp
+
+ def test_to_string_length(self):
+ ser = Series(range(100), dtype="int64")
+ res = ser.to_string(max_rows=2, length=True)
+ exp = "0 0\n ..\n99 99\nLength: 100"
+ assert res == exp
+
+ def test_to_string_na_rep(self):
+ ser = Series(index=range(100), dtype=np.float64)
+ res = ser.to_string(na_rep="foo", max_rows=2)
+ exp = "0 foo\n ..\n99 foo"
+ assert res == exp
+
+ def test_to_string_float_format(self):
+ ser = Series(range(10), dtype="float64")
+ res = ser.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
+ exp = "0 0.0\n ..\n9 9.0"
+ assert res == exp
+
+ def test_to_string_header(self):
+ ser = Series(range(10), dtype="int64")
+ ser.index.name = "foo"
+ res = ser.to_string(header=True, max_rows=2)
+ exp = "foo\n0 0\n ..\n9 9"
+ assert res == exp
+ res = ser.to_string(header=False, max_rows=2)
+ exp = "0 0\n ..\n9 9"
+ assert res == exp
+
+ def test_to_string_empty_col(self):
+ # GH#13653
+ ser = Series(["", "Hello", "World", "", "", "Mooooo", "", ""])
+ res = ser.to_string(index=False)
+ exp = " \n Hello\n World\n \n \nMooooo\n \n "
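+        # ``exp`` contains no regex metacharacters and re.match anchors at
+        # the start, so this is effectively a literal prefix comparison.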
+ assert re.match(exp, res)
+
+ def test_to_string_timedelta64(self):
+ Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
+
+ ser = Series(date_range("2012-1-1", periods=3, freq="D"))
+
+ # GH#2146
+
+ # adding NaTs
+ y = ser - ser.shift(1)
+ result = y.to_string()
+ assert "1 days" in result
+ assert "00:00:00" not in result
+ assert "NaT" in result
+
+ # with frac seconds
+ o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
+ y = ser - o
+ result = y.to_string()
+ assert "-1 days +23:59:59.999850" in result
+
+ # rounding?
+ o = Series([datetime(2012, 1, 1, 1)] * 3)
+ y = ser - o
+ result = y.to_string()
+ assert "-1 days +23:00:00" in result
+ assert "1 days 23:00:00" in result
+
+ o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
+ y = ser - o
+ result = y.to_string()
+ assert "-1 days +22:59:00" in result
+ assert "1 days 22:59:00" in result
+
+ o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
+ y = ser - o
+ result = y.to_string()
+ assert "-1 days +22:58:59.999850" in result
+ assert "0 days 22:58:59.999850" in result
+
+ # neg time
+ td = timedelta(minutes=5, seconds=3)
+ s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
+ y = ser - s2
+ result = y.to_string()
+ assert "-1 days +23:54:57" in result
+
+        td = timedelta(microseconds=550)
+        y = ser - td
+ result = y.to_string()
+ assert "2012-01-01 23:59:59.999450" in result
+
+ # no boxing of the actual elements
+ td = Series(timedelta_range("1 days", periods=3))
+ result = td.to_string()
+ assert result == "0 1 days\n1 2 days\n2 3 days"
+
+ def test_to_string(self):
+ ts = Series(
+ np.arange(10, dtype=np.float64),
+ index=date_range("2020-01-01", periods=10, freq="B"),
+ )
+ buf = StringIO()
+
+ s = ts.to_string()
+
+ retval = ts.to_string(buf=buf)
+ assert retval is None
+ assert buf.getvalue().strip() == s
+
+ # pass float_format
+ format = "%.4f".__mod__
+ result = ts.to_string(float_format=format)
+ result = [x.split()[1] for x in result.split("\n")[:-1]]
+ expected = [format(x) for x in ts]
+ assert result == expected
+
+ # empty string
+ result = ts[:0].to_string()
+ assert result == "Series([], Freq: B)"
+
+ result = ts[:0].to_string(length=0)
+ assert result == "Series([], Freq: B)"
+
+ # name and length
+ cp = ts.copy()
+ cp.name = "foo"
+ result = cp.to_string(length=True, name=True, dtype=True)
+ last_line = result.split("\n")[-1].strip()
+ assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
+
+ @pytest.mark.parametrize(
+ "input_array, expected",
+ [
+ ("a", "a"),
+ (["a", "b"], "a\nb"),
+ ([1, "a"], "1\na"),
+ (1, "1"),
+ ([0, -1], " 0\n-1"),
+ (1.0, "1.0"),
+ ([" a", " b"], " a\n b"),
+ ([".1", "1"], ".1\n 1"),
+ (["10", "-10"], " 10\n-10"),
+ ],
+ )
+ def test_format_remove_leading_space_series(self, input_array, expected):
+ # GH: 24980
+ ser = Series(input_array)
+ result = ser.to_string(index=False)
+ assert result == expected
+
+ def test_to_string_complex_number_trims_zeros(self):
+ ser = Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j])
+ result = ser.to_string()
+ expected = dedent(
+ """\
+ 0 1.00+1.00j
+ 1 1.00+1.00j
+ 2 1.05+1.00j"""
+ )
+ assert result == expected
+
+ def test_nullable_float_to_string(self, float_ea_dtype):
+ # https://github.com/pandas-dev/pandas/issues/36775
+ dtype = float_ea_dtype
+ ser = Series([0.0, 1.0, None], dtype=dtype)
+ result = ser.to_string()
+ expected = dedent(
+ """\
+ 0 0.0
+ 1 1.0
+ 2 """
+ )
+ assert result == expected
+
+ def test_nullable_int_to_string(self, any_int_ea_dtype):
+ # https://github.com/pandas-dev/pandas/issues/36775
+ dtype = any_int_ea_dtype
+ ser = Series([0, 1, None], dtype=dtype)
+ result = ser.to_string()
+ expected = dedent(
+ """\
+ 0 0
+ 1 1
+ 2 """
+ )
+ assert result == expected
+
+ def test_to_string_mixed(self):
+ ser = Series(["foo", np.nan, -1.23, 4.56])
+ result = ser.to_string()
+ expected = "".join(["0 foo\n", "1 NaN\n", "2 -1.23\n", "3 4.56"])
+ assert result == expected
+
+ # but don't count NAs as floats
+ ser = Series(["foo", np.nan, "bar", "baz"])
+ result = ser.to_string()
+ expected = "".join(["0 foo\n", "1 NaN\n", "2 bar\n", "3 baz"])
+ assert result == expected
+
+ ser = Series(["foo", 5, "bar", "baz"])
+ result = ser.to_string()
+ expected = "".join(["0 foo\n", "1 5\n", "2 bar\n", "3 baz"])
+ assert result == expected
+
+ def test_to_string_float_na_spacing(self):
+ ser = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
+ ser[::2] = np.nan
+
+ result = ser.to_string()
+ expected = (
+ "0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
+ )
+ assert result == expected
+
+ def test_to_string_with_datetimeindex(self):
+ index = date_range("20130102", periods=6)
+ ser = Series(1, index=index)
+ result = ser.to_string()
+ assert "2013-01-02" in result
+
+ # nat in index
+ s2 = Series(2, index=[Timestamp("20130111"), NaT])
+ ser = concat([s2, ser])
+ result = ser.to_string()
+ assert "NaT" in result
+
+ # nat in summary
+ result = str(s2.index)
+ assert "NaT" in result
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f82549ad05e94587b5b7d9566ea8b50e22b9ccd6
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b1bdb3486879e3b7915965e24244d4b1f9130a1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef3cf22d8699ddb474e074f7fe9cc60d3fd11f8d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..491659da6fd820e1ce8d878617b0483082689688
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e848cd48b42d70033af233bf71c6904b9d069f9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/conftest.py
@@ -0,0 +1,17 @@
+import pytest
+
+
+@pytest.fixture(params=["split", "records", "index", "columns", "values"])
+def orient(request):
+ """
+ Fixture for orients excluding the table format.
+ """
+ return request.param
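+
+
+# Usage sketch (illustrative, not part of the suite): any test that takes an
+# ``orient`` argument runs once per param above, e.g.
+#
+#   def test_roundtrip(orient):
+#       df = pd.DataFrame({"a": [1, 2]})
+#       pd.read_json(StringIO(df.to_json(orient=orient)), orient=orient)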
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff7d34c85c01599707e648c4d9964773d16a13fc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py
@@ -0,0 +1,141 @@
+from io import (
+ BytesIO,
+ StringIO,
+)
+
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+import pandas._testing as tm
+
+
+def test_compression_roundtrip(compression):
+ df = pd.DataFrame(
+ [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
+ index=["A", "B"],
+ columns=["X", "Y", "Z"],
+ )
+
+ with tm.ensure_clean() as path:
+ df.to_json(path, compression=compression)
+ tm.assert_frame_equal(df, pd.read_json(path, compression=compression))
+
+ # explicitly ensure file was compressed.
+ with tm.decompress_file(path, compression) as fh:
+ result = fh.read().decode("utf8")
+ data = StringIO(result)
+ tm.assert_frame_equal(df, pd.read_json(data))
+
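+# Illustrative sketch (not part of the suite): for gzip specifically, the
+# raw bytes on disk start with the gzip magic number even though read_json
+# decompresses transparently:
+#
+#   df.to_json("frame.json.gz", compression="gzip")
+#   with open("frame.json.gz", "rb") as fh:
+#       assert fh.read(2) == b"\x1f\x8b"  # gzip magic bytes
+#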
+
+def test_read_zipped_json(datapath):
+ uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")
+ uncompressed_df = pd.read_json(uncompressed_path)
+
+ compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")
+ compressed_df = pd.read_json(compressed_path, compression="zip")
+
+ tm.assert_frame_equal(uncompressed_df, compressed_df)
+
+
+@td.skip_if_not_us_locale
+@pytest.mark.single_cpu
+def test_with_s3_url(compression, s3_public_bucket, s3so):
+ # Bucket created in tests/io/conftest.py
+ df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+
+ with tm.ensure_clean() as path:
+ df.to_json(path, compression=compression)
+ with open(path, "rb") as f:
+ s3_public_bucket.put_object(Key="test-1", Body=f)
+
+ roundtripped_df = pd.read_json(
+ f"s3://{s3_public_bucket.name}/test-1",
+ compression=compression,
+ storage_options=s3so,
+ )
+ tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_lines_with_compression(compression):
+ with tm.ensure_clean() as path:
+ df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+ df.to_json(path, orient="records", lines=True, compression=compression)
+ roundtripped_df = pd.read_json(path, lines=True, compression=compression)
+ tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_chunksize_with_compression(compression):
+ with tm.ensure_clean() as path:
+ df = pd.read_json(StringIO('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}'))
+ df.to_json(path, orient="records", lines=True, compression=compression)
+
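+        # With ``chunksize`` set, read_json returns a JsonReader context
+        # manager that yields one-row frames here; concatenating the chunks
+        # reconstructs the original frame.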
+ with pd.read_json(
+ path, lines=True, chunksize=1, compression=compression
+ ) as res:
+ roundtripped_df = pd.concat(res)
+ tm.assert_frame_equal(df, roundtripped_df)
+
+
+def test_write_unsupported_compression_type():
+ df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))
+ with tm.ensure_clean() as path:
+ msg = "Unrecognized compression type: unsupported"
+ with pytest.raises(ValueError, match=msg):
+ df.to_json(path, compression="unsupported")
+
+
+def test_read_unsupported_compression_type():
+ with tm.ensure_clean() as path:
+ msg = "Unrecognized compression type: unsupported"
+ with pytest.raises(ValueError, match=msg):
+ pd.read_json(path, compression="unsupported")
+
+
+@pytest.mark.parametrize(
+ "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
+)
+@pytest.mark.parametrize("to_infer", [True, False])
+@pytest.mark.parametrize("read_infer", [True, False])
+def test_to_json_compression(
+ compression_only, read_infer, to_infer, compression_to_extension, infer_string
+):
+ with pd.option_context("future.infer_string", infer_string):
+ # see gh-15008
+ compression = compression_only
+
+ # We'll complete file extension subsequently.
+ filename = "test."
+ filename += compression_to_extension[compression]
+
+ df = pd.DataFrame({"A": [1]})
+
+ to_compression = "infer" if to_infer else compression
+ read_compression = "infer" if read_infer else compression
+
+ with tm.ensure_clean(filename) as path:
+ df.to_json(path, compression=to_compression)
+ result = pd.read_json(path, compression=read_compression)
+ tm.assert_frame_equal(result, df)
+
+
+def test_to_json_compression_mode(compression):
+ # GH 39985 (read_json does not support user-provided binary files)
+ expected = pd.DataFrame({"A": [1]})
+
+ with BytesIO() as buffer:
+ expected.to_json(buffer, compression=compression)
+ # df = pd.read_json(buffer, compression=compression)
+ # tm.assert_frame_equal(expected, df)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc88fc3ba18263ac78f7057bfb5950f0420646b4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py
@@ -0,0 +1,21 @@
+"""
+Tests for the deprecated keyword arguments for `read_json`.
+"""
+from io import StringIO
+
+import pandas as pd
+import pandas._testing as tm
+
+from pandas.io.json import read_json
+
+
+def test_good_kwargs():
+ df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2])
+
+ with tm.assert_produces_warning(None):
+ data1 = StringIO(df.to_json(orient="split"))
+ tm.assert_frame_equal(df, read_json(data1, orient="split"))
+ data2 = StringIO(df.to_json(orient="columns"))
+ tm.assert_frame_equal(df, read_json(data2, orient="columns"))
+ data3 = StringIO(df.to_json(orient="index"))
+ tm.assert_frame_equal(df, read_json(data3, orient="index"))
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc101bb9c8b6d7c5f230b408ecf060a4af520106
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema.py
@@ -0,0 +1,873 @@
+"""Tests for Table Schema integration."""
+from collections import OrderedDict
+from io import StringIO
+import json
+
+import numpy as np
+import pytest
+
+from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
+ DatetimeTZDtype,
+ PeriodDtype,
+)
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.json._table_schema import (
+ as_json_table_type,
+ build_table_schema,
+ convert_json_field_to_pandas_type,
+ convert_pandas_type_to_json_field,
+ set_default_names,
+)
+
+
+@pytest.fixture
+def df_schema():
+ return DataFrame(
+ {
+ "A": [1, 2, 3, 4],
+ "B": ["a", "b", "c", "c"],
+ "C": pd.date_range("2016-01-01", freq="d", periods=4),
+ "D": pd.timedelta_range("1h", periods=4, freq="min"),
+ },
+ index=pd.Index(range(4), name="idx"),
+ )
+
+
+@pytest.fixture
+def df_table():
+ return DataFrame(
+ {
+ "A": [1, 2, 3, 4],
+ "B": ["a", "b", "c", "c"],
+ "C": pd.date_range("2016-01-01", freq="d", periods=4),
+ "D": pd.timedelta_range("1h", periods=4, freq="min"),
+ "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),
+ "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),
+ "G": [1.0, 2.0, 3, 4.0],
+ "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"),
+ },
+ index=pd.Index(range(4), name="idx"),
+ )
+
+
+class TestBuildSchema:
+ def test_build_table_schema(self, df_schema, using_infer_string):
+ result = build_table_schema(df_schema, version=False)
+ expected = {
+ "fields": [
+ {"name": "idx", "type": "integer"},
+ {"name": "A", "type": "integer"},
+ {"name": "B", "type": "string"},
+ {"name": "C", "type": "datetime"},
+ {"name": "D", "type": "duration"},
+ ],
+ "primaryKey": ["idx"],
+ }
+ if using_infer_string:
+ expected["fields"][2] = {"name": "B", "type": "any", "extDtype": "string"}
+ assert result == expected
+ result = build_table_schema(df_schema)
+ assert "pandas_version" in result
+
+ def test_series(self):
+ s = pd.Series([1, 2, 3], name="foo")
+ result = build_table_schema(s, version=False)
+ expected = {
+ "fields": [
+ {"name": "index", "type": "integer"},
+ {"name": "foo", "type": "integer"},
+ ],
+ "primaryKey": ["index"],
+ }
+ assert result == expected
+ result = build_table_schema(s)
+ assert "pandas_version" in result
+
+ def test_series_unnamed(self):
+ result = build_table_schema(pd.Series([1, 2, 3]), version=False)
+ expected = {
+ "fields": [
+ {"name": "index", "type": "integer"},
+ {"name": "values", "type": "integer"},
+ ],
+ "primaryKey": ["index"],
+ }
+ assert result == expected
+
+ def test_multiindex(self, df_schema, using_infer_string):
+ df = df_schema
+ idx = pd.MultiIndex.from_product([("a", "b"), (1, 2)])
+ df.index = idx
+
+ result = build_table_schema(df, version=False)
+ expected = {
+ "fields": [
+ {"name": "level_0", "type": "string"},
+ {"name": "level_1", "type": "integer"},
+ {"name": "A", "type": "integer"},
+ {"name": "B", "type": "string"},
+ {"name": "C", "type": "datetime"},
+ {"name": "D", "type": "duration"},
+ ],
+ "primaryKey": ["level_0", "level_1"],
+ }
+ if using_infer_string:
+ expected["fields"][0] = {
+ "name": "level_0",
+ "type": "any",
+ "extDtype": "string",
+ }
+ expected["fields"][3] = {"name": "B", "type": "any", "extDtype": "string"}
+ assert result == expected
+
+ df.index.names = ["idx0", None]
+ expected["fields"][0]["name"] = "idx0"
+ expected["primaryKey"] = ["idx0", "level_1"]
+ result = build_table_schema(df, version=False)
+ assert result == expected
+
+
+class TestTableSchemaType:
+ @pytest.mark.parametrize("int_type", [int, np.int16, np.int32, np.int64])
+ def test_as_json_table_type_int_data(self, int_type):
+ int_data = [1, 2, 3]
+ assert as_json_table_type(np.array(int_data, dtype=int_type).dtype) == "integer"
+
+ @pytest.mark.parametrize("float_type", [float, np.float16, np.float32, np.float64])
+ def test_as_json_table_type_float_data(self, float_type):
+ float_data = [1.0, 2.0, 3.0]
+ assert (
+ as_json_table_type(np.array(float_data, dtype=float_type).dtype) == "number"
+ )
+
+ @pytest.mark.parametrize("bool_type", [bool, np.bool_])
+ def test_as_json_table_type_bool_data(self, bool_type):
+ bool_data = [True, False]
+ assert (
+ as_json_table_type(np.array(bool_data, dtype=bool_type).dtype) == "boolean"
+ )
+
+ @pytest.mark.parametrize(
+ "date_data",
+ [
+ pd.to_datetime(["2016"]),
+ pd.to_datetime(["2016"], utc=True),
+ pd.Series(pd.to_datetime(["2016"])),
+ pd.Series(pd.to_datetime(["2016"], utc=True)),
+ pd.period_range("2016", freq="Y", periods=3),
+ ],
+ )
+ def test_as_json_table_type_date_data(self, date_data):
+ assert as_json_table_type(date_data.dtype) == "datetime"
+
+ @pytest.mark.parametrize(
+ "str_data",
+ [pd.Series(["a", "b"], dtype=object), pd.Index(["a", "b"], dtype=object)],
+ )
+ def test_as_json_table_type_string_data(self, str_data):
+ assert as_json_table_type(str_data.dtype) == "string"
+
+ @pytest.mark.parametrize(
+ "cat_data",
+ [
+ pd.Categorical(["a"]),
+ pd.Categorical([1]),
+ pd.Series(pd.Categorical([1])),
+ pd.CategoricalIndex([1]),
+ pd.Categorical([1]),
+ ],
+ )
+ def test_as_json_table_type_categorical_data(self, cat_data):
+ assert as_json_table_type(cat_data.dtype) == "any"
+
+ # ------
+ # dtypes
+ # ------
+ @pytest.mark.parametrize("int_dtype", [int, np.int16, np.int32, np.int64])
+ def test_as_json_table_type_int_dtypes(self, int_dtype):
+ assert as_json_table_type(int_dtype) == "integer"
+
+ @pytest.mark.parametrize("float_dtype", [float, np.float16, np.float32, np.float64])
+ def test_as_json_table_type_float_dtypes(self, float_dtype):
+ assert as_json_table_type(float_dtype) == "number"
+
+ @pytest.mark.parametrize("bool_dtype", [bool, np.bool_])
+ def test_as_json_table_type_bool_dtypes(self, bool_dtype):
+ assert as_json_table_type(bool_dtype) == "boolean"
+
+ @pytest.mark.parametrize(
+ "date_dtype",
+ [
+            np.dtype("<M8[ns]"),
+
+        class BinaryThing:
+            def __init__(self, hexed: str) -> None:
+ self.hexed = hexed
+ self.binary = bytes.fromhex(hexed)
+
+ def __str__(self) -> str:
+ return self.hexed
+
+ hexed = "574b4454ba8c5eb4f98a8f45"
+ binthing = BinaryThing(hexed)
+
+ # verify the proper conversion of printable content
+ df_printable = DataFrame({"A": [binthing.hexed]})
+ assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
+
+        # check that non-printable content raises the appropriate exception
+ df_nonprintable = DataFrame({"A": [binthing]})
+ msg = "Unsupported UTF-8 sequence length when encoding string"
+ with pytest.raises(OverflowError, match=msg):
+ df_nonprintable.to_json()
+
+ # the same with multiple columns threw segfaults
+ df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
+ with pytest.raises(OverflowError, match=msg):
+ df_mixed.to_json()
+
+ # default_handler should resolve exceptions for non-string types
+ result = df_nonprintable.to_json(default_handler=str)
+ expected = f'{{"A":{{"0":"{hexed}"}}}}'
+ assert result == expected
+ assert (
+ df_mixed.to_json(default_handler=str)
+ == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
+ )
+
+ def test_label_overflow(self):
+ # GH14256: buffer length not checked when writing label
+ result = DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
+ expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
+ assert result == expected
+
+ def test_series_non_unique_index(self):
+ s = Series(["a", "b"], index=[1, 1])
+
+ msg = "Series index must be unique for orient='index'"
+ with pytest.raises(ValueError, match=msg):
+ s.to_json(orient="index")
+
+ tm.assert_series_equal(
+ s,
+ read_json(
+ StringIO(s.to_json(orient="split")), orient="split", typ="series"
+ ),
+ )
+ unserialized = read_json(
+ StringIO(s.to_json(orient="records")), orient="records", typ="series"
+ )
+ tm.assert_equal(s.values, unserialized.values)
+
+ def test_series_default_orient(self, string_series):
+ assert string_series.to_json() == string_series.to_json(orient="index")
+
+ def test_series_roundtrip_simple(self, orient, string_series, using_infer_string):
+ data = StringIO(string_series.to_json(orient=orient))
+ result = read_json(data, typ="series", orient=orient)
+
+ expected = string_series
+ if using_infer_string and orient in ("split", "index", "columns"):
+ # These schemas don't contain dtypes, so we infer string
+ expected.index = expected.index.astype("string[pyarrow_numpy]")
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
+ if orient != "split":
+ expected.name = None
+
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", [False, None])
+ def test_series_roundtrip_object(self, orient, dtype, object_series):
+ data = StringIO(object_series.to_json(orient=orient))
+ result = read_json(data, typ="series", orient=orient, dtype=dtype)
+
+ expected = object_series
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
+ if orient != "split":
+ expected.name = None
+
+ tm.assert_series_equal(result, expected)
+
+ def test_series_roundtrip_empty(self, orient):
+ empty_series = Series([], index=[], dtype=np.float64)
+ data = StringIO(empty_series.to_json(orient=orient))
+ result = read_json(data, typ="series", orient=orient)
+
+ expected = empty_series.reset_index(drop=True)
+        if orient == "split":
+ expected.index = expected.index.astype(np.float64)
+
+ tm.assert_series_equal(result, expected)
+
+ def test_series_roundtrip_timeseries(self, orient, datetime_series):
+ data = StringIO(datetime_series.to_json(orient=orient))
+ result = read_json(data, typ="series", orient=orient)
+
+ expected = datetime_series
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
+ if orient != "split":
+ expected.name = None
+
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", [np.float64, int])
+ def test_series_roundtrip_numeric(self, orient, dtype):
+ s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
+ data = StringIO(s.to_json(orient=orient))
+ result = read_json(data, typ="series", orient=orient)
+
+ expected = s.copy()
+ if orient in ("values", "records"):
+ expected = expected.reset_index(drop=True)
+
+ tm.assert_series_equal(result, expected)
+
+ def test_series_to_json_except(self):
+ s = Series([1, 2, 3])
+ msg = "Invalid value 'garbage' for option 'orient'"
+ with pytest.raises(ValueError, match=msg):
+ s.to_json(orient="garbage")
+
+ def test_series_from_json_precise_float(self):
+ s = Series([4.56, 4.56, 4.56])
+ result = read_json(StringIO(s.to_json()), typ="series", precise_float=True)
+ tm.assert_series_equal(result, s, check_index_type=False)
+
+ def test_series_with_dtype(self):
+ # GH 21986
+ s = Series([4.56, 4.56, 4.56])
+ result = read_json(StringIO(s.to_json()), typ="series", dtype=np.int64)
+ expected = Series([4] * 3)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "dtype,expected",
+ [
+ (True, Series(["2000-01-01"], dtype="datetime64[ns]")),
+ (False, Series([946684800000])),
+ ],
+ )
+ def test_series_with_dtype_datetime(self, dtype, expected):
+ s = Series(["2000-01-01"], dtype="datetime64[ns]")
+ data = StringIO(s.to_json())
+ result = read_json(data, typ="series", dtype=dtype)
+ tm.assert_series_equal(result, expected)
+
+ def test_frame_from_json_precise_float(self):
+ df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
+ result = read_json(StringIO(df.to_json()), precise_float=True)
+ tm.assert_frame_equal(result, df)
+
+ def test_typ(self):
+ s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
+ result = read_json(StringIO(s.to_json()), typ=None)
+ tm.assert_series_equal(result, s)
+
+ def test_reconstruction_index(self):
+ df = DataFrame([[1, 2, 3], [4, 5, 6]])
+ result = read_json(StringIO(df.to_json()))
+ tm.assert_frame_equal(result, df)
+
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
+ result = read_json(StringIO(df.to_json()))
+ tm.assert_frame_equal(result, df)
+
+ def test_path(self, float_frame, int_frame, datetime_frame):
+ with tm.ensure_clean("test.json") as path:
+ for df in [float_frame, int_frame, datetime_frame]:
+ df.to_json(path)
+ read_json(path)
+
+ def test_axis_dates(self, datetime_series, datetime_frame):
+ # frame
+ json = StringIO(datetime_frame.to_json())
+ result = read_json(json)
+ tm.assert_frame_equal(result, datetime_frame)
+
+ # series
+ json = StringIO(datetime_series.to_json())
+ result = read_json(json, typ="series")
+ tm.assert_series_equal(result, datetime_series, check_names=False)
+ assert result.name is None
+
+ def test_convert_dates(self, datetime_series, datetime_frame):
+ # frame
+ df = datetime_frame
+ df["date"] = Timestamp("20130101").as_unit("ns")
+
+ json = StringIO(df.to_json())
+ result = read_json(json)
+ tm.assert_frame_equal(result, df)
+
+ df["foo"] = 1.0
+ json = StringIO(df.to_json(date_unit="ns"))
+
+ result = read_json(json, convert_dates=False)
+ expected = df.copy()
+ expected["date"] = expected["date"].values.view("i8")
+ expected["foo"] = expected["foo"].astype("int64")
+ tm.assert_frame_equal(result, expected)
+
+ # series
+ ts = Series(Timestamp("20130101").as_unit("ns"), index=datetime_series.index)
+ json = StringIO(ts.to_json())
+ result = read_json(json, typ="series")
+ tm.assert_series_equal(result, ts)
+
+ @pytest.mark.parametrize("date_format", ["epoch", "iso"])
+ @pytest.mark.parametrize("as_object", [True, False])
+ @pytest.mark.parametrize("date_typ", [datetime.date, datetime.datetime, Timestamp])
+ def test_date_index_and_values(self, date_format, as_object, date_typ):
+ data = [date_typ(year=2020, month=1, day=1), pd.NaT]
+ if as_object:
+ data.append("a")
+
+ ser = Series(data, index=data)
+ result = ser.to_json(date_format=date_format)
+
+ if date_format == "epoch":
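+            # 1577836800000 == 2020-01-01T00:00:00Z in epoch milliseconds.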
+ expected = '{"1577836800000":1577836800000,"null":null}'
+ else:
+ expected = (
+ '{"2020-01-01T00:00:00.000":"2020-01-01T00:00:00.000","null":null}'
+ )
+
+ if as_object:
+ expected = expected.replace("}", ',"a":"a"}')
+
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "infer_word",
+ [
+ "trade_time",
+ "date",
+ "datetime",
+ "sold_at",
+ "modified",
+ "timestamp",
+ "timestamps",
+ ],
+ )
+ def test_convert_dates_infer(self, infer_word):
+ # GH10747
+
+ data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
+ expected = DataFrame(
+ [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
+ )
+
+ result = read_json(StringIO(ujson_dumps(data)))[["id", infer_word]]
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "date,date_unit",
+ [
+ ("20130101 20:43:42.123", None),
+ ("20130101 20:43:42", "s"),
+ ("20130101 20:43:42.123", "ms"),
+ ("20130101 20:43:42.123456", "us"),
+ ("20130101 20:43:42.123456789", "ns"),
+ ],
+ )
+ def test_date_format_frame(self, date, date_unit, datetime_frame):
+ df = datetime_frame
+
+ df["date"] = Timestamp(date).as_unit("ns")
+ df.iloc[1, df.columns.get_loc("date")] = pd.NaT
+ df.iloc[5, df.columns.get_loc("date")] = pd.NaT
+ if date_unit:
+ json = df.to_json(date_format="iso", date_unit=date_unit)
+ else:
+ json = df.to_json(date_format="iso")
+
+ result = read_json(StringIO(json))
+ expected = df.copy()
+ tm.assert_frame_equal(result, expected)
+
+ def test_date_format_frame_raises(self, datetime_frame):
+ df = datetime_frame
+ msg = "Invalid value 'foo' for option 'date_unit'"
+ with pytest.raises(ValueError, match=msg):
+ df.to_json(date_format="iso", date_unit="foo")
+
+ @pytest.mark.parametrize(
+ "date,date_unit",
+ [
+ ("20130101 20:43:42.123", None),
+ ("20130101 20:43:42", "s"),
+ ("20130101 20:43:42.123", "ms"),
+ ("20130101 20:43:42.123456", "us"),
+ ("20130101 20:43:42.123456789", "ns"),
+ ],
+ )
+ def test_date_format_series(self, date, date_unit, datetime_series):
+ ts = Series(Timestamp(date).as_unit("ns"), index=datetime_series.index)
+ ts.iloc[1] = pd.NaT
+ ts.iloc[5] = pd.NaT
+ if date_unit:
+ json = ts.to_json(date_format="iso", date_unit=date_unit)
+ else:
+ json = ts.to_json(date_format="iso")
+
+ result = read_json(StringIO(json), typ="series")
+ expected = ts.copy()
+ tm.assert_series_equal(result, expected)
+
+ def test_date_format_series_raises(self, datetime_series):
+ ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
+ msg = "Invalid value 'foo' for option 'date_unit'"
+ with pytest.raises(ValueError, match=msg):
+ ts.to_json(date_format="iso", date_unit="foo")
+
+ @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
+ def test_date_unit(self, unit, datetime_frame):
+ df = datetime_frame
+ df["date"] = Timestamp("20130101 20:43:42").as_unit("ns")
+ dl = df.columns.get_loc("date")
+ df.iloc[1, dl] = Timestamp("19710101 20:43:42")
+ df.iloc[2, dl] = Timestamp("21460101 20:43:42")
+ df.iloc[4, dl] = pd.NaT
+
+ json = df.to_json(date_format="epoch", date_unit=unit)
+
+ # force date unit
+ result = read_json(StringIO(json), date_unit=unit)
+ tm.assert_frame_equal(result, df)
+
+ # detect date unit
+ result = read_json(StringIO(json), date_unit=None)
+ tm.assert_frame_equal(result, df)
+
+ @pytest.mark.parametrize("unit", ["s", "ms", "us"])
+ def test_iso_non_nano_datetimes(self, unit):
+        # GH53686: numpy datetimes in an Index or a column with non-nano
+        # resolution can be serialized correctly
+ index = DatetimeIndex(
+ [np.datetime64("2023-01-01T11:22:33.123456", unit)],
+ dtype=f"datetime64[{unit}]",
+ )
+ df = DataFrame(
+ {
+ "date": Series(
+ [np.datetime64("2022-01-01T11:22:33.123456", unit)],
+ dtype=f"datetime64[{unit}]",
+ index=index,
+ ),
+ "date_obj": Series(
+ [np.datetime64("2023-01-01T11:22:33.123456", unit)],
+ dtype=object,
+ index=index,
+ ),
+ },
+ )
+
+ buf = StringIO()
+ df.to_json(buf, date_format="iso", date_unit=unit)
+ buf.seek(0)
+
+ # read_json always reads datetimes in nanosecond resolution
+ # TODO: check_dtype/check_index_type should be removable
+ # once read_json gets non-nano support
+ tm.assert_frame_equal(
+ read_json(buf, convert_dates=["date", "date_obj"]),
+ df,
+ check_index_type=False,
+ check_dtype=False,
+ )
+
+ def test_weird_nested_json(self):
+ # this used to core dump the parser
+ s = r"""{
+ "status": "success",
+ "data": {
+ "posts": [
+ {
+ "id": 1,
+ "title": "A blog post",
+ "body": "Some useful content"
+ },
+ {
+ "id": 2,
+ "title": "Another blog post",
+ "body": "More content"
+ }
+ ]
+ }
+ }"""
+ read_json(StringIO(s))
+
+ def test_doc_example(self):
+ dfj2 = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 2)), columns=list("AB")
+ )
+ dfj2["date"] = Timestamp("20130101")
+ dfj2["ints"] = range(5)
+ dfj2["bools"] = True
+ dfj2.index = date_range("20130101", periods=5)
+
+ json = StringIO(dfj2.to_json())
+ result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
+        tm.assert_frame_equal(result, dfj2)
+
+ def test_round_trip_exception(self, datapath):
+ # GH 3867
+ path = datapath("io", "json", "data", "teams.csv")
+ df = pd.read_csv(path)
+ s = df.to_json()
+
+ result = read_json(StringIO(s))
+ res = result.reindex(index=df.index, columns=df.columns)
+ msg = "The 'downcast' keyword in fillna is deprecated"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = res.fillna(np.nan, downcast=False)
+ tm.assert_frame_equal(res, df)
+
+ @pytest.mark.network
+ @pytest.mark.single_cpu
+ @pytest.mark.parametrize(
+ "field,dtype",
+ [
+ ["created_at", pd.DatetimeTZDtype(tz="UTC")],
+ ["closed_at", "datetime64[ns]"],
+ ["updated_at", pd.DatetimeTZDtype(tz="UTC")],
+ ],
+ )
+ def test_url(self, field, dtype, httpserver):
+ data = '{"created_at": ["2023-06-23T18:21:36Z"], "closed_at": ["2023-06-23T18:21:36"], "updated_at": ["2023-06-23T18:21:36Z"]}\n' # noqa: E501
+ httpserver.serve_content(content=data)
+ result = read_json(httpserver.url, convert_dates=True)
+ assert result[field].dtype == dtype
+
+ def test_timedelta(self):
+ converter = lambda x: pd.to_timedelta(x, unit="ms")
+
+ ser = Series([timedelta(23), timedelta(seconds=5)])
+ assert ser.dtype == "timedelta64[ns]"
+
+ result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
+ tm.assert_series_equal(result, ser)
+
+ ser = Series([timedelta(23), timedelta(seconds=5)], index=Index([0, 1]))
+ assert ser.dtype == "timedelta64[ns]"
+ result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)
+ tm.assert_series_equal(result, ser)
+
+ frame = DataFrame([timedelta(23), timedelta(seconds=5)])
+ assert frame[0].dtype == "timedelta64[ns]"
+ tm.assert_frame_equal(
+ frame, read_json(StringIO(frame.to_json())).apply(converter)
+ )
+
+ def test_timedelta2(self):
+ frame = DataFrame(
+ {
+ "a": [timedelta(days=23), timedelta(seconds=5)],
+ "b": [1, 2],
+ "c": date_range(start="20130101", periods=2),
+ }
+ )
+ data = StringIO(frame.to_json(date_unit="ns"))
+ result = read_json(data)
+ result["a"] = pd.to_timedelta(result.a, unit="ns")
+ result["c"] = pd.to_datetime(result.c)
+ tm.assert_frame_equal(frame, result)
+
+ def test_mixed_timedelta_datetime(self):
+ td = timedelta(23)
+ ts = Timestamp("20130101")
+ frame = DataFrame({"a": [td, ts]}, dtype=object)
+
+ expected = DataFrame(
+ {"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]}
+ )
+ data = StringIO(frame.to_json(date_unit="ns"))
+ result = read_json(data, dtype={"a": "int64"})
+ tm.assert_frame_equal(result, expected, check_index_type=False)
+
+ @pytest.mark.parametrize("as_object", [True, False])
+ @pytest.mark.parametrize("date_format", ["iso", "epoch"])
+ @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
+ def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
+ # GH28156: to_json not correctly formatting Timedelta
+ data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
+ if as_object:
+ data.append("a")
+
+ ser = Series(data, index=data)
+ if date_format == "iso":
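+            # "P1DT0H0M0S" is the ISO 8601 duration form of one day; the
+            # epoch branch below serializes to integer milliseconds instead.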
+ expected = (
+ '{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
+ )
+ else:
+ expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
+
+ if as_object:
+ expected = expected.replace("}", ',"a":"a"}')
+
+ result = ser.to_json(date_format=date_format)
+ assert result == expected
+
+ @pytest.mark.parametrize("as_object", [True, False])
+ @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
+ def test_timedelta_to_json_fractional_precision(self, as_object, timedelta_typ):
+ data = [timedelta_typ(milliseconds=42)]
+ ser = Series(data, index=data)
+ if as_object:
+ ser = ser.astype(object)
+
+ result = ser.to_json()
+ expected = '{"42":42}'
+ assert result == expected
+
+ def test_default_handler(self):
+ value = object()
+ frame = DataFrame({"a": [7, value]})
+ expected = DataFrame({"a": [7, str(value)]})
+ result = read_json(StringIO(frame.to_json(default_handler=str)))
+ tm.assert_frame_equal(expected, result, check_index_type=False)
+
+ def test_default_handler_indirect(self):
+ def default(obj):
+ if isinstance(obj, complex):
+ return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
+ return str(obj)
+
+ df_list = [
+ 9,
+ DataFrame(
+ {"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
+ columns=["a", "b"],
+ ),
+ ]
+ expected = (
+ '[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
+ '["re",4.0],["im",-5.0]],"N\\/A"]]]'
+ )
+ assert (
+ ujson_dumps(df_list, default_handler=default, orient="values") == expected
+ )
+
+ def test_default_handler_numpy_unsupported_dtype(self):
+ # GH12554 to_json raises 'Unhandled numpy dtype 15'
+ df = DataFrame(
+ {"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
+ columns=["a", "b"],
+ )
+ expected = (
+ '[["(1+0j)","(nan+0j)"],'
+ '["(2.3+0j)","(nan+0j)"],'
+ '["(4-5j)","(1.2+0j)"]]'
+ )
+ assert df.to_json(default_handler=str, orient="values") == expected
+
+ def test_default_handler_raises(self):
+ msg = "raisin"
+
+ def my_handler_raises(obj):
+ raise TypeError(msg)
+
+ with pytest.raises(TypeError, match=msg):
+ DataFrame({"a": [1, 2, object()]}).to_json(
+ default_handler=my_handler_raises
+ )
+ with pytest.raises(TypeError, match=msg):
+ DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
+ default_handler=my_handler_raises
+ )
+
+ def test_categorical(self):
+ # GH4377 df.to_json segfaults with non-ndarray blocks
+ df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
+ df["B"] = df["A"]
+ expected = df.to_json()
+
+ df["B"] = df["A"].astype("category")
+ assert expected == df.to_json()
+
+ s = df["A"]
+ sc = df["B"]
+ assert s.to_json() == sc.to_json()
+
+ def test_datetime_tz(self):
+ # GH4377 df.to_json segfaults with non-ndarray blocks
+ tz_range = date_range("20130101", periods=3, tz="US/Eastern")
+ tz_naive = tz_range.tz_convert("utc").tz_localize(None)
+
+ df = DataFrame({"A": tz_range, "B": date_range("20130101", periods=3)})
+
+ df_naive = df.copy()
+ df_naive["A"] = tz_naive
+ expected = df_naive.to_json()
+ assert expected == df.to_json()
+
+ stz = Series(tz_range)
+ s_naive = Series(tz_naive)
+ assert stz.to_json() == s_naive.to_json()
+
+ def test_sparse(self):
+ # GH4377 df.to_json segfaults with non-ndarray blocks
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+ df.loc[:8] = np.nan
+
+ sdf = df.astype("Sparse")
+ expected = df.to_json()
+ assert expected == sdf.to_json()
+
+ s = Series(np.random.default_rng(2).standard_normal(10))
+ s.loc[:8] = np.nan
+ ss = s.astype("Sparse")
+
+ expected = s.to_json()
+ assert expected == ss.to_json()
+
+ @pytest.mark.parametrize(
+ "ts",
+ [
+ Timestamp("2013-01-10 05:00:00Z"),
+ Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
+ Timestamp("2013-01-10 00:00:00-0500"),
+ ],
+ )
+ def test_tz_is_utc(self, ts):
+ exp = '"2013-01-10T05:00:00.000Z"'
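+        # All three parametrized timestamps denote the same instant, so each
+        # serializes to the identical UTC ISO string.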
+
+ assert ujson_dumps(ts, iso_dates=True) == exp
+ dt = ts.to_pydatetime()
+ assert ujson_dumps(dt, iso_dates=True) == exp
+
+ def test_tz_is_naive(self):
+ ts = Timestamp("2013-01-10 05:00:00")
+ exp = '"2013-01-10T05:00:00.000"'
+
+ assert ujson_dumps(ts, iso_dates=True) == exp
+ dt = ts.to_pydatetime()
+ assert ujson_dumps(dt, iso_dates=True) == exp
+
+ @pytest.mark.parametrize(
+ "tz_range",
+ [
+ date_range("2013-01-01 05:00:00Z", periods=2),
+ date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
+ date_range("2013-01-01 00:00:00-0500", periods=2),
+ ],
+ )
+ def test_tz_range_is_utc(self, tz_range):
+ exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
+ dfexp = (
+ '{"DT":{'
+ '"0":"2013-01-01T05:00:00.000Z",'
+ '"1":"2013-01-02T05:00:00.000Z"}}'
+ )
+
+ assert ujson_dumps(tz_range, iso_dates=True) == exp
+ dti = DatetimeIndex(tz_range)
+ # Ensure datetimes in object array are serialized correctly
+ # in addition to the normal DTI case
+ assert ujson_dumps(dti, iso_dates=True) == exp
+ assert ujson_dumps(dti.astype(object), iso_dates=True) == exp
+ df = DataFrame({"DT": dti})
+ result = ujson_dumps(df, iso_dates=True)
+ assert result == dfexp
+        assert ujson_dumps(df.astype({"DT": object}), iso_dates=True) == dfexp
+
+ def test_tz_range_is_naive(self):
+ dti = date_range("2013-01-01 05:00:00", periods=2)
+
+ exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]'
+ dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}'
+
+ # Ensure datetimes in object array are serialized correctly
+ # in addition to the normal DTI case
+ assert ujson_dumps(dti, iso_dates=True) == exp
+ assert ujson_dumps(dti.astype(object), iso_dates=True) == exp
+ df = DataFrame({"DT": dti})
+ result = ujson_dumps(df, iso_dates=True)
+ assert result == dfexp
+        assert ujson_dumps(df.astype({"DT": object}), iso_dates=True) == dfexp
+
+ def test_read_inline_jsonl(self):
+ # GH9180
+
+ result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)
+ expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.single_cpu
+ @td.skip_if_not_us_locale
+ def test_read_s3_jsonl(self, s3_public_bucket_with_data, s3so):
+ # GH17200
+
+ result = read_json(
+ f"s3n://{s3_public_bucket_with_data.name}/items.jsonl",
+ lines=True,
+ storage_options=s3so,
+ )
+ expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+ def test_read_local_jsonl(self):
+ # GH17200
+ with tm.ensure_clean("tmp_items.json") as path:
+ with open(path, "w", encoding="utf-8") as infile:
+ infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
+ result = read_json(path, lines=True)
+ expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+ def test_read_jsonl_unicode_chars(self):
+ # GH15132: non-ascii unicode characters
+ # \u201d == RIGHT DOUBLE QUOTATION MARK
+
+ # simulate file handle
+ json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+ json = StringIO(json)
+ result = read_json(json, lines=True)
+ expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+ # simulate string
+ json = StringIO('{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n')
+ result = read_json(json, lines=True)
+ expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)])
+ def test_to_json_large_numbers(self, bigNum):
+ # GH34473
+ series = Series(bigNum, dtype=object, index=["articleId"])
+ json = series.to_json()
+ expected = '{"articleId":' + str(bigNum) + "}"
+ assert json == expected
+
+ df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0])
+ json = df.to_json()
+ expected = '{"0":{"articleId":' + str(bigNum) + "}}"
+ assert json == expected
+
+ @pytest.mark.parametrize("bigNum", [-(2**63) - 1, 2**64])
+ def test_read_json_large_numbers(self, bigNum):
+ # GH20599, 26068
+ json = StringIO('{"articleId":' + str(bigNum) + "}")
+ msg = r"Value is too small|Value is too big"
+ with pytest.raises(ValueError, match=msg):
+ read_json(json)
+
+ json = StringIO('{"0":{"articleId":' + str(bigNum) + "}}")
+ with pytest.raises(ValueError, match=msg):
+ read_json(json)
+
+ def test_read_json_large_numbers2(self):
+ # GH18842
+ json = '{"articleId": "1404366058080022500245"}'
+ json = StringIO(json)
+ result = read_json(json, typ="series")
+ expected = Series(1.404366e21, index=["articleId"])
+ tm.assert_series_equal(result, expected)
+
+ json = '{"0": {"articleId": "1404366058080022500245"}}'
+ json = StringIO(json)
+ result = read_json(json)
+ expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
+ tm.assert_frame_equal(result, expected)
+
+ def test_to_jsonl(self):
+ # GH9180
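+        # orient="records" with lines=True emits JSON Lines: one record
+        # object per line, newline-terminated.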
+ df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+ result = df.to_json(orient="records", lines=True)
+ expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'
+ assert result == expected
+
+ df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
+ result = df.to_json(orient="records", lines=True)
+ expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
+ assert result == expected
+ tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
+
+ # GH15096: escaped characters in columns and data
+ df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
+ result = df.to_json(orient="records", lines=True)
+ expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
+ assert result == expected
+
+ tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
+
+ # TODO: there is a near-identical test for pytables; can we share?
+ @pytest.mark.xfail(reason="GH#13774 encoding kwarg not supported", raises=TypeError)
+ @pytest.mark.parametrize(
+ "val",
+ [
+ [b"E\xc9, 17", b"", b"a", b"b", b"c"],
+ [b"E\xc9, 17", b"a", b"b", b"c"],
+ [b"EE, 17", b"", b"a", b"b", b"c"],
+ [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
+ [b"", b"a", b"b", b"c"],
+ [b"\xf8\xfc", b"a", b"b", b"c"],
+ [b"A\xf8\xfc", b"", b"a", b"b", b"c"],
+ [np.nan, b"", b"b", b"c"],
+ [b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
+ ],
+ )
+ @pytest.mark.parametrize("dtype", ["category", object])
+ def test_latin_encoding(self, dtype, val):
+ # GH 13774
+ ser = Series(
+ [x.decode("latin-1") if isinstance(x, bytes) else x for x in val],
+ dtype=dtype,
+ )
+ encoding = "latin-1"
+ with tm.ensure_clean("test.json") as path:
+ ser.to_json(path, encoding=encoding)
+            retr = read_json(path, encoding=encoding)
+ tm.assert_series_equal(ser, retr, check_categorical=False)
+
+ def test_data_frame_size_after_to_json(self):
+ # GH15344
+ df = DataFrame({"a": [str(1)]})
+
+ size_before = df.memory_usage(index=True, deep=True).sum()
+ df.to_json()
+ size_after = df.memory_usage(index=True, deep=True).sum()
+
+ assert size_before == size_after
+
+ @pytest.mark.parametrize(
+ "index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]
+ )
+ @pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])
+ def test_from_json_to_json_table_index_and_columns(self, index, columns):
+ # GH25433 GH25435
+ expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
+ dfjson = expected.to_json(orient="table")
+
+ result = read_json(StringIO(dfjson), orient="table")
+ tm.assert_frame_equal(result, expected)
+
+ def test_from_json_to_json_table_dtypes(self):
+ # GH21345
+ expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
+ dfjson = expected.to_json(orient="table")
+ result = read_json(StringIO(dfjson), orient="table")
+ tm.assert_frame_equal(result, expected)
+
+ # TODO: We are casting to string which coerces None to NaN before casting back
+ # to object, ending up with incorrect na values
+ @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="incorrect na conversion")
+ @pytest.mark.parametrize("orient", ["split", "records", "index", "columns"])
+ def test_to_json_from_json_columns_dtypes(self, orient):
+ # GH21892 GH33205
+ expected = DataFrame.from_dict(
+ {
+ "Integer": Series([1, 2, 3], dtype="int64"),
+ "Float": Series([None, 2.0, 3.0], dtype="float64"),
+ "Object": Series([None, "", "c"], dtype="object"),
+ "Bool": Series([True, False, True], dtype="bool"),
+ "Category": Series(["a", "b", None], dtype="category"),
+ "Datetime": Series(
+ ["2020-01-01", None, "2020-01-03"], dtype="datetime64[ns]"
+ ),
+ }
+ )
+ dfjson = expected.to_json(orient=orient)
+
+ result = read_json(
+ StringIO(dfjson),
+ orient=orient,
+ dtype={
+ "Integer": "int64",
+ "Float": "float64",
+ "Object": "object",
+ "Bool": "bool",
+ "Category": "category",
+ "Datetime": "datetime64[ns]",
+ },
+ )
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])
+ def test_read_json_table_dtype_raises(self, dtype):
+ # GH21345
+ df = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
+ dfjson = df.to_json(orient="table")
+ msg = "cannot pass both dtype and orient='table'"
+ with pytest.raises(ValueError, match=msg):
+ read_json(dfjson, orient="table", dtype=dtype)
+
+ @pytest.mark.parametrize("orient", ["index", "columns", "records", "values"])
+ def test_read_json_table_empty_axes_dtype(self, orient):
+ # GH28558
+
+ expected = DataFrame()
+ result = read_json(StringIO("{}"), orient=orient, convert_axes=True)
+ tm.assert_index_equal(result.index, expected.index)
+ tm.assert_index_equal(result.columns, expected.columns)
+
+ def test_read_json_table_convert_axes_raises(self):
+ # GH25433 GH25435
+ df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."])
+ dfjson = df.to_json(orient="table")
+ msg = "cannot pass both convert_axes and orient='table'"
+ with pytest.raises(ValueError, match=msg):
+ read_json(dfjson, orient="table", convert_axes=True)
+
+ @pytest.mark.parametrize(
+ "data, expected",
+ [
+ (
+ DataFrame([[1, 2], [4, 5]], columns=["a", "b"]),
+ {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
+ ),
+ (
+ DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"),
+ {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
+ ),
+ (
+ DataFrame(
+ [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
+ ),
+ {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
+ ),
+ (Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}),
+ (
+ Series([1, 2, 3], name="A").rename_axis("foo"),
+ {"name": "A", "data": [1, 2, 3]},
+ ),
+ (
+ Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]),
+ {"name": "A", "data": [1, 2]},
+ ),
+ ],
+ )
+ def test_index_false_to_json_split(self, data, expected):
+ # GH 17394
+ # Testing index=False in to_json with orient='split'
+
+ result = data.to_json(orient="split", index=False)
+ result = json.loads(result)
+
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "data",
+ [
+ (DataFrame([[1, 2], [4, 5]], columns=["a", "b"])),
+ (DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),
+ (
+ DataFrame(
+ [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
+ )
+ ),
+ (Series([1, 2, 3], name="A")),
+ (Series([1, 2, 3], name="A").rename_axis("foo")),
+ (Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),
+ ],
+ )
+ def test_index_false_to_json_table(self, data):
+ # GH 17394
+ # Testing index=False in to_json with orient='table'
+
+ result = data.to_json(orient="table", index=False)
+ result = json.loads(result)
+
+ expected = {
+ "schema": pd.io.json.build_table_schema(data, index=False),
+ "data": DataFrame(data).to_dict(orient="records"),
+ }
+
+ assert result == expected
+
+ @pytest.mark.parametrize("orient", ["index", "columns"])
+ def test_index_false_error_to_json(self, orient):
+ # GH 17394, 25513
+ # Testing error message from to_json with index=False
+
+ df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
+
+ msg = (
+ "'index=False' is only valid when 'orient' is 'split', "
+ "'table', 'records', or 'values'"
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_json(orient=orient, index=False)
+
+ @pytest.mark.parametrize("orient", ["records", "values"])
+ def test_index_true_error_to_json(self, orient):
+ # GH 25513
+ # Testing error message from to_json with index=True
+
+ df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
+
+ msg = (
+ "'index=True' is only valid when 'orient' is 'split', "
+ "'table', 'index', or 'columns'"
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_json(orient=orient, index=True)
+
+ @pytest.mark.parametrize("orient", ["split", "table"])
+ @pytest.mark.parametrize("index", [True, False])
+ def test_index_false_from_json_to_json(self, orient, index):
+ # GH25170
+ # Test index=False in from_json to_json
+ expected = DataFrame({"a": [1, 2], "b": [3, 4]})
+ dfjson = expected.to_json(orient=orient, index=index)
+ result = read_json(StringIO(dfjson), orient=orient)
+ tm.assert_frame_equal(result, expected)
+
+ def test_read_timezone_information(self):
+ # GH 25546
+ result = read_json(
+ StringIO('{"2019-01-01T11:00:00.000Z":88}'), typ="series", orient="index"
+ )
+ exp_dti = DatetimeIndex(["2019-01-01 11:00:00"], dtype="M8[ns, UTC]")
+ expected = Series([88], index=exp_dti)
+ tm.assert_series_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "url",
+ [
+ "s3://example-fsspec/",
+ "gcs://another-fsspec/file.json",
+ "https://example-site.com/data",
+ "some-protocol://data.txt",
+ ],
+ )
+ def test_read_json_with_url_value(self, url):
+ # GH 36271
+ result = read_json(StringIO(f'{{"url":{{"0":"{url}"}}}}'))
+ expected = DataFrame({"url": [url]})
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "compression",
+ ["", ".gz", ".bz2", ".tar"],
+ )
+ def test_read_json_with_very_long_file_path(self, compression):
+ # GH 46718
+ long_json_path = f'{"a" * 1000}.json{compression}'
+ with pytest.raises(
+ FileNotFoundError, match=f"File {long_json_path} does not exist"
+ ):
+ # path too long for Windows is handled in file_exists() but raises in
+ # _get_data_from_filepath()
+ read_json(long_json_path)
+
+ @pytest.mark.parametrize(
+ "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")]
+ )
+ def test_timedelta_as_label(self, date_format, key):
+ df = DataFrame([[1]], columns=[pd.Timedelta("1D")])
+ expected = f'{{"{key}":{{"0":1}}}}'
+ result = df.to_json(date_format=date_format)
+
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "orient,expected",
+ [
+ ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),
+ ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),
+ # TODO: the below have separate encoding procedures
+ pytest.param(
+ "split",
+ "",
+ marks=pytest.mark.xfail(
+ reason="Produces JSON but not in a consistent manner"
+ ),
+ ),
+ pytest.param(
+ "table",
+ "",
+ marks=pytest.mark.xfail(
+ reason="Produces JSON but not in a consistent manner"
+ ),
+ ),
+ ],
+ )
+ def test_tuple_labels(self, orient, expected):
+ # GH 20500
+ df = DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
+ result = df.to_json(orient=orient)
+ assert result == expected
+
+ @pytest.mark.parametrize("indent", [1, 2, 4])
+ def test_to_json_indent(self, indent):
+ # GH 12004
+ df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
+
+ result = df.to_json(indent=indent)
+ spaces = " " * indent
+ expected = f"""{{
+{spaces}"a":{{
+{spaces}{spaces}"0":"foo",
+{spaces}{spaces}"1":"baz"
+{spaces}}},
+{spaces}"b":{{
+{spaces}{spaces}"0":"bar",
+{spaces}{spaces}"1":"qux"
+{spaces}}}
+}}"""
+
+ assert result == expected
+
+ @pytest.mark.skipif(
+ using_pyarrow_string_dtype(),
+ reason="Adjust expected when infer_string is default, no bug here, "
+ "just a complicated parametrization",
+ )
+ @pytest.mark.parametrize(
+ "orient,expected",
+ [
+ (
+ "split",
+ """{
+ "columns":[
+ "a",
+ "b"
+ ],
+ "index":[
+ 0,
+ 1
+ ],
+ "data":[
+ [
+ "foo",
+ "bar"
+ ],
+ [
+ "baz",
+ "qux"
+ ]
+ ]
+}""",
+ ),
+ (
+ "records",
+ """[
+ {
+ "a":"foo",
+ "b":"bar"
+ },
+ {
+ "a":"baz",
+ "b":"qux"
+ }
+]""",
+ ),
+ (
+ "index",
+ """{
+ "0":{
+ "a":"foo",
+ "b":"bar"
+ },
+ "1":{
+ "a":"baz",
+ "b":"qux"
+ }
+}""",
+ ),
+ (
+ "columns",
+ """{
+ "a":{
+ "0":"foo",
+ "1":"baz"
+ },
+ "b":{
+ "0":"bar",
+ "1":"qux"
+ }
+}""",
+ ),
+ (
+ "values",
+ """[
+ [
+ "foo",
+ "bar"
+ ],
+ [
+ "baz",
+ "qux"
+ ]
+]""",
+ ),
+ (
+ "table",
+ """{
+ "schema":{
+ "fields":[
+ {
+ "name":"index",
+ "type":"integer"
+ },
+ {
+ "name":"a",
+ "type":"string"
+ },
+ {
+ "name":"b",
+ "type":"string"
+ }
+ ],
+ "primaryKey":[
+ "index"
+ ],
+ "pandas_version":"1.4.0"
+ },
+ "data":[
+ {
+ "index":0,
+ "a":"foo",
+ "b":"bar"
+ },
+ {
+ "index":1,
+ "a":"baz",
+ "b":"qux"
+ }
+ ]
+}""",
+ ),
+ ],
+ )
+ def test_json_indent_all_orients(self, orient, expected):
+ # GH 12004
+ df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
+ result = df.to_json(orient=orient, indent=4)
+ assert result == expected
+
+ def test_json_negative_indent_raises(self):
+ with pytest.raises(ValueError, match="must be a nonnegative integer"):
+ DataFrame().to_json(indent=-1)
+
+ def test_ecma_262_nan_inf_support(self):
+ # GH 12213
+ data = StringIO(
+ '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'
+ )
+ result = read_json(data)
+ expected = DataFrame(
+ ["a", None, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"]
+ )
+ tm.assert_frame_equal(result, expected)
+
+ def test_frame_int_overflow(self):
+ # GH 30320
+ encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}])
+ expected = DataFrame({"col": ["31900441201190696999", "Text"]})
+ result = read_json(StringIO(encoded_json))
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "dataframe,expected",
+ [
+ (
+ DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}),
+ '{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,'
+ '"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}',
+ )
+ ],
+ )
+ def test_json_multiindex(self, dataframe, expected):
+ series = dataframe.stack(future_stack=True)
+ result = series.to_json(orient="index")
+ assert result == expected
+
+ @pytest.mark.single_cpu
+ def test_to_s3(self, s3_public_bucket, s3so):
+ # GH 28375
+ mock_bucket_name, target_file = s3_public_bucket.name, "test.json"
+ df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
+ df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so)
+ timeout = 5
+ while True:
+ if target_file in (obj.key for obj in s3_public_bucket.objects.all()):
+ break
+ time.sleep(0.1)
+ timeout -= 0.1
+ assert timeout > 0, "Timed out waiting for file to appear on moto"
+
+ def test_json_pandas_nulls(self, nulls_fixture, request):
+ # GH 31615
+ if isinstance(nulls_fixture, Decimal):
+ mark = pytest.mark.xfail(reason="not implemented")
+ request.applymarker(mark)
+
+ result = DataFrame([[nulls_fixture]]).to_json()
+ assert result == '{"0":{"0":null}}'
+
+ def test_readjson_bool_series(self):
+ # GH31464
+ result = read_json(StringIO("[true, true, false]"), typ="series")
+ expected = Series([True, True, False])
+ tm.assert_series_equal(result, expected)
+
+ def test_to_json_multiindex_escape(self):
+ # GH 15273
+ df = DataFrame(
+ True,
+ index=date_range("2017-01-20", "2017-01-23"),
+ columns=["foo", "bar"],
+ ).stack(future_stack=True)
+ result = df.to_json()
+ expected = (
+ "{\"(Timestamp('2017-01-20 00:00:00'), 'foo')\":true,"
+ "\"(Timestamp('2017-01-20 00:00:00'), 'bar')\":true,"
+ "\"(Timestamp('2017-01-21 00:00:00'), 'foo')\":true,"
+ "\"(Timestamp('2017-01-21 00:00:00'), 'bar')\":true,"
+ "\"(Timestamp('2017-01-22 00:00:00'), 'foo')\":true,"
+ "\"(Timestamp('2017-01-22 00:00:00'), 'bar')\":true,"
+ "\"(Timestamp('2017-01-23 00:00:00'), 'foo')\":true,"
+ "\"(Timestamp('2017-01-23 00:00:00'), 'bar')\":true}"
+ )
+ assert result == expected
+
+ def test_to_json_series_of_objects(self):
+ class _TestObject:
+ def __init__(self, a, b, _c, d) -> None:
+ self.a = a
+ self.b = b
+ self._c = _c
+ self.d = d
+
+ def e(self):
+ return 5
+
+ # JSON keys should be all non-callable non-underscore attributes, see GH-42768
+ series = Series([_TestObject(a=1, b=2, _c=3, d=4)])
+ assert json.loads(series.to_json()) == {"0": {"a": 1, "b": 2, "d": 4}}
+
+ @pytest.mark.parametrize(
+ "data,expected",
+ [
+ (
+ Series({0: -6 + 8j, 1: 0 + 1j, 2: 9 - 5j}),
+ '{"0":{"imag":8.0,"real":-6.0},'
+ '"1":{"imag":1.0,"real":0.0},'
+ '"2":{"imag":-5.0,"real":9.0}}',
+ ),
+ (
+ Series({0: -9.39 + 0.66j, 1: 3.95 + 9.32j, 2: 4.03 - 0.17j}),
+ '{"0":{"imag":0.66,"real":-9.39},'
+ '"1":{"imag":9.32,"real":3.95},'
+ '"2":{"imag":-0.17,"real":4.03}}',
+ ),
+ (
+ DataFrame([[-2 + 3j, -1 - 0j], [4 - 3j, -0 - 10j]]),
+ '{"0":{"0":{"imag":3.0,"real":-2.0},'
+ '"1":{"imag":-3.0,"real":4.0}},'
+ '"1":{"0":{"imag":0.0,"real":-1.0},'
+ '"1":{"imag":-10.0,"real":0.0}}}',
+ ),
+ (
+ DataFrame(
+ [[-0.28 + 0.34j, -1.08 - 0.39j], [0.41 - 0.34j, -0.78 - 1.35j]]
+ ),
+ '{"0":{"0":{"imag":0.34,"real":-0.28},'
+ '"1":{"imag":-0.34,"real":0.41}},'
+ '"1":{"0":{"imag":-0.39,"real":-1.08},'
+ '"1":{"imag":-1.35,"real":-0.78}}}',
+ ),
+ ],
+ )
+ def test_complex_data_tojson(self, data, expected):
+ # GH41174
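+ # Complex values serialize as objects with "imag" and "real" keys.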
+ result = data.to_json()
+ assert result == expected
+
+ def test_json_uint64(self):
+ # GH21073
+ expected = (
+ '{"columns":["col1"],"index":[0,1],'
+ '"data":[[13342205958987758245],[12388075603347835679]]}'
+ )
+ df = DataFrame(data={"col1": [13342205958987758245, 12388075603347835679]})
+ result = df.to_json(orient="split")
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "orient", ["split", "records", "values", "index", "columns"]
+ )
+ def test_read_json_dtype_backend(
+ self, string_storage, dtype_backend, orient, using_infer_string
+ ):
+ # GH#50750
+ pa = pytest.importorskip("pyarrow")
+ df = DataFrame(
+ {
+ "a": Series([1, np.nan, 3], dtype="Int64"),
+ "b": Series([1, 2, 3], dtype="Int64"),
+ "c": Series([1.5, np.nan, 2.5], dtype="Float64"),
+ "d": Series([1.5, 2.0, 2.5], dtype="Float64"),
+ "e": [True, False, None],
+ "f": [True, False, True],
+ "g": ["a", "b", "c"],
+ "h": ["a", "b", None],
+ }
+ )
+
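+ # Select the expected string array type for the active string_storage/dtype_backend combination.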
+ if using_infer_string:
+ string_array = ArrowStringArrayNumpySemantics(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowStringArrayNumpySemantics(pa.array(["a", "b", None]))
+ elif string_storage == "python":
+ string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
+ string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
+
+ elif dtype_backend == "pyarrow":
+ from pandas.arrays import ArrowExtensionArray
+
+ string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
+
+ else:
+ string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
+ string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
+
+ out = df.to_json(orient=orient)
+ with pd.option_context("mode.string_storage", string_storage):
+ result = read_json(
+ StringIO(out), dtype_backend=dtype_backend, orient=orient
+ )
+
+ expected = DataFrame(
+ {
+ "a": Series([1, np.nan, 3], dtype="Int64"),
+ "b": Series([1, 2, 3], dtype="Int64"),
+ "c": Series([1.5, np.nan, 2.5], dtype="Float64"),
+ "d": Series([1.5, 2.0, 2.5], dtype="Float64"),
+ "e": Series([True, False, NA], dtype="boolean"),
+ "f": Series([True, False, True], dtype="boolean"),
+ "g": string_array,
+ "h": string_array_na,
+ }
+ )
+
+ if dtype_backend == "pyarrow":
+ from pandas.arrays import ArrowExtensionArray
+
+ expected = DataFrame(
+ {
+ col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
+ for col in expected.columns
+ }
+ )
+
+ if orient == "values":
+ expected.columns = list(range(8))
+
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize("orient", ["split", "records", "index"])
+ def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
+ # GH#50750
+ pa = pytest.importorskip("pyarrow")
+ ser = Series([1, np.nan, 3], dtype="Int64")
+
+ out = ser.to_json(orient=orient)
+ with pd.option_context("mode.string_storage", string_storage):
+ result = read_json(
+ StringIO(out), dtype_backend=dtype_backend, orient=orient, typ="series"
+ )
+
+ expected = Series([1, np.nan, 3], dtype="Int64")
+
+ if dtype_backend == "pyarrow":
+ from pandas.arrays import ArrowExtensionArray
+
+ expected = Series(ArrowExtensionArray(pa.array(expected, from_pandas=True)))
+
+ tm.assert_series_equal(result, expected)
+
+ def test_invalid_dtype_backend(self):
+ msg = (
+ "dtype_backend numpy is invalid, only 'numpy_nullable' and "
+ "'pyarrow' are allowed."
+ )
+ with pytest.raises(ValueError, match=msg):
+ read_json("test", dtype_backend="numpy")
+
+
+def test_invalid_engine():
+ # GH 48893
+ ser = Series(range(1))
+ out = ser.to_json()
+ with pytest.raises(ValueError, match="The engine type foo"):
+ read_json(out, engine="foo")
+
+
+def test_pyarrow_engine_lines_false():
+ # GH 48893
+ ser = Series(range(1))
+ out = ser.to_json()
+ with pytest.raises(ValueError, match="currently pyarrow engine only supports"):
+ read_json(out, engine="pyarrow", lines=False)
+
+
+def test_json_roundtrip_string_inference(orient):
+ pytest.importorskip("pyarrow")
+ df = DataFrame(
+ [["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"]
+ )
+ out = df.to_json()
+ with pd.option_context("future.infer_string", True):
+ result = read_json(StringIO(out))
+ expected = DataFrame(
+ [["a", "b"], ["c", "d"]],
+ dtype="string[pyarrow_numpy]",
+ index=Index(["row 1", "row 2"], dtype="string[pyarrow_numpy]"),
+ columns=Index(["col 1", "col 2"], dtype="string[pyarrow_numpy]"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_json_pos_args_deprecation():
+ # GH-54229
+ df = DataFrame({"a": [1, 2, 3]})
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_json except for the "
+ r"argument 'path_or_buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ buf = BytesIO()
+ df.to_json(buf, "split")
+
+
+@td.skip_if_no("pyarrow")
+def test_to_json_ea_null():
+ # GH#57224
+ df = DataFrame(
+ {
+ "a": Series([1, NA], dtype="int64[pyarrow]"),
+ "b": Series([2, NA], dtype="Int64"),
+ }
+ )
+ result = df.to_json(orient="records", lines=True)
+ expected = """{"a":1,"b":2}
+{"a":null,"b":null}
+"""
+ assert result == expected
+
+
+def test_read_json_lines_rangeindex():
+ # GH 57429
+ data = """
+{"a": 1, "b": 2}
+{"a": 3, "b": 4}
+"""
+ result = read_json(StringIO(data), lines=True).index
+ expected = RangeIndex(2)
+ tm.assert_index_equal(result, expected, exact=True)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py
new file mode 100644
index 0000000000000000000000000000000000000000..d96ccb4b94cc2c567c8d7e59cad3227017f9200d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_readlines.py
@@ -0,0 +1,543 @@
+from collections.abc import Iterator
+from io import StringIO
+from pathlib import Path
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ read_json,
+)
+import pandas._testing as tm
+
+from pandas.io.json._json import JsonReader
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+
+@pytest.fixture
+def lines_json_df():
+ df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+ return df.to_json(lines=True, orient="records")
+
+
+@pytest.fixture(params=["ujson", "pyarrow"])
+def engine(request):
+ if request.param == "pyarrow":
+ pytest.importorskip("pyarrow.json")
+ return request.param
+
+
+def test_read_jsonl():
+ # GH9180
+ result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)
+ expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_jsonl_engine_pyarrow(datapath, engine):
+ result = read_json(
+ datapath("io", "json", "data", "line_delimited.json"),
+ lines=True,
+ engine=engine,
+ )
+ expected = DataFrame({"a": [1, 3, 5], "b": [2, 4, 6]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_datetime(request, engine):
+ # GH33787
+ if engine == "pyarrow":
+ # GH 48893
+ reason = "Pyarrow only supports a file path as an input and line delimited json"
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+ df = DataFrame(
+ [([1, 2], ["2020-03-05", "2020-04-08T09:58:49+00:00"], "hector")],
+ columns=["accounts", "date", "name"],
+ )
+ json_line = df.to_json(lines=True, orient="records")
+
+ if engine == "pyarrow":
+ result = read_json(StringIO(json_line), engine=engine)
+ else:
+ result = read_json(StringIO(json_line), engine=engine)
+ expected = DataFrame(
+ [[1, "2020-03-05", "hector"], [2, "2020-04-08T09:58:49+00:00", "hector"]],
+ columns=["accounts", "date", "name"],
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_jsonl_unicode_chars():
+ # GH15132: non-ascii unicode characters
+ # \u201d == RIGHT DOUBLE QUOTATION MARK
+
+ # simulate file handle
+ json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+ json = StringIO(json)
+ result = read_json(json, lines=True)
+ expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+ # simulate string
+ json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
+ result = read_json(StringIO(json), lines=True)
+ expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_to_jsonl():
+ # GH9180
+ df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+ result = df.to_json(orient="records", lines=True)
+ expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'
+ assert result == expected
+
+ df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
+ result = df.to_json(orient="records", lines=True)
+ expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
+ assert result == expected
+ tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
+
+ # GH15096: escaped characters in columns and data
+ df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
+ result = df.to_json(orient="records", lines=True)
+ expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
+ assert result == expected
+ tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)
+
+
+def test_to_jsonl_count_new_lines():
+ # GH36888
+ df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
+ actual_new_lines_count = df.to_json(orient="records", lines=True).count("\n")
+ expected_new_lines_count = 2
+ assert actual_new_lines_count == expected_new_lines_count
+
+
+@pytest.mark.parametrize("chunksize", [1, 1.0])
+def test_readjson_chunks(request, lines_json_df, chunksize, engine):
+ # Basic test that read_json(chunks=True) gives the same result as
+ # read_json(chunks=False)
+ # GH17048: memory usage when lines=True
+
+ if engine == "pyarrow":
+ # GH 48893
+ reason = (
+ "Pyarrow only supports a file path as an input and line delimited json"
+ "and doesn't support chunksize parameter."
+ )
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+ unchunked = read_json(StringIO(lines_json_df), lines=True)
+ with read_json(
+ StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine
+ ) as reader:
+ chunked = pd.concat(reader)
+
+ tm.assert_frame_equal(chunked, unchunked)
+
+
+def test_readjson_chunksize_requires_lines(lines_json_df, engine):
+ msg = "chunksize can only be passed if lines=True"
+ with pytest.raises(ValueError, match=msg):
+ with read_json(
+ StringIO(lines_json_df), lines=False, chunksize=2, engine=engine
+ ) as _:
+ pass
+
+
+def test_readjson_chunks_series(request, engine):
+ if engine == "pyarrow":
+ # GH 48893
+ reason = (
+ "Pyarrow only supports a file path as an input and line delimited json"
+ "and doesn't support chunksize parameter."
+ )
+ request.applymarker(pytest.mark.xfail(reason=reason))
+
+ # Test reading line-format JSON to Series with chunksize param
+ s = pd.Series({"A": 1, "B": 2})
+
+ strio = StringIO(s.to_json(lines=True, orient="records"))
+ unchunked = read_json(strio, lines=True, typ="Series", engine=engine)
+
+ strio = StringIO(s.to_json(lines=True, orient="records"))
+ with read_json(
+ strio, lines=True, typ="Series", chunksize=1, engine=engine
+ ) as reader:
+ chunked = pd.concat(reader)
+
+ tm.assert_series_equal(chunked, unchunked)
+
+
+def test_readjson_each_chunk(request, lines_json_df, engine):
+ if engine == "pyarrow":
+ # GH 48893
+ reason = (
+ "Pyarrow only supports a file path as an input and line delimited json"
+ "and doesn't support chunksize parameter."
+ )
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+ # Other tests check that the final result of read_json(chunksize=True)
+ # is correct. This checks the intermediate chunks.
+ with read_json(
+ StringIO(lines_json_df), lines=True, chunksize=2, engine=engine
+ ) as reader:
+ chunks = list(reader)
+ assert chunks[0].shape == (2, 2)
+ assert chunks[1].shape == (1, 2)
+
+
+def test_readjson_chunks_from_file(request, engine):
+ if engine == "pyarrow":
+ # GH 48893
+ reason = (
+ "Pyarrow only supports a file path as an input and line delimited json"
+ "and doesn't support chunksize parameter."
+ )
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+ with tm.ensure_clean("test.json") as path:
+ df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+ df.to_json(path, lines=True, orient="records")
+ with read_json(path, lines=True, chunksize=1, engine=engine) as reader:
+ chunked = pd.concat(reader)
+ unchunked = read_json(path, lines=True, engine=engine)
+ tm.assert_frame_equal(unchunked, chunked)
+
+
+@pytest.mark.parametrize("chunksize", [None, 1])
+def test_readjson_chunks_closes(chunksize):
+ with tm.ensure_clean("test.json") as path:
+ df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+ df.to_json(path, lines=True, orient="records")
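+ # Construct JsonReader directly so the underlying file handle can be inspected after the read.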
+ reader = JsonReader(
+ path,
+ orient=None,
+ typ="frame",
+ dtype=True,
+ convert_axes=True,
+ convert_dates=True,
+ keep_default_dates=True,
+ precise_float=False,
+ date_unit=None,
+ encoding=None,
+ lines=True,
+ chunksize=chunksize,
+ compression=None,
+ nrows=None,
+ )
+ with reader:
+ reader.read()
+ assert (
+ reader.handles.handle.closed
+ ), f"didn't close stream with chunksize = {chunksize}"
+
+
+@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"])
+def test_readjson_invalid_chunksize(lines_json_df, chunksize, engine):
+ msg = r"'chunksize' must be an integer >=1"
+
+ with pytest.raises(ValueError, match=msg):
+ with read_json(
+ StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine
+ ) as _:
+ pass
+
+
+@pytest.mark.parametrize("chunksize", [None, 1, 2])
+def test_readjson_chunks_multiple_empty_lines(chunksize):
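+ # Blank lines between records should be ignored, both with and without chunking.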
+ j = """
+
+ {"A":1,"B":4}
+
+
+
+ {"A":2,"B":5}
+
+
+
+
+
+
+
+ {"A":3,"B":6}
+ """
+ orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
+ test = read_json(StringIO(j), lines=True, chunksize=chunksize)
+ if chunksize is not None:
+ with test:
+ test = pd.concat(test)
+ tm.assert_frame_equal(orig, test, obj=f"chunksize: {chunksize}")
+
+
+def test_readjson_unicode(request, monkeypatch, engine):
+ if engine == "pyarrow":
+ # GH 48893
+ reason = (
+ "Pyarrow only supports a file path as an input and line delimited json"
+ "and doesn't support chunksize parameter."
+ )
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+ with tm.ensure_clean("test.json") as path:
+ monkeypatch.setattr("locale.getpreferredencoding", lambda do_setlocale: "cp949")
+ with open(path, "w", encoding="utf-8") as f:
+ f.write('{"£©µÀÆÖÞßéöÿ":["АБВГДабвгд가"]}')
+
+ result = read_json(path, engine=engine)
+ expected = DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows", [1, 2])
+def test_readjson_nrows(nrows, engine):
+ # GH 33916
+ # Test reading line-format JSON to Series with nrows param
+ jsonl = """{"a": 1, "b": 2}
+ {"a": 3, "b": 4}
+ {"a": 5, "b": 6}
+ {"a": 7, "b": 8}"""
+ result = read_json(StringIO(jsonl), lines=True, nrows=nrows)
+ expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("nrows,chunksize", [(2, 2), (4, 2)])
+def test_readjson_nrows_chunks(request, nrows, chunksize, engine):
+ # GH 33916
+ # Test reading line-format JSON to Series with nrows and chunksize param
+ if engine == "pyarrow":
+ # GH 48893
+ reason = (
+ "Pyarrow only supports a file path as an input and line delimited json"
+ "and doesn't support chunksize parameter."
+ )
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+ jsonl = """{"a": 1, "b": 2}
+ {"a": 3, "b": 4}
+ {"a": 5, "b": 6}
+ {"a": 7, "b": 8}"""
+
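+ # The pyarrow engine rejects StringIO buffers, so that branch passes the raw string (xfailed above).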
+ if engine != "pyarrow":
+ with read_json(
+ StringIO(jsonl), lines=True, nrows=nrows, chunksize=chunksize, engine=engine
+ ) as reader:
+ chunked = pd.concat(reader)
+ else:
+ with read_json(
+ jsonl, lines=True, nrows=nrows, chunksize=chunksize, engine=engine
+ ) as reader:
+ chunked = pd.concat(reader)
+ expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]
+ tm.assert_frame_equal(chunked, expected)
+
+
+def test_readjson_nrows_requires_lines(engine):
+ # GH 33916
+ # Test ValueError raised if nrows is set without setting lines in read_json
+ jsonl = """{"a": 1, "b": 2}
+ {"a": 3, "b": 4}
+ {"a": 5, "b": 6}
+ {"a": 7, "b": 8}"""
+ msg = "nrows can only be passed if lines=True"
+ with pytest.raises(ValueError, match=msg):
+ read_json(jsonl, lines=False, nrows=2, engine=engine)
+
+
+def test_readjson_lines_chunks_fileurl(request, datapath, engine):
+ # GH 27135
+ # Test reading line-format JSON from file url
+ if engine == "pyarrow":
+ # GH 48893
+ reason = (
+ "Pyarrow only supports a file path as an input and line delimited json"
+ "and doesn't support chunksize parameter."
+ )
+ request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))
+
+ df_list_expected = [
+ DataFrame([[1, 2]], columns=["a", "b"], index=[0]),
+ DataFrame([[3, 4]], columns=["a", "b"], index=[1]),
+ DataFrame([[5, 6]], columns=["a", "b"], index=[2]),
+ ]
+ os_path = datapath("io", "json", "data", "line_delimited.json")
+ file_url = Path(os_path).as_uri()
+ with read_json(file_url, lines=True, chunksize=1, engine=engine) as url_reader:
+ for index, chunk in enumerate(url_reader):
+ tm.assert_frame_equal(chunk, df_list_expected[index])
+
+
+def test_chunksize_is_incremental():
+ # See https://github.com/pandas-dev/pandas/issues/34548
+ jsonl = (
+ """{"a": 1, "b": 2}
+ {"a": 3, "b": 4}
+ {"a": 5, "b": 6}
+ {"a": 7, "b": 8}\n"""
+ * 1000
+ )
+
+ class MyReader:
+ def __init__(self, contents) -> None:
+ self.read_count = 0
+ self.stringio = StringIO(contents)
+
+ def read(self, *args):
+ self.read_count += 1
+ return self.stringio.read(*args)
+
+ def __iter__(self) -> Iterator:
+ self.read_count += 1
+ return iter(self.stringio)
+
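+ # With chunksize=100 the input should be consumed in many small incremental reads, not one big slurp.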
+ reader = MyReader(jsonl)
+ assert len(list(read_json(reader, lines=True, chunksize=100))) > 1
+ assert reader.read_count > 10
+
+
+@pytest.mark.parametrize("orient_", ["split", "index", "table"])
+def test_to_json_append_orient(orient_):
+ # GH 35849
+ # Test ValueError when orient is not 'records'
+ df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+ msg = (
+ r"mode='a' \(append\) is only supported when "
+ "lines is True and orient is 'records'"
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_json(mode="a", orient=orient_)
+
+
+def test_to_json_append_lines():
+ # GH 35849
+ # Test ValueError when lines is not True
+ df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+ msg = (
+ r"mode='a' \(append\) is only supported when "
+ "lines is True and orient is 'records'"
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_json(mode="a", lines=False, orient="records")
+
+
+@pytest.mark.parametrize("mode_", ["r", "x"])
+def test_to_json_append_mode(mode_):
+ # GH 35849
+ # Test ValueError when mode is not supported option
+ df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+ msg = (
+ f"mode={mode_} is not a valid option."
+ "Only 'w' and 'a' are currently supported."
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_json(mode=mode_, lines=False, orient="records")
+
+
+def test_to_json_append_output_consistent_columns():
+ # GH 35849
+ # Testing that resulting output reads in as expected.
+ # Testing same columns, new rows
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+ df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
+
+ expected = DataFrame({"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]})
+ with tm.ensure_clean("test.json") as path:
+ # Save dataframes to the same file
+ df1.to_json(path, lines=True, orient="records")
+ df2.to_json(path, mode="a", lines=True, orient="records")
+
+ # Read the file back in
+ result = read_json(path, lines=True)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_to_json_append_output_inconsistent_columns():
+ # GH 35849
+ # Testing that resulting output reads in as expected.
+ # Testing one new column, one old column, new rows
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+ df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
+
+ expected = DataFrame(
+ {
+ "col1": [1, 2, None, None],
+ "col2": ["a", "b", "e", "f"],
+ "col3": [np.nan, np.nan, "!", "#"],
+ }
+ )
+ with tm.ensure_clean("test.json") as path:
+ # Save dataframes to the same file
+ df1.to_json(path, mode="a", lines=True, orient="records")
+ df3.to_json(path, mode="a", lines=True, orient="records")
+
+ # Read the file back in
+ result = read_json(path, lines=True)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_to_json_append_output_different_columns():
+ # GH 35849
+ # Testing that resulting output reads in as expected.
+ # Testing same, differing and new columns
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+ df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
+ df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
+ df4 = DataFrame({"col4": [True, False]})
+
+ expected = DataFrame(
+ {
+ "col1": [1, 2, 3, 4, None, None, None, None],
+ "col2": ["a", "b", "c", "d", "e", "f", np.nan, np.nan],
+ "col3": [np.nan, np.nan, np.nan, np.nan, "!", "#", np.nan, np.nan],
+ "col4": [None, None, None, None, None, None, True, False],
+ }
+ ).astype({"col4": "float"})
+ with tm.ensure_clean("test.json") as path:
+ # Save dataframes to the same file
+ df1.to_json(path, mode="a", lines=True, orient="records")
+ df2.to_json(path, mode="a", lines=True, orient="records")
+ df3.to_json(path, mode="a", lines=True, orient="records")
+ df4.to_json(path, mode="a", lines=True, orient="records")
+
+ # Read the file back in
+ result = read_json(path, lines=True)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_to_json_append_output_different_columns_reordered():
+ # GH 35849
+ # Testing that resulting output reads in as expected.
+ # Testing specific result column order.
+ df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})
+ df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})
+ df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})
+ df4 = DataFrame({"col4": [True, False]})
+
+ # df4, df3, df2, df1 (in that order)
+ expected = DataFrame(
+ {
+ "col4": [True, False, None, None, None, None, None, None],
+ "col2": [np.nan, np.nan, "e", "f", "c", "d", "a", "b"],
+ "col3": [np.nan, np.nan, "!", "#", np.nan, np.nan, np.nan, np.nan],
+ "col1": [None, None, None, None, 3, 4, 1, 2],
+ }
+ ).astype({"col4": "float"})
+ with tm.ensure_clean("test.json") as path:
+ # Save dataframes to the same file
+ df4.to_json(path, mode="a", lines=True, orient="records")
+ df3.to_json(path, mode="a", lines=True, orient="records")
+ df2.to_json(path, mode="a", lines=True, orient="records")
+ df1.to_json(path, mode="a", lines=True, orient="records")
+
+ # Read the file back in
+ result = read_json(path, lines=True)
+ tm.assert_frame_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py
new file mode 100644
index 0000000000000000000000000000000000000000..56ea9ea625dff721a2e989d858ae89c22aa69f4d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/json/test_ujson.py
@@ -0,0 +1,1087 @@
+import calendar
+import datetime
+import decimal
+import json
+import locale
+import math
+import re
+import time
+
+import dateutil
+import numpy as np
+import pytest
+import pytz
+
+import pandas._libs.json as ujson
+from pandas.compat import IS64
+
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ Index,
+ NaT,
+ PeriodIndex,
+ Series,
+ Timedelta,
+ Timestamp,
+ date_range,
+)
+import pandas._testing as tm
+
+
+def _clean_dict(d):
+ """
+ Sanitize dictionary for JSON by converting all keys to strings.
+
+ Parameters
+ ----------
+ d : dict
+ The dictionary to convert.
+
+ Returns
+ -------
+ cleaned_dict : dict
+ """
+ return {str(k): v for k, v in d.items()}
+
+
+@pytest.fixture(
+ params=[None, "split", "records", "values", "index"] # Column indexed by default.
+)
+def orient(request):
+ return request.param
+
+
+class TestUltraJSONTests:
+ @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")
+ def test_encode_decimal(self):
+ sut = decimal.Decimal("1337.1337")
+ encoded = ujson.ujson_dumps(sut, double_precision=15)
+ decoded = ujson.ujson_loads(encoded)
+ assert decoded == 1337.1337
+
+ sut = decimal.Decimal("0.95")
+ encoded = ujson.ujson_dumps(sut, double_precision=1)
+ assert encoded == "1.0"
+
+ decoded = ujson.ujson_loads(encoded)
+ assert decoded == 1.0
+
+ sut = decimal.Decimal("0.94")
+ encoded = ujson.ujson_dumps(sut, double_precision=1)
+ assert encoded == "0.9"
+
+ decoded = ujson.ujson_loads(encoded)
+ assert decoded == 0.9
+
+ sut = decimal.Decimal("1.95")
+ encoded = ujson.ujson_dumps(sut, double_precision=1)
+ assert encoded == "2.0"
+
+ decoded = ujson.ujson_loads(encoded)
+ assert decoded == 2.0
+
+ sut = decimal.Decimal("-1.95")
+ encoded = ujson.ujson_dumps(sut, double_precision=1)
+ assert encoded == "-2.0"
+
+ decoded = ujson.ujson_loads(encoded)
+ assert decoded == -2.0
+
+ sut = decimal.Decimal("0.995")
+ encoded = ujson.ujson_dumps(sut, double_precision=2)
+ assert encoded == "1.0"
+
+ decoded = ujson.ujson_loads(encoded)
+ assert decoded == 1.0
+
+ sut = decimal.Decimal("0.9995")
+ encoded = ujson.ujson_dumps(sut, double_precision=3)
+ assert encoded == "1.0"
+
+ decoded = ujson.ujson_loads(encoded)
+ assert decoded == 1.0
+
+ sut = decimal.Decimal("0.99999999999999944")
+ encoded = ujson.ujson_dumps(sut, double_precision=15)
+ assert encoded == "1.0"
+
+ decoded = ujson.ujson_loads(encoded)
+ assert decoded == 1.0
+
+ @pytest.mark.parametrize("ensure_ascii", [True, False])
+ def test_encode_string_conversion(self, ensure_ascii):
+ string_input = "A string \\ / \b \f \n \r \t &"
+ not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
+ html_encoded = (
+ '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'
+ )
+
+ def helper(expected_output, **encode_kwargs):
+ output = ujson.ujson_dumps(
+ string_input, ensure_ascii=ensure_ascii, **encode_kwargs
+ )
+
+ assert output == expected_output
+ assert string_input == json.loads(output)
+ assert string_input == ujson.ujson_loads(output)
+
+ # Default behavior assumes encode_html_chars=False.
+ helper(not_html_encoded)
+
+ # Make sure explicit encode_html_chars=False works.
+ helper(not_html_encoded, encode_html_chars=False)
+
+ # Make sure explicit encode_html_chars=True does the encoding.
+ helper(html_encoded, encode_html_chars=True)
+
+ @pytest.mark.parametrize(
+ "long_number", [-4342969734183514, -12345678901234.56789012, -528656961.4399388]
+ )
+ def test_double_long_numbers(self, long_number):
+ sut = {"a": long_number}
+ encoded = ujson.ujson_dumps(sut, double_precision=15)
+
+ decoded = ujson.ujson_loads(encoded)
+ assert sut == decoded
+
+ def test_encode_non_c_locale(self):
+ lc_category = locale.LC_NUMERIC
+
+ # We just need one of these locales to work.
+ for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
+ if tm.can_set_locale(new_locale, lc_category):
+ with tm.set_locale(new_locale, lc_category):
+ assert ujson.ujson_loads(ujson.ujson_dumps(4.78e60)) == 4.78e60
+ assert ujson.ujson_loads("4.78", precise_float=True) == 4.78
+ break
+
+ def test_decimal_decode_test_precise(self):
+ sut = {"a": 4.56}
+ encoded = ujson.ujson_dumps(sut)
+ decoded = ujson.ujson_loads(encoded, precise_float=True)
+ assert sut == decoded
+
+ def test_encode_double_tiny_exponential(self):
+ num = 1e-40
+ assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
+ num = 1e-100
+ assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
+ num = -1e-45
+ assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
+ num = -1e-145
+ assert np.allclose(num, ujson.ujson_loads(ujson.ujson_dumps(num)))
+
+ @pytest.mark.parametrize("unicode_key", ["key1", "بن"])
+ def test_encode_dict_with_unicode_keys(self, unicode_key):
+ unicode_dict = {unicode_key: "value1"}
+ assert unicode_dict == ujson.ujson_loads(ujson.ujson_dumps(unicode_dict))
+
+ @pytest.mark.parametrize(
+ "double_input", [math.pi, -math.pi] # Should work with negatives too.
+ )
+ def test_encode_double_conversion(self, double_input):
+ output = ujson.ujson_dumps(double_input)
+ assert round(double_input, 5) == round(json.loads(output), 5)
+ assert round(double_input, 5) == round(ujson.ujson_loads(output), 5)
+
+ def test_encode_with_decimal(self):
+ decimal_input = 1.0
+ output = ujson.ujson_dumps(decimal_input)
+
+ assert output == "1.0"
+
+ def test_encode_array_of_nested_arrays(self):
+ nested_input = [[[[]]]] * 20
+ output = ujson.ujson_dumps(nested_input)
+
+ assert nested_input == json.loads(output)
+ assert nested_input == ujson.ujson_loads(output)
+
+ def test_encode_array_of_doubles(self):
+ doubles_input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
+ output = ujson.ujson_dumps(doubles_input)
+
+ assert doubles_input == json.loads(output)
+ assert doubles_input == ujson.ujson_loads(output)
+
+ def test_double_precision(self):
+ double_input = 30.012345678901234
+ output = ujson.ujson_dumps(double_input, double_precision=15)
+
+ assert double_input == json.loads(output)
+ assert double_input == ujson.ujson_loads(output)
+
+ for double_precision in (3, 9):
+ output = ujson.ujson_dumps(double_input, double_precision=double_precision)
+ rounded_input = round(double_input, double_precision)
+
+ assert rounded_input == json.loads(output)
+ assert rounded_input == ujson.ujson_loads(output)
+
+ @pytest.mark.parametrize(
+ "invalid_val",
+ [
+ 20,
+ -1,
+ "9",
+ None,
+ ],
+ )
+ def test_invalid_double_precision(self, invalid_val):
+ double_input = 30.12345678901234567890
+ expected_exception = ValueError if isinstance(invalid_val, int) else TypeError
+ msg = (
+ r"Invalid value '.*' for option 'double_precision', max is '15'|"
+ r"an integer is required \(got type |"
+ r"object cannot be interpreted as an integer"
+ )
+ with pytest.raises(expected_exception, match=msg):
+ ujson.ujson_dumps(double_input, double_precision=invalid_val)
+
+ def test_encode_string_conversion2(self):
+ string_input = "A string \\ / \b \f \n \r \t"
+ output = ujson.ujson_dumps(string_input)
+
+ assert string_input == json.loads(output)
+ assert string_input == ujson.ujson_loads(output)
+ assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
+
+ @pytest.mark.parametrize(
+ "unicode_input",
+ ["Räksmörgås اسامة بن محمد بن عوض بن لادن", "\xe6\x97\xa5\xd1\x88"],
+ )
+ def test_encode_unicode_conversion(self, unicode_input):
+ enc = ujson.ujson_dumps(unicode_input)
+ dec = ujson.ujson_loads(enc)
+
+ assert enc == json.dumps(unicode_input)
+ assert dec == json.loads(enc)
+
+ def test_encode_control_escaping(self):
+ escaped_input = "\x19"
+ enc = ujson.ujson_dumps(escaped_input)
+ dec = ujson.ujson_loads(enc)
+
+ assert escaped_input == dec
+ assert enc == json.dumps(escaped_input)
+
+ def test_encode_unicode_surrogate_pair(self):
+ surrogate_input = "\xf0\x90\x8d\x86"
+ enc = ujson.ujson_dumps(surrogate_input)
+ dec = ujson.ujson_loads(enc)
+
+ assert enc == json.dumps(surrogate_input)
+ assert dec == json.loads(enc)
+
+ def test_encode_unicode_4bytes_utf8(self):
+ four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
+ enc = ujson.ujson_dumps(four_bytes_input)
+ dec = ujson.ujson_loads(enc)
+
+ assert enc == json.dumps(four_bytes_input)
+ assert dec == json.loads(enc)
+
+ def test_encode_unicode_4bytes_utf8highest(self):
+ four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
+ enc = ujson.ujson_dumps(four_bytes_input)
+
+ dec = ujson.ujson_loads(enc)
+
+ assert enc == json.dumps(four_bytes_input)
+ assert dec == json.loads(enc)
+
+ def test_encode_unicode_error(self):
+ string = "'\udac0'"
+ msg = (
+ r"'utf-8' codec can't encode character '\\udac0' "
+ r"in position 1: surrogates not allowed"
+ )
+ with pytest.raises(UnicodeEncodeError, match=msg):
+ ujson.ujson_dumps([string])
+
+ def test_encode_array_in_array(self):
+ arr_in_arr_input = [[[[]]]]
+ output = ujson.ujson_dumps(arr_in_arr_input)
+
+ assert arr_in_arr_input == json.loads(output)
+ assert output == json.dumps(arr_in_arr_input)
+ assert arr_in_arr_input == ujson.ujson_loads(output)
+
+ @pytest.mark.parametrize(
+ "num_input",
+ [
+ 31337,
+ -31337, # Negative number.
+ -9223372036854775808, # Large negative number.
+ ],
+ )
+ def test_encode_num_conversion(self, num_input):
+ output = ujson.ujson_dumps(num_input)
+ assert num_input == json.loads(output)
+ assert output == json.dumps(num_input)
+ assert num_input == ujson.ujson_loads(output)
+
+ def test_encode_list_conversion(self):
+ list_input = [1, 2, 3, 4]
+ output = ujson.ujson_dumps(list_input)
+
+ assert list_input == json.loads(output)
+ assert list_input == ujson.ujson_loads(output)
+
+ def test_encode_dict_conversion(self):
+ dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
+ output = ujson.ujson_dumps(dict_input)
+
+ assert dict_input == json.loads(output)
+ assert dict_input == ujson.ujson_loads(output)
+
+ @pytest.mark.parametrize("builtin_value", [None, True, False])
+ def test_encode_builtin_values_conversion(self, builtin_value):
+ output = ujson.ujson_dumps(builtin_value)
+ assert builtin_value == json.loads(output)
+ assert output == json.dumps(builtin_value)
+ assert builtin_value == ujson.ujson_loads(output)
+
+ def test_encode_datetime_conversion(self):
+ datetime_input = datetime.datetime.fromtimestamp(time.time())
+ output = ujson.ujson_dumps(datetime_input, date_unit="s")
+ expected = calendar.timegm(datetime_input.utctimetuple())
+
+ assert int(expected) == json.loads(output)
+ assert int(expected) == ujson.ujson_loads(output)
+
+ def test_encode_date_conversion(self):
+ date_input = datetime.date.fromtimestamp(time.time())
+ output = ujson.ujson_dumps(date_input, date_unit="s")
+
+ tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
+ expected = calendar.timegm(tup)
+
+ assert int(expected) == json.loads(output)
+ assert int(expected) == ujson.ujson_loads(output)
+
+ @pytest.mark.parametrize(
+ "test",
+ [datetime.time(), datetime.time(1, 2, 3), datetime.time(10, 12, 15, 343243)],
+ )
+ def test_encode_time_conversion_basic(self, test):
+ output = ujson.ujson_dumps(test)
+ expected = f'"{test.isoformat()}"'
+ assert expected == output
+
+ def test_encode_time_conversion_pytz(self):
+ # see gh-11473: to_json segfaults with timezone-aware datetimes
+ test = datetime.time(10, 12, 15, 343243, pytz.utc)
+ output = ujson.ujson_dumps(test)
+ expected = f'"{test.isoformat()}"'
+ assert expected == output
+
+ def test_encode_time_conversion_dateutil(self):
+ # see gh-11473: to_json segfaults with timezone-aware datetimes
+ test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
+ output = ujson.ujson_dumps(test)
+ expected = f'"{test.isoformat()}"'
+ assert expected == output
+
+ @pytest.mark.parametrize(
+ "decoded_input", [NaT, np.datetime64("NaT"), np.nan, np.inf, -np.inf]
+ )
+ def test_encode_as_null(self, decoded_input):
+ assert ujson.ujson_dumps(decoded_input) == "null", "Expected null"
+
+ def test_datetime_units(self):
+ val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
+ stamp = Timestamp(val).as_unit("ns")
+
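+ # Each coarser date_unit divides the nanosecond timestamp by the matching power of 10.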
+ roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="s"))
+ assert roundtrip == stamp._value // 10**9
+
+ roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ms"))
+ assert roundtrip == stamp._value // 10**6
+
+ roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="us"))
+ assert roundtrip == stamp._value // 10**3
+
+ roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ns"))
+ assert roundtrip == stamp._value
+
+ msg = "Invalid value 'foo' for option 'date_unit'"
+ with pytest.raises(ValueError, match=msg):
+ ujson.ujson_dumps(val, date_unit="foo")
+
+ def test_encode_to_utf8(self):
+ unencoded = "\xe6\x97\xa5\xd1\x88"
+
+ enc = ujson.ujson_dumps(unencoded, ensure_ascii=False)
+ dec = ujson.ujson_loads(enc)
+
+ assert enc == json.dumps(unencoded, ensure_ascii=False)
+ assert dec == json.loads(enc)
+
+ def test_decode_from_unicode(self):
+ unicode_input = '{"obj": 31337}'
+
+ dec1 = ujson.ujson_loads(unicode_input)
+ dec2 = ujson.ujson_loads(str(unicode_input))
+
+ assert dec1 == dec2
+
+ def test_encode_recursion_max(self):
+ # 8 is the max recursion depth
+
+ class O2:
+ member = 0
+
+ class O1:
+ member = 0
+
+ decoded_input = O1()
+ decoded_input.member = O2()
+ decoded_input.member.member = decoded_input
+
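+ # The object graph is now cyclic, so encoding must stop at the recursion limit.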
+ with pytest.raises(OverflowError, match="Maximum recursion level reached"):
+ ujson.ujson_dumps(decoded_input)
+
+ def test_decode_gibberish(self):
+ gibberish = "fdsa sda v9sa fdsa"
+ msg = "Unexpected character found when decoding 'false'"
+ with pytest.raises(ValueError, match=msg):
+ ujson.ujson_loads(gibberish)
+
+ @pytest.mark.parametrize(
+ "broken_json",
+ [
+ "[", # Broken array start.
+ "{", # Broken object start.
+ "]", # Broken array end.
+ "}", # Broken object end.
+ ],
+ )
+ def test_decode_broken_json(self, broken_json):
+ msg = "Expected object or value"
+ with pytest.raises(ValueError, match=msg):
+ ujson.ujson_loads(broken_json)
+
+ @pytest.mark.parametrize("too_big_char", ["[", "{"])
+ def test_decode_depth_too_big(self, too_big_char):
+ with pytest.raises(ValueError, match="Reached object decoding depth limit"):
+ ujson.ujson_loads(too_big_char * (1024 * 1024))
+
+ @pytest.mark.parametrize(
+ "bad_string",
+ [
+ '"TESTING', # Unterminated.
+ '"TESTING\\"', # Unterminated escape.
+ "tru", # Broken True.
+ "fa", # Broken False.
+ "n", # Broken None.
+ ],
+ )
+ def test_decode_bad_string(self, bad_string):
+ msg = (
+ "Unexpected character found when decoding|"
+ "Unmatched ''\"' when when decoding 'string'"
+ )
+ with pytest.raises(ValueError, match=msg):
+ ujson.ujson_loads(bad_string)
+
+ @pytest.mark.parametrize(
+ "broken_json, err_msg",
+ [
+ (
+ '{{1337:""}}',
+ "Key name of object must be 'string' when decoding 'object'",
+ ),
+ ('{{"key":"}', "Unmatched ''\"' when when decoding 'string'"),
+ ("[[[true", "Unexpected character found when decoding array value (2)"),
+ ],
+ )
+ def test_decode_broken_json_leak(self, broken_json, err_msg):
+ for _ in range(1000):
+ with pytest.raises(ValueError, match=re.escape(err_msg)):
+ ujson.ujson_loads(broken_json)
+
+ @pytest.mark.parametrize(
+ "invalid_dict",
+ [
+ "{{{{31337}}}}", # No key.
+ '{{{{"key":}}}}', # No value.
+ '{{{{"key"}}}}', # No colon or value.
+ ],
+ )
+ def test_decode_invalid_dict(self, invalid_dict):
+ msg = (
+ "Key name of object must be 'string' when decoding 'object'|"
+ "No ':' found when decoding object value|"
+ "Expected object or value"
+ )
+ with pytest.raises(ValueError, match=msg):
+ ujson.ujson_loads(invalid_dict)
+
+ @pytest.mark.parametrize(
+ "numeric_int_as_str", ["31337", "-31337"] # Should work with negatives.
+ )
+ def test_decode_numeric_int(self, numeric_int_as_str):
+ assert int(numeric_int_as_str) == ujson.ujson_loads(numeric_int_as_str)
+
+ def test_encode_null_character(self):
+ wrapped_input = "31337 \x00 1337"
+ output = ujson.ujson_dumps(wrapped_input)
+
+ assert wrapped_input == json.loads(output)
+ assert output == json.dumps(wrapped_input)
+ assert wrapped_input == ujson.ujson_loads(output)
+
+ alone_input = "\x00"
+ output = ujson.ujson_dumps(alone_input)
+
+ assert alone_input == json.loads(output)
+ assert output == json.dumps(alone_input)
+ assert alone_input == ujson.ujson_loads(output)
+ assert '" \\u0000\\r\\n "' == ujson.ujson_dumps(" \u0000\r\n ")
+
+ def test_decode_null_character(self):
+ wrapped_input = '"31337 \\u0000 31337"'
+ assert ujson.ujson_loads(wrapped_input) == json.loads(wrapped_input)
+
+ def test_encode_list_long_conversion(self):
+ long_input = [
+ 9223372036854775807,
+ 9223372036854775807,
+ 9223372036854775807,
+ 9223372036854775807,
+ 9223372036854775807,
+ 9223372036854775807,
+ ]
+ output = ujson.ujson_dumps(long_input)
+
+ assert long_input == json.loads(output)
+ assert long_input == ujson.ujson_loads(output)
+
+ @pytest.mark.parametrize("long_input", [9223372036854775807, 18446744073709551615])
+ def test_encode_long_conversion(self, long_input):
+ output = ujson.ujson_dumps(long_input)
+
+ assert long_input == json.loads(output)
+ assert output == json.dumps(long_input)
+ assert long_input == ujson.ujson_loads(output)
+
+ @pytest.mark.parametrize("bigNum", [2**64, -(2**63) - 1])
+ def test_dumps_ints_larger_than_maxsize(self, bigNum):
+ encoding = ujson.ujson_dumps(bigNum)
+ assert str(bigNum) == encoding
+
+ with pytest.raises(
+ ValueError,
+ match="Value is too big|Value is too small",
+ ):
+ assert ujson.ujson_loads(encoding) == bigNum
+
+ @pytest.mark.parametrize(
+ "int_exp", ["1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"]
+ )
+ def test_decode_numeric_int_exp(self, int_exp):
+ assert ujson.ujson_loads(int_exp) == json.loads(int_exp)
+
+ def test_loads_non_str_bytes_raises(self):
+ msg = "a bytes-like object is required, not 'NoneType'"
+ with pytest.raises(TypeError, match=msg):
+ ujson.ujson_loads(None)
+
+ @pytest.mark.parametrize("val", [3590016419, 2**31, 2**32, (2**32) - 1])
+ def test_decode_number_with_32bit_sign_bit(self, val):
+ # Test that numbers that fit within 32 bits but would have the
+ # sign bit set (2**31 <= x < 2**32) are decoded properly.
+ doc = f'{{"id": {val}}}'
+ assert ujson.ujson_loads(doc)["id"] == val
+
+ def test_encode_big_escape(self):
+ # Make sure no Exception is raised.
+ for _ in range(10):
+ base = "\u00e5".encode()
+ escape_input = base * 1024 * 1024 * 2
+ ujson.ujson_dumps(escape_input)
+
+ def test_decode_big_escape(self):
+ # Make sure no Exception is raised.
+ for _ in range(10):
+ base = "\u00e5".encode()
+ quote = b'"'
+
+ escape_input = quote + (base * 1024 * 1024 * 2) + quote
+ ujson.ujson_loads(escape_input)
+
+ def test_to_dict(self):
+ d = {"key": 31337}
+
+ class DictTest:
+ def toDict(self):
+ return d
+
+ o = DictTest()
+ output = ujson.ujson_dumps(o)
+
+ dec = ujson.ujson_loads(output)
+ assert dec == d
+
+ def test_default_handler(self):
+ class _TestObject:
+ def __init__(self, val) -> None:
+ self.val = val
+
+ @property
+ def recursive_attr(self):
+ return _TestObject("recursive_attr")
+
+ def __str__(self) -> str:
+ return str(self.val)
+
+ msg = "Maximum recursion level reached"
+ with pytest.raises(OverflowError, match=msg):
+ ujson.ujson_dumps(_TestObject("foo"))
+ assert '"foo"' == ujson.ujson_dumps(_TestObject("foo"), default_handler=str)
+
+ def my_handler(_):
+ return "foobar"
+
+ assert '"foobar"' == ujson.ujson_dumps(
+ _TestObject("foo"), default_handler=my_handler
+ )
+
+ def my_handler_raises(_):
+ raise TypeError("I raise for anything")
+
+ with pytest.raises(TypeError, match="I raise for anything"):
+ ujson.ujson_dumps(_TestObject("foo"), default_handler=my_handler_raises)
+
+ def my_int_handler(_):
+ return 42
+
+ assert (
+ ujson.ujson_loads(
+ ujson.ujson_dumps(_TestObject("foo"), default_handler=my_int_handler)
+ )
+ == 42
+ )
+
+ def my_obj_handler(_):
+ return datetime.datetime(2013, 2, 3)
+
+ assert ujson.ujson_loads(
+ ujson.ujson_dumps(datetime.datetime(2013, 2, 3))
+ ) == ujson.ujson_loads(
+ ujson.ujson_dumps(_TestObject("foo"), default_handler=my_obj_handler)
+ )
+
+ obj_list = [_TestObject("foo"), _TestObject("bar")]
+ assert json.loads(json.dumps(obj_list, default=str)) == ujson.ujson_loads(
+ ujson.ujson_dumps(obj_list, default_handler=str)
+ )
+
+ def test_encode_object(self):
+ class _TestObject:
+ def __init__(self, a, b, _c, d) -> None:
+ self.a = a
+ self.b = b
+ self._c = _c
+ self.d = d
+
+ def e(self):
+ return 5
+
+ # JSON keys should be all non-callable non-underscore attributes, see GH-42768
+ test_object = _TestObject(a=1, b=2, _c=3, d=4)
+ assert ujson.ujson_loads(ujson.ujson_dumps(test_object)) == {
+ "a": 1,
+ "b": 2,
+ "d": 4,
+ }
+
+ def test_ujson__name__(self):
+ # GH 52898
+ assert ujson.__name__ == "pandas._libs.json"
+
+
+class TestNumpyJSONTests:
+ @pytest.mark.parametrize("bool_input", [True, False])
+ def test_bool(self, bool_input):
+ b = bool(bool_input)
+ assert ujson.ujson_loads(ujson.ujson_dumps(b)) == b
+
+ def test_bool_array(self):
+ bool_array = np.array(
+ [True, False, True, True, False, True, False, False], dtype=bool
+ )
+ output = np.array(ujson.ujson_loads(ujson.ujson_dumps(bool_array)), dtype=bool)
+ tm.assert_numpy_array_equal(bool_array, output)
+
+ def test_int(self, any_int_numpy_dtype):
+ klass = np.dtype(any_int_numpy_dtype).type
+ num = klass(1)
+
+ assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
+
+ def test_int_array(self, any_int_numpy_dtype):
+ arr = np.arange(100, dtype=int)
+ arr_input = arr.astype(any_int_numpy_dtype)
+
+ arr_output = np.array(
+ ujson.ujson_loads(ujson.ujson_dumps(arr_input)), dtype=any_int_numpy_dtype
+ )
+ tm.assert_numpy_array_equal(arr_input, arr_output)
+
+ def test_int_max(self, any_int_numpy_dtype):
+ if any_int_numpy_dtype in ("int64", "uint64") and not IS64:
+ pytest.skip("Cannot test 64-bit integer on 32-bit platform")
+
+ klass = np.dtype(any_int_numpy_dtype).type
+
+ # uint64 max will always overflow,
+ # as it's encoded to signed.
+ if any_int_numpy_dtype == "uint64":
+ num = np.iinfo("int64").max
+ else:
+ num = np.iinfo(any_int_numpy_dtype).max
+
+ assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
+
+ def test_float(self, float_numpy_dtype):
+ klass = np.dtype(float_numpy_dtype).type
+ num = klass(256.2013)
+
+ assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
+
+ def test_float_array(self, float_numpy_dtype):
+ arr = np.arange(12.5, 185.72, 1.7322, dtype=float)
+ float_input = arr.astype(float_numpy_dtype)
+
+ float_output = np.array(
+ ujson.ujson_loads(ujson.ujson_dumps(float_input, double_precision=15)),
+ dtype=float_numpy_dtype,
+ )
+ tm.assert_almost_equal(float_input, float_output)
+
+ def test_float_max(self, float_numpy_dtype):
+ klass = np.dtype(float_numpy_dtype).type
+ num = klass(np.finfo(float_numpy_dtype).max / 10)
+
+ tm.assert_almost_equal(
+ klass(ujson.ujson_loads(ujson.ujson_dumps(num, double_precision=15))), num
+ )
+
+ def test_array_basic(self):
+ arr = np.arange(96)
+ arr = arr.reshape((2, 2, 2, 2, 3, 2))
+
+ tm.assert_numpy_array_equal(
+ np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr
+ )
+
+ @pytest.mark.parametrize("shape", [(10, 10), (5, 5, 4), (100, 1)])
+ def test_array_reshaped(self, shape):
+ arr = np.arange(100)
+ arr = arr.reshape(shape)
+
+ tm.assert_numpy_array_equal(
+ np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr
+ )
+
+ def test_array_list(self):
+ arr_list = [
+ "a",
+ [],
+ {},
+ {},
+ [],
+ 42,
+ 97.8,
+ ["a", "b"],
+ {"key": "val"},
+ ]
+ arr = np.array(arr_list, dtype=object)
+ result = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=object)
+ tm.assert_numpy_array_equal(result, arr)
+
+ def test_array_float(self):
+ dtype = np.float32
+
+ arr = np.arange(100.202, 200.202, 1, dtype=dtype)
+ arr = arr.reshape((5, 5, 4))
+
+ arr_out = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=dtype)
+ tm.assert_almost_equal(arr, arr_out)
+
+ def test_0d_array(self):
+ # gh-18878
+ msg = re.escape(
+ "array(1) (numpy-scalar) is not JSON serializable at the moment"
+ )
+ with pytest.raises(TypeError, match=msg):
+ ujson.ujson_dumps(np.array(1))
+
+ def test_array_long_double(self):
+ msg = re.compile(
+ "1234.5.* \\(numpy-scalar\\) is not JSON serializable at the moment"
+ )
+ with pytest.raises(TypeError, match=msg):
+ ujson.ujson_dumps(np.longdouble(1234.5))
+
+
+class TestPandasJSONTests:
+ def test_dataframe(self, orient):
+ dtype = np.int64
+
+ df = DataFrame(
+ [[1, 2, 3], [4, 5, 6]],
+ index=["a", "b"],
+ columns=["x", "y", "z"],
+ dtype=dtype,
+ )
+ encode_kwargs = {} if orient is None else {"orient": orient}
+ assert (df.dtypes == dtype).all()
+
+ output = ujson.ujson_loads(ujson.ujson_dumps(df, **encode_kwargs))
+ assert (df.dtypes == dtype).all()
+
+ # Ensure proper DataFrame initialization.
+ if orient == "split":
+ dec = _clean_dict(output)
+ output = DataFrame(**dec)
+ else:
+ output = DataFrame(output)
+
+ # Corrections to enable DataFrame comparison.
+ if orient == "values":
+ df.columns = [0, 1, 2]
+ df.index = [0, 1]
+ elif orient == "records":
+ df.index = [0, 1]
+ elif orient == "index":
+ df = df.transpose()
+
+ assert (df.dtypes == dtype).all()
+ tm.assert_frame_equal(output, df)
+
+ def test_dataframe_nested(self, orient):
+ df = DataFrame(
+ [[1, 2, 3], [4, 5, 6]], index=["a", "b"], columns=["x", "y", "z"]
+ )
+
+ nested = {"df1": df, "df2": df.copy()}
+ kwargs = {} if orient is None else {"orient": orient}
+
+ exp = {
+ "df1": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),
+ "df2": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),
+ }
+ assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp
+
+ def test_series(self, orient):
+ dtype = np.int64
+ s = Series(
+ [10, 20, 30, 40, 50, 60],
+ name="series",
+ index=[6, 7, 8, 9, 10, 15],
+ dtype=dtype,
+ ).sort_values()
+ assert s.dtype == dtype
+
+ encode_kwargs = {} if orient is None else {"orient": orient}
+
+ output = ujson.ujson_loads(ujson.ujson_dumps(s, **encode_kwargs))
+ assert s.dtype == dtype
+
+ if orient == "split":
+ dec = _clean_dict(output)
+ output = Series(**dec)
+ else:
+ output = Series(output)
+
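+ # Normalize name/index on the original Series to match what survives the round trip.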
+ if orient in (None, "index"):
+ s.name = None
+ output = output.sort_values()
+ s.index = ["6", "7", "8", "9", "10", "15"]
+ elif orient in ("records", "values"):
+ s.name = None
+ s.index = [0, 1, 2, 3, 4, 5]
+
+ assert s.dtype == dtype
+ tm.assert_series_equal(output, s)
+
+ def test_series_nested(self, orient):
+ s = Series(
+ [10, 20, 30, 40, 50, 60], name="series", index=[6, 7, 8, 9, 10, 15]
+ ).sort_values()
+ nested = {"s1": s, "s2": s.copy()}
+ kwargs = {} if orient is None else {"orient": orient}
+
+ exp = {
+ "s1": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),
+ "s2": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),
+ }
+ assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp
+
+ def test_index(self):
+ i = Index([23, 45, 18, 98, 43, 11], name="index")
+
+ # Column indexed.
+ output = Index(ujson.ujson_loads(ujson.ujson_dumps(i)), name="index")
+ tm.assert_index_equal(i, output)
+
+ dec = _clean_dict(ujson.ujson_loads(ujson.ujson_dumps(i, orient="split")))
+ output = Index(**dec)
+
+ tm.assert_index_equal(i, output)
+ assert i.name == output.name
+
+ output = Index(
+ ujson.ujson_loads(ujson.ujson_dumps(i, orient="values")), name="index"
+ )
+ tm.assert_index_equal(i, output)
+
+ output = Index(
+ ujson.ujson_loads(ujson.ujson_dumps(i, orient="records")), name="index"
+ )
+ tm.assert_index_equal(i, output)
+
+ output = Index(
+ ujson.ujson_loads(ujson.ujson_dumps(i, orient="index")), name="index"
+ )
+ tm.assert_index_equal(i, output)
+
+ def test_datetime_index(self):
+ date_unit = "ns"
+
+ # freq doesn't round-trip
+ rng = DatetimeIndex(list(date_range("1/1/2000", periods=20)), freq=None)
+ encoded = ujson.ujson_dumps(rng, date_unit=date_unit)
+
+ decoded = DatetimeIndex(np.array(ujson.ujson_loads(encoded)))
+ tm.assert_index_equal(rng, decoded)
+
+ ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)
+ decoded = Series(ujson.ujson_loads(ujson.ujson_dumps(ts, date_unit=date_unit)))
+
+ idx_values = decoded.index.values.astype(np.int64)
+ decoded.index = DatetimeIndex(idx_values)
+ tm.assert_series_equal(ts, decoded)
+
+ @pytest.mark.parametrize(
+ "invalid_arr",
+ [
+ "[31337,]", # Trailing comma.
+ "[,31337]", # Leading comma.
+ "[]]", # Unmatched bracket.
+ "[,]", # Only comma.
+ ],
+ )
+ def test_decode_invalid_array(self, invalid_arr):
+ msg = (
+ "Expected object or value|Trailing data|"
+ "Unexpected character found when decoding array value"
+ )
+ with pytest.raises(ValueError, match=msg):
+ ujson.ujson_loads(invalid_arr)
+
+ @pytest.mark.parametrize("arr", [[], [31337]])
+ def test_decode_array(self, arr):
+ assert arr == ujson.ujson_loads(str(arr))
+
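+    # These are the int64 bounds: 2**63 - 1 and -(2**63).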
+ @pytest.mark.parametrize("extreme_num", [9223372036854775807, -9223372036854775808])
+ def test_decode_extreme_numbers(self, extreme_num):
+ assert extreme_num == ujson.ujson_loads(str(extreme_num))
+
+ @pytest.mark.parametrize("too_extreme_num", [f"{2**64}", f"{-2**63-1}"])
+ def test_decode_too_extreme_numbers(self, too_extreme_num):
+ with pytest.raises(
+ ValueError,
+ match="Value is too big|Value is too small",
+ ):
+ ujson.ujson_loads(too_extreme_num)
+
+ def test_decode_with_trailing_whitespaces(self):
+ assert {} == ujson.ujson_loads("{}\n\t ")
+
+ def test_decode_with_trailing_non_whitespaces(self):
+ with pytest.raises(ValueError, match="Trailing data"):
+ ujson.ujson_loads("{}\n\t a")
+
+ @pytest.mark.parametrize("value", [f"{2**64}", f"{-2**63-1}"])
+ def test_decode_array_with_big_int(self, value):
+ with pytest.raises(
+ ValueError,
+ match="Value is too big|Value is too small",
+ ):
+ ujson.ujson_loads(value)
+
+ @pytest.mark.parametrize(
+ "float_number",
+ [
+ 1.1234567893,
+ 1.234567893,
+ 1.34567893,
+ 1.4567893,
+ 1.567893,
+ 1.67893,
+ 1.7893,
+ 1.893,
+ 1.3,
+ ],
+ )
+ @pytest.mark.parametrize("sign", [-1, 1])
+ def test_decode_floating_point(self, sign, float_number):
+ float_number *= sign
+ tm.assert_almost_equal(
+ float_number, ujson.ujson_loads(str(float_number)), rtol=1e-15
+ )
+
+ def test_encode_big_set(self):
+        s = set(range(100000))
+
+ # Make sure no Exception is raised.
+ ujson.ujson_dumps(s)
+
+ def test_encode_empty_set(self):
+ assert "[]" == ujson.ujson_dumps(set())
+
+ def test_encode_set(self):
+ s = {1, 2, 3, 4, 5, 6, 7, 8, 9}
+ enc = ujson.ujson_dumps(s)
+ dec = ujson.ujson_loads(enc)
+
+ for v in dec:
+ assert v in s
+
+ @pytest.mark.parametrize(
+ "td",
+ [
+ Timedelta(days=366),
+ Timedelta(days=-1),
+ Timedelta(hours=13, minutes=5, seconds=5),
+ Timedelta(hours=13, minutes=20, seconds=30),
+ Timedelta(days=-1, nanoseconds=5),
+ Timedelta(nanoseconds=1),
+ Timedelta(microseconds=1, nanoseconds=1),
+ Timedelta(milliseconds=1, microseconds=1, nanoseconds=1),
+ Timedelta(milliseconds=999, microseconds=999, nanoseconds=999),
+ ],
+ )
+ def test_encode_timedelta_iso(self, td):
+ # GH 28256
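+        # Timedelta.isoformat() renders an ISO 8601 duration,
+        # e.g. "P0DT13H5M5S" for 13 hours, 5 minutes, 5 seconds.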
+ result = ujson.ujson_dumps(td, iso_dates=True)
+ expected = f'"{td.isoformat()}"'
+
+ assert result == expected
+
+ def test_encode_periodindex(self):
+ # GH 46683
+ p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D")
+ df = DataFrame(index=p)
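+        # With no columns there is nothing to serialize, so the payload is
+        # an empty JSON object; the point is that the PeriodIndex must not
+        # raise during encoding.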
+ assert df.to_json() == "{}"
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7cd4e7a0fad2623d52c81ead855a747f3731b73
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_network.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6a5385d8becc0f68ef149364c01d2c68269d326
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f8ccf2e1350ff1a89dbffc13559580d0bfdcc60
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_categorical.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a08fcda8b5ff5cfa2c64f1d2a32a2f650f37d23
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_dtypes_basic.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0eadb6c4af31f6c5a8c74c1b75423627a57ef1c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/__pycache__/test_empty.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_categorical.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_categorical.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4aff14a5ce32d19b0c4e6c9ef504ae141bdca67
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_categorical.py
@@ -0,0 +1,334 @@
+"""
+Tests dtype specification during parsing
+for all of the parsers defined in parsers.py
+"""
+from io import StringIO
+import os
+
+import numpy as np
+import pytest
+
+from pandas._libs import parsers as libparsers
+
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+import pandas as pd
+from pandas import (
+ Categorical,
+ DataFrame,
+ Timestamp,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+
+
+@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
+@pytest.mark.parametrize(
+ "dtype",
+ [
+ "category",
+ CategoricalDtype(),
+ {"a": "category", "b": "category", "c": CategoricalDtype()},
+ ],
+)
+def test_categorical_dtype(all_parsers, dtype):
+ # see gh-10153
+ parser = all_parsers
+ data = """a,b,c
+1,a,3.4
+1,a,3.4
+2,b,4.5"""
+ expected = DataFrame(
+ {
+ "a": Categorical(["1", "1", "2"]),
+ "b": Categorical(["a", "a", "b"]),
+ "c": Categorical(["3.4", "3.4", "4.5"]),
+ }
+ )
+ actual = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(actual, expected)
+
+
+@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
+def test_categorical_dtype_single(all_parsers, dtype, request):
+ # see gh-10153
+ parser = all_parsers
+ data = """a,b,c
+1,a,3.4
+1,a,3.4
+2,b,4.5"""
+ expected = DataFrame(
+ {"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
+ )
+ if parser.engine == "pyarrow":
+ mark = pytest.mark.xfail(
+ strict=False,
+ reason="Flaky test sometimes gives object dtype instead of Categorical",
+ )
+ request.applymarker(mark)
+
+ actual = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(actual, expected)
+
+
+@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
+def test_categorical_dtype_unsorted(all_parsers):
+ # see gh-10153
+ parser = all_parsers
+ data = """a,b,c
+1,b,3.4
+1,b,3.4
+2,a,4.5"""
+ expected = DataFrame(
+ {
+ "a": Categorical(["1", "1", "2"]),
+ "b": Categorical(["b", "b", "a"]),
+ "c": Categorical(["3.4", "3.4", "4.5"]),
+ }
+ )
+ actual = parser.read_csv(StringIO(data), dtype="category")
+ tm.assert_frame_equal(actual, expected)
+
+
+@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
+def test_categorical_dtype_missing(all_parsers):
+ # see gh-10153
+ parser = all_parsers
+ data = """a,b,c
+1,b,3.4
+1,nan,3.4
+2,a,4.5"""
+ expected = DataFrame(
+ {
+ "a": Categorical(["1", "1", "2"]),
+ "b": Categorical(["b", np.nan, "a"]),
+ "c": Categorical(["3.4", "3.4", "4.5"]),
+ }
+ )
+ actual = parser.read_csv(StringIO(data), dtype="category")
+ tm.assert_frame_equal(actual, expected)
+
+
+@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
+@pytest.mark.slow
+def test_categorical_dtype_high_cardinality_numeric(all_parsers, monkeypatch):
+ # see gh-18186
+ # was an issue with C parser, due to DEFAULT_BUFFER_HEURISTIC
+ parser = all_parsers
+ heuristic = 2**5
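+    # Shrink the C parser's buffer heuristic so this small file is read
+    # in several internal chunks, exercising the gh-18186 code path.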
+ data = np.sort([str(i) for i in range(heuristic + 1)])
+ expected = DataFrame({"a": Categorical(data, ordered=True)})
+ with monkeypatch.context() as m:
+ m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
+ actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category")
+ actual["a"] = actual["a"].cat.reorder_categories(
+ np.sort(actual.a.cat.categories), ordered=True
+ )
+ tm.assert_frame_equal(actual, expected)
+
+
+def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
+ # see gh-10153
+ pth = os.path.join(csv_dir_path, "utf16_ex.txt")
+ parser = all_parsers
+ encoding = "utf-16"
+ sep = "\t"
+
+ expected = parser.read_csv(pth, sep=sep, encoding=encoding)
+ expected = expected.apply(Categorical)
+
+ actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category")
+ tm.assert_frame_equal(actual, expected)
+
+
+def test_categorical_dtype_chunksize_infer_categories(all_parsers):
+ # see gh-10153
+ parser = all_parsers
+ data = """a,b
+1,a
+1,b
+1,b
+2,c"""
+ expecteds = [
+ DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
+ DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
+ ]
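+    # Categories are inferred per chunk: the first chunk sees only
+    # {"a", "b"} and the second only {"b", "c"}.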
+
+ if parser.engine == "pyarrow":
+ msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
+ return
+
+ with parser.read_csv(
+ StringIO(data), dtype={"b": "category"}, chunksize=2
+ ) as actuals:
+ for actual, expected in zip(actuals, expecteds):
+ tm.assert_frame_equal(actual, expected)
+
+
+def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
+ # see gh-10153
+ parser = all_parsers
+ data = """a,b
+1,a
+1,b
+1,b
+2,c"""
+ cats = ["a", "b", "c"]
+ expecteds = [
+ DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),
+ DataFrame(
+ {"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)},
+ index=[2, 3],
+ ),
+ ]
+ dtype = CategoricalDtype(cats)
+
+ if parser.engine == "pyarrow":
+ msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
+ return
+
+ with parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2) as actuals:
+ for actual, expected in zip(actuals, expecteds):
+ tm.assert_frame_equal(actual, expected)
+
+
+def test_categorical_dtype_latin1(all_parsers, csv_dir_path):
+ # see gh-10153
+ pth = os.path.join(csv_dir_path, "unicode_series.csv")
+ parser = all_parsers
+ encoding = "latin-1"
+
+ expected = parser.read_csv(pth, header=None, encoding=encoding)
+ expected[1] = Categorical(expected[1])
+
+ actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})
+ tm.assert_frame_equal(actual, expected)
+
+
+@pytest.mark.parametrize("ordered", [False, True])
+@pytest.mark.parametrize(
+ "categories",
+ [["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],
+)
+def test_categorical_category_dtype(all_parsers, categories, ordered):
+ parser = all_parsers
+ data = """a,b
+1,a
+1,b
+1,b
+2,c"""
+ expected = DataFrame(
+ {
+ "a": [1, 1, 1, 2],
+ "b": Categorical(
+ ["a", "b", "b", "c"], categories=categories, ordered=ordered
+ ),
+ }
+ )
+
+ dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)}
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_categorical_category_dtype_unsorted(all_parsers):
+ parser = all_parsers
+ data = """a,b
+1,a
+1,b
+1,b
+2,c"""
+ dtype = CategoricalDtype(["c", "b", "a"])
+ expected = DataFrame(
+ {
+ "a": [1, 1, 1, 2],
+ "b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),
+ }
+ )
+
+ result = parser.read_csv(StringIO(data), dtype={"b": dtype})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_categorical_coerces_numeric(all_parsers):
+ parser = all_parsers
+ dtype = {"b": CategoricalDtype([1, 2, 3])}
+
+ data = "b\n1\n1\n2\n3"
+ expected = DataFrame({"b": Categorical([1, 1, 2, 3])})
+
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_categorical_coerces_datetime(all_parsers):
+ parser = all_parsers
+ dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)
+ dtype = {"b": CategoricalDtype(dti)}
+
+ data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
+ expected = DataFrame({"b": Categorical(dtype["b"].categories)})
+
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_categorical_coerces_timestamp(all_parsers):
+ parser = all_parsers
+ dtype = {"b": CategoricalDtype([Timestamp("2014")])}
+
+ data = "b\n2014-01-01\n2014-01-01"
+ expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)})
+
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_categorical_coerces_timedelta(all_parsers):
+ parser = all_parsers
+ dtype = {"b": CategoricalDtype(pd.to_timedelta(["1h", "2h", "3h"]))}
+
+ data = "b\n1h\n2h\n3h"
+ expected = DataFrame({"b": Categorical(dtype["b"].categories)})
+
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "data",
+ [
+ "b\nTrue\nFalse\nNA\nFalse",
+ "b\ntrue\nfalse\nNA\nfalse",
+ "b\nTRUE\nFALSE\nNA\nFALSE",
+ "b\nTrue\nFalse\nNA\nFALSE",
+ ],
+)
+def test_categorical_dtype_coerces_boolean(all_parsers, data):
+ # see gh-20498
+ parser = all_parsers
+ dtype = {"b": CategoricalDtype([False, True])}
+ expected = DataFrame({"b": Categorical([True, False, None, False])})
+
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_categorical_unexpected_categories(all_parsers):
+ parser = all_parsers
+ dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}
+
+ data = "b\nd\na\nc\nd" # Unexpected c
+ expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})
+
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce02e752fb90b4f69d63baa6875ba8bda6d991fb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -0,0 +1,643 @@
+"""
+Tests dtype specification during parsing
+for all of the parsers defined in parsers.py
+"""
+from collections import defaultdict
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas.errors import ParserWarning
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Timestamp,
+)
+import pandas._testing as tm
+from pandas.core.arrays import (
+ ArrowStringArray,
+ IntegerArray,
+ StringArray,
+)
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+
+@pytest.mark.parametrize("dtype", [str, object])
+@pytest.mark.parametrize("check_orig", [True, False])
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_dtype_all_columns(all_parsers, dtype, check_orig):
+ # see gh-3795, gh-6607
+ parser = all_parsers
+
+ df = DataFrame(
+ np.random.default_rng(2).random((5, 2)).round(4),
+ columns=list("AB"),
+ index=["1A", "1B", "1C", "1D", "1E"],
+ )
+
+ with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
+ df.to_csv(path)
+
+ result = parser.read_csv(path, dtype=dtype, index_col=0)
+
+ if check_orig:
+ expected = df.copy()
+ result = result.astype(float)
+ else:
+ expected = df.astype(str)
+
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_dtype_per_column(all_parsers):
+ parser = all_parsers
+ data = """\
+one,two
+1,2.5
+2,3.5
+3,4.5
+4,5.5"""
+ expected = DataFrame(
+ [[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
+ )
+ expected["one"] = expected["one"].astype(np.float64)
+ expected["two"] = expected["two"].astype(object)
+
+ result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_invalid_dtype_per_column(all_parsers):
+ parser = all_parsers
+ data = """\
+one,two
+1,2.5
+2,3.5
+3,4.5
+4,5.5"""
+
+ with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"):
+ parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
+
+
+def test_raise_on_passed_int_dtype_with_nas(all_parsers):
+ # see gh-2631
+ parser = all_parsers
+ data = """YEAR, DOY, a
+2001,106380451,10
+2001,,11
+2001,106380451,67"""
+
+ if parser.engine == "c":
+ msg = "Integer column has NA values"
+ elif parser.engine == "pyarrow":
+ msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
+ else:
+ msg = "Unable to convert column DOY"
+
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
+
+
+def test_dtype_with_converters(all_parsers):
+ parser = all_parsers
+ data = """a,b
+1.1,2.2
+1.2,2.3"""
+
+ if parser.engine == "pyarrow":
+ msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(
+ StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}
+ )
+ return
+
+    # Dtype spec is ignored when converters are specified.
+ result = parser.read_csv_check_warnings(
+ ParserWarning,
+ "Both a converter and dtype were specified for column a "
+ "- only the converter will be used.",
+ StringIO(data),
+ dtype={"a": "i8"},
+ converters={"a": lambda x: str(x)},
+ )
+ expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "dtype", list(np.typecodes["AllInteger"] + np.typecodes["Float"])
+)
+def test_numeric_dtype(all_parsers, dtype):
+ data = "0\n1"
+ parser = all_parsers
+ expected = DataFrame([0, 1], dtype=dtype)
+
+ result = parser.read_csv(StringIO(data), header=None, dtype=dtype)
+ tm.assert_frame_equal(expected, result)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_boolean_dtype(all_parsers):
+ parser = all_parsers
+ data = "\n".join(
+ [
+ "a",
+ "True",
+ "TRUE",
+ "true",
+ "1",
+ "1.0",
+ "False",
+ "FALSE",
+ "false",
+ "0",
+ "0.0",
+ "NaN",
+ "nan",
+ "NA",
+ "null",
+ "NULL",
+ ]
+ )
+
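+    # The masked "boolean" dtype maps the usual truthy/falsy spellings to
+    # True/False and treats the NA spellings above as missing.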
+ result = parser.read_csv(StringIO(data), dtype="boolean")
+ expected = DataFrame(
+ {
+ "a": pd.array(
+ [
+ True,
+ True,
+ True,
+ True,
+ True,
+ False,
+ False,
+ False,
+ False,
+ False,
+ None,
+ None,
+ None,
+ None,
+ None,
+ ],
+ dtype="boolean",
+ )
+ }
+ )
+
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_delimiter_with_usecols_and_parse_dates(all_parsers):
+ # GH#35873
+ result = all_parsers.read_csv(
+ StringIO('"dump","-9,1","-9,1",20101010'),
+ engine="python",
+ names=["col", "col1", "col2", "col3"],
+ usecols=["col1", "col2", "col3"],
+ parse_dates=["col3"],
+ decimal=",",
+ )
+ expected = DataFrame(
+ {"col1": [-9.1], "col2": [-9.1], "col3": [Timestamp("2010-10-10")]}
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("thousands", ["_", None])
+def test_decimal_and_exponential(
+ request, python_parser_only, numeric_decimal, thousands
+):
+ # GH#31920
+ decimal_number_check(request, python_parser_only, numeric_decimal, thousands, None)
+
+
+@pytest.mark.parametrize("thousands", ["_", None])
+@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
+def test_1000_sep_decimal_float_precision(
+ request, c_parser_only, numeric_decimal, float_precision, thousands
+):
+    # test decimal and thousands-separator handling across 'float_precision'
+    # parser settings
+ decimal_number_check(
+ request, c_parser_only, numeric_decimal, thousands, float_precision
+ )
+ text, value = numeric_decimal
+ text = " " + text + " "
+ if isinstance(value, str): # the negative cases (parse as text)
+ value = " " + value + " "
+ decimal_number_check(
+ request, c_parser_only, (text, value), thousands, float_precision
+ )
+
+
+def decimal_number_check(request, parser, numeric_decimal, thousands, float_precision):
+ # GH#31920
+ value = numeric_decimal[0]
+ if thousands is None and value in ("1_,", "1_234,56", "1_234,56e0"):
+ request.applymarker(
+ pytest.mark.xfail(reason=f"thousands={thousands} and sep is in {value}")
+ )
+ df = parser.read_csv(
+ StringIO(value),
+ float_precision=float_precision,
+ sep="|",
+ thousands=thousands,
+ decimal=",",
+ header=None,
+ )
+ val = df.iloc[0, 0]
+ assert val == numeric_decimal[1]
+
+
+@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
+def test_skip_whitespace(c_parser_only, float_precision):
+ DATA = """id\tnum\t
+1\t1.2 \t
+1\t 2.1\t
+2\t 1\t
+2\t 1.2 \t
+"""
+ df = c_parser_only.read_csv(
+ StringIO(DATA),
+ float_precision=float_precision,
+ sep="\t",
+ header=0,
+ dtype={1: np.float64},
+ )
+ tm.assert_series_equal(df.iloc[:, 1], pd.Series([1.2, 2.1, 1.0, 1.2], name="num"))
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_true_values_cast_to_bool(all_parsers):
+ # GH#34655
+ text = """a,b
+yes,xxx
+no,yyy
+1,zzz
+0,aaa
+ """
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO(text),
+ true_values=["yes"],
+ false_values=["no"],
+ dtype={"a": "boolean"},
+ )
+ expected = DataFrame(
+ {"a": [True, False, True, False], "b": ["xxx", "yyy", "zzz", "aaa"]}
+ )
+ expected["a"] = expected["a"].astype("boolean")
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+@pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)])
+def test_dtype_mangle_dup_cols(all_parsers, dtypes, exp_value):
+ # GH#35211
+ parser = all_parsers
+ data = """a,a\n1,1"""
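+    # Duplicate headers are mangled to "a" and "a.1"; the dtype mapping
+    # can target either name.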
+ dtype_dict = {"a": str, **dtypes}
+ # GH#42462
+ dtype_dict_copy = dtype_dict.copy()
+ result = parser.read_csv(StringIO(data), dtype=dtype_dict)
+ expected = DataFrame({"a": ["1"], "a.1": [exp_value]})
+ assert dtype_dict == dtype_dict_copy, "dtype dict changed"
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_dtype_mangle_dup_cols_single_dtype(all_parsers):
+ # GH#42022
+ parser = all_parsers
+ data = """a,a\n1,1"""
+ result = parser.read_csv(StringIO(data), dtype=str)
+ expected = DataFrame({"a": ["1"], "a.1": ["1"]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_dtype_multi_index(all_parsers):
+ # GH 42446
+ parser = all_parsers
+ data = "A,B,B\nX,Y,Z\n1,2,3"
+
+ result = parser.read_csv(
+ StringIO(data),
+ header=list(range(2)),
+ dtype={
+ ("A", "X"): np.int32,
+ ("B", "Y"): np.int32,
+ ("B", "Z"): np.float32,
+ },
+ )
+
+ expected = DataFrame(
+ {
+ ("A", "X"): np.int32([1]),
+ ("B", "Y"): np.int32([2]),
+ ("B", "Z"): np.float32([3]),
+ }
+ )
+
+ tm.assert_frame_equal(result, expected)
+
+
+def test_nullable_int_dtype(all_parsers, any_int_ea_dtype):
+ # GH 25472
+ parser = all_parsers
+ dtype = any_int_ea_dtype
+
+ data = """a,b,c
+,3,5
+1,,6
+2,4,"""
+ expected = DataFrame(
+ {
+ "a": pd.array([pd.NA, 1, 2], dtype=dtype),
+ "b": pd.array([3, pd.NA, 4], dtype=dtype),
+ "c": pd.array([5, 6, pd.NA], dtype=dtype),
+ }
+ )
+ actual = parser.read_csv(StringIO(data), dtype=dtype)
+ tm.assert_frame_equal(actual, expected)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+@pytest.mark.parametrize("default", ["float", "float64"])
+def test_dtypes_defaultdict(all_parsers, default):
+ # GH#41574
+ data = """a,b
+1,2
+"""
+ dtype = defaultdict(lambda: default, a="int64")
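+    # Columns absent from the mapping fall back to the defaultdict's
+    # default factory, here the parametrized float dtype.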
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ expected = DataFrame({"a": [1], "b": 2.0})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_dtypes_defaultdict_mangle_dup_cols(all_parsers):
+ # GH#41574
+ data = """a,b,a,b,b.1
+1,2,3,4,5
+"""
+ dtype = defaultdict(lambda: "float64", a="int64")
+ dtype["b.1"] = "int64"
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+ expected = DataFrame({"a": [1], "b": [2.0], "a.1": [3], "b.2": [4.0], "b.1": [5]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_dtypes_defaultdict_invalid(all_parsers):
+ # GH#41574
+ data = """a,b
+1,2
+"""
+ dtype = defaultdict(lambda: "invalid_dtype", a="int64")
+ parser = all_parsers
+ with pytest.raises(TypeError, match="not understood"):
+ parser.read_csv(StringIO(data), dtype=dtype)
+
+
+def test_dtype_backend(all_parsers):
+ # GH#36712
+
+ parser = all_parsers
+
+ data = """a,b,c,d,e,f,g,h,i,j
+1,2.5,True,a,,,,,12-31-2019,
+3,4.5,False,b,6,7.5,True,a,12-31-2019,
+"""
+ result = parser.read_csv(
+ StringIO(data), dtype_backend="numpy_nullable", parse_dates=["i"]
+ )
+ expected = DataFrame(
+ {
+ "a": pd.Series([1, 3], dtype="Int64"),
+ "b": pd.Series([2.5, 4.5], dtype="Float64"),
+ "c": pd.Series([True, False], dtype="boolean"),
+ "d": pd.Series(["a", "b"], dtype="string"),
+ "e": pd.Series([pd.NA, 6], dtype="Int64"),
+ "f": pd.Series([pd.NA, 7.5], dtype="Float64"),
+ "g": pd.Series([pd.NA, True], dtype="boolean"),
+ "h": pd.Series([pd.NA, "a"], dtype="string"),
+ "i": pd.Series([Timestamp("2019-12-31")] * 2),
+ "j": pd.Series([pd.NA, pd.NA], dtype="Int64"),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_dtype_backend_and_dtype(all_parsers):
+ # GH#36712
+
+ parser = all_parsers
+
+ data = """a,b
+1,2.5
+,
+"""
+ result = parser.read_csv(
+ StringIO(data), dtype_backend="numpy_nullable", dtype="float64"
+ )
+ expected = DataFrame({"a": [1.0, np.nan], "b": [2.5, np.nan]})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_dtype_backend_string(all_parsers, string_storage):
+ # GH#36712
+ pa = pytest.importorskip("pyarrow")
+
+ with pd.option_context("mode.string_storage", string_storage):
+ parser = all_parsers
+
+ data = """a,b
+a,x
+b,
+"""
+ result = parser.read_csv(StringIO(data), dtype_backend="numpy_nullable")
+
+ if string_storage == "python":
+ expected = DataFrame(
+ {
+ "a": StringArray(np.array(["a", "b"], dtype=np.object_)),
+ "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)),
+ }
+ )
+ else:
+ expected = DataFrame(
+ {
+ "a": ArrowStringArray(pa.array(["a", "b"])),
+ "b": ArrowStringArray(pa.array(["x", None])),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_dtype_backend_ea_dtype_specified(all_parsers):
+ # GH#491496
+ data = """a,b
+1,2
+"""
+ parser = all_parsers
+ result = parser.read_csv(
+ StringIO(data), dtype="Int64", dtype_backend="numpy_nullable"
+ )
+ expected = DataFrame({"a": [1], "b": 2}, dtype="Int64")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_dtype_backend_pyarrow(all_parsers, request):
+ # GH#36712
+ pa = pytest.importorskip("pyarrow")
+ parser = all_parsers
+
+ data = """a,b,c,d,e,f,g,h,i,j
+1,2.5,True,a,,,,,12-31-2019,
+3,4.5,False,b,6,7.5,True,a,12-31-2019,
+"""
+ result = parser.read_csv(StringIO(data), dtype_backend="pyarrow", parse_dates=["i"])
+ expected = DataFrame(
+ {
+ "a": pd.Series([1, 3], dtype="int64[pyarrow]"),
+ "b": pd.Series([2.5, 4.5], dtype="float64[pyarrow]"),
+ "c": pd.Series([True, False], dtype="bool[pyarrow]"),
+ "d": pd.Series(["a", "b"], dtype=pd.ArrowDtype(pa.string())),
+ "e": pd.Series([pd.NA, 6], dtype="int64[pyarrow]"),
+ "f": pd.Series([pd.NA, 7.5], dtype="float64[pyarrow]"),
+ "g": pd.Series([pd.NA, True], dtype="bool[pyarrow]"),
+ "h": pd.Series(
+ [pd.NA, "a"],
+ dtype=pd.ArrowDtype(pa.string()),
+ ),
+ "i": pd.Series([Timestamp("2019-12-31")] * 2),
+ "j": pd.Series([pd.NA, pd.NA], dtype="null[pyarrow]"),
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+# pyarrow engine failing:
+# https://github.com/pandas-dev/pandas/issues/56136
+@pytest.mark.usefixtures("pyarrow_xfail")
+def test_ea_int_avoid_overflow(all_parsers):
+ # GH#32134
+ parser = all_parsers
+ data = """a,b
+1,1
+,1
+1582218195625938945,1
+"""
+ result = parser.read_csv(StringIO(data), dtype={"a": "Int64"})
+ expected = DataFrame(
+ {
+ "a": IntegerArray(
+ np.array([1, 1, 1582218195625938945]), np.array([False, True, False])
+ ),
+ "b": 1,
+ }
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_string_inference(all_parsers):
+ # GH#54430
+ pytest.importorskip("pyarrow")
+ dtype = "string[pyarrow_numpy]"
+
+ data = """a,b
+x,1
+y,2
+,3"""
+ parser = all_parsers
+ with pd.option_context("future.infer_string", True):
+ result = parser.read_csv(StringIO(data))
+
+ expected = DataFrame(
+ {"a": pd.Series(["x", "y", None], dtype=dtype), "b": [1, 2, 3]},
+ columns=pd.Index(["a", "b"], dtype=dtype),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("dtype", ["O", object, "object", np.object_, str, np.str_])
+def test_string_inference_object_dtype(all_parsers, dtype):
+ # GH#56047
+ pytest.importorskip("pyarrow")
+
+ data = """a,b
+x,a
+y,a
+z,a"""
+ parser = all_parsers
+ with pd.option_context("future.infer_string", True):
+ result = parser.read_csv(StringIO(data), dtype=dtype)
+
+ expected = DataFrame(
+ {
+ "a": pd.Series(["x", "y", "z"], dtype=object),
+ "b": pd.Series(["a", "a", "a"], dtype=object),
+ },
+ columns=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+ with pd.option_context("future.infer_string", True):
+ result = parser.read_csv(StringIO(data), dtype={"a": dtype})
+
+ expected = DataFrame(
+ {
+ "a": pd.Series(["x", "y", "z"], dtype=object),
+ "b": pd.Series(["a", "a", "a"], dtype="string[pyarrow_numpy]"),
+ },
+ columns=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"),
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+def test_accurate_parsing_of_large_integers(all_parsers):
+ # GH#52505
+ data = """SYMBOL,MOMENT,ID,ID_DEAL
+AAPL,20230301181139587,1925036343869802844,
+AAPL,20230301181139587,2023552585717889863,2023552585717263358
+NVDA,20230301181139587,2023552585717889863,2023552585717263359
+AMC,20230301181139587,2023552585717889863,2023552585717263360
+AMZN,20230301181139587,2023552585717889759,2023552585717263360
+MSFT,20230301181139587,2023552585717889863,2023552585717263361
+NVDA,20230301181139587,2023552585717889827,2023552585717263361"""
+    orders = all_parsers.read_csv(StringIO(data), dtype={"ID_DEAL": pd.Int64Dtype()})
+ assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263358, "ID_DEAL"]) == 1
+ assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263359, "ID_DEAL"]) == 1
+ assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263360, "ID_DEAL"]) == 2
+ assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263361, "ID_DEAL"]) == 2
+
+
+def test_dtypes_with_usecols(all_parsers):
+ # GH#54868
+
+ parser = all_parsers
+ data = """a,b,c
+1,2,3
+4,5,6"""
+
+ result = parser.read_csv(StringIO(data), usecols=["a", "c"], dtype={"a": object})
+ if parser.engine == "pyarrow":
+ values = [1, 4]
+ else:
+ values = ["1", "4"]
+ expected = DataFrame({"a": pd.Series(values, dtype=object), "c": [3, 6]})
+ tm.assert_frame_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_empty.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_empty.py
new file mode 100644
index 0000000000000000000000000000000000000000..f34385b190c5ffa8df1a517fb0e0c9ccd8fe0073
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/dtypes/test_empty.py
@@ -0,0 +1,181 @@
+"""
+Tests dtype specification during parsing
+for all of the parsers defined in parsers.py
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas import (
+ Categorical,
+ DataFrame,
+ Index,
+ MultiIndex,
+ Series,
+ concat,
+)
+import pandas._testing as tm
+
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_dtype_all_columns_empty(all_parsers):
+ # see gh-12048
+ parser = all_parsers
+ result = parser.read_csv(StringIO("A,B"), dtype=str)
+
+ expected = DataFrame({"A": [], "B": []}, dtype=str)
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_empty_pass_dtype(all_parsers):
+ parser = all_parsers
+
+ data = "one,two"
+ result = parser.read_csv(StringIO(data), dtype={"one": "u1"})
+
+ expected = DataFrame(
+ {"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_empty_with_index_pass_dtype(all_parsers):
+ parser = all_parsers
+
+ data = "one,two"
+ result = parser.read_csv(
+ StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}
+ )
+
+ expected = DataFrame(
+ {"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_empty_with_multi_index_pass_dtype(all_parsers):
+ parser = all_parsers
+
+ data = "one,two,three"
+ result = parser.read_csv(
+ StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}
+ )
+
+ exp_idx = MultiIndex.from_arrays(
+ [np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)],
+ names=["one", "two"],
+ )
+ expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
+ parser = all_parsers
+
+ data = "one,one"
+ result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})
+
+ expected = DataFrame(
+ {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
+ parser = all_parsers
+
+ data = "one,one"
+ result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
+
+ expected = DataFrame(
+ {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
+ # see gh-9424
+ parser = all_parsers
+ expected = concat(
+ [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
+ axis=1,
+ )
+
+ data = "one,one"
+ result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
+ # see gh-9424
+ parser = all_parsers
+ expected = concat(
+ [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
+ axis=1,
+ )
+ expected.index = expected.index.astype(object)
+
+ with pytest.raises(ValueError, match="Duplicate names"):
+ data = ""
+ parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})
+
+
+@pytest.mark.parametrize(
+ "dtype,expected",
+ [
+ (np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
+ (
+ "category",
+ DataFrame({"a": Categorical([]), "b": Categorical([])}),
+ ),
+ (
+ {"a": "category", "b": "category"},
+ DataFrame({"a": Categorical([]), "b": Categorical([])}),
+ ),
+ ("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")),
+ (
+ "timedelta64[ns]",
+ DataFrame(
+ {
+ "a": Series([], dtype="timedelta64[ns]"),
+ "b": Series([], dtype="timedelta64[ns]"),
+ },
+ ),
+ ),
+ (
+ {"a": np.int64, "b": np.int32},
+ DataFrame(
+ {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
+ ),
+ ),
+ (
+ {0: np.int64, 1: np.int32},
+ DataFrame(
+ {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
+ ),
+ ),
+ (
+ {"a": np.int64, 1: np.int32},
+ DataFrame(
+ {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
+ ),
+ ),
+ ],
+)
+@skip_pyarrow # CSV parse error: Empty CSV file or block
+def test_empty_dtype(all_parsers, dtype, expected):
+ # see gh-14712
+ parser = all_parsers
+ data = "a,b"
+
+ result = parser.read_csv(StringIO(data), header=0, dtype=dtype)
+ tm.assert_frame_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..767fba666e41769a2fa1c756a5e93b5e1720cd9c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py
@@ -0,0 +1,563 @@
+"""
+Tests the usecols functionality during parsing
+for all of the parsers defined in parsers.py
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas.errors import ParserError
+
+from pandas import (
+ DataFrame,
+ Index,
+ array,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+_msg_validate_usecols_arg = (
+ "'usecols' must either be list-like "
+ "of all strings, all unicode, all "
+ "integers or a callable."
+)
+_msg_validate_usecols_names = (
+ "Usecols do not match columns, columns expected but not found: {0}"
+)
+_msg_pyarrow_requires_names = (
+ "The pyarrow engine does not allow 'usecols' to be integer column "
+ "positions. Pass a list of string column names instead."
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+def test_raise_on_mixed_dtype_usecols(all_parsers):
+ # See gh-12678
+ data = """a,b,c
+ 1000,2000,3000
+ 4000,5000,6000
+ """
+ usecols = [0, "b", 2]
+ parser = all_parsers
+
+ with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
+ parser.read_csv(StringIO(data), usecols=usecols)
+
+
+@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
+def test_usecols(all_parsers, usecols, request):
+ data = """\
+a,b,c
+1,2,3
+4,5,6
+7,8,9
+10,11,12"""
+ parser = all_parsers
+ if parser.engine == "pyarrow" and isinstance(usecols[0], int):
+ with pytest.raises(ValueError, match=_msg_pyarrow_requires_names):
+ parser.read_csv(StringIO(data), usecols=usecols)
+ return
+
+ result = parser.read_csv(StringIO(data), usecols=usecols)
+
+ expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_with_names(all_parsers):
+ data = """\
+a,b,c
+1,2,3
+4,5,6
+7,8,9
+10,11,12"""
+ parser = all_parsers
+ names = ["foo", "bar"]
+
+ if parser.engine == "pyarrow":
+ with pytest.raises(ValueError, match=_msg_pyarrow_requires_names):
+ parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
+ return
+
+ result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
+
+ expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
+)
+def test_usecols_relative_to_names(all_parsers, names, usecols):
+ data = """\
+1,2,3
+4,5,6
+7,8,9
+10,11,12"""
+ parser = all_parsers
+ if parser.engine == "pyarrow" and not isinstance(usecols[0], int):
+ # ArrowKeyError: Column 'fb' in include_columns does not exist
+ pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+
+ result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
+
+ expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_relative_to_names2(all_parsers):
+ # see gh-5766
+ data = """\
+1,2,3
+4,5,6
+7,8,9
+10,11,12"""
+ parser = all_parsers
+
+ result = parser.read_csv(
+ StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
+ )
+
+ expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
+ tm.assert_frame_equal(result, expected)
+
+
+# regex mismatch: "Length mismatch: Expected axis has 1 elements"
+@xfail_pyarrow
+def test_usecols_name_length_conflict(all_parsers):
+ data = """\
+1,2,3
+4,5,6
+7,8,9
+10,11,12"""
+ parser = all_parsers
+ msg = "Number of passed names did not match number of header fields in the file"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1])
+
+
+def test_usecols_single_string(all_parsers):
+ # see gh-20558
+ parser = all_parsers
+ data = """foo, bar, baz
+1000, 2000, 3000
+4000, 5000, 6000"""
+
+ with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
+ parser.read_csv(StringIO(data), usecols="foo")
+
+
+@skip_pyarrow # CSV parse error in one case, AttributeError in another
+@pytest.mark.parametrize(
+ "data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"]
+)
+def test_usecols_index_col_false(all_parsers, data):
+ # see gh-9082
+ parser = all_parsers
+ usecols = ["a", "c", "d"]
+ expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]})
+
+ result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("index_col", ["b", 0])
+@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]])
+def test_usecols_index_col_conflict(all_parsers, usecols, index_col, request):
+ # see gh-4201: test that index_col as integer reflects usecols
+ parser = all_parsers
+ data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
+
+ if parser.engine == "pyarrow" and isinstance(usecols[0], int):
+ with pytest.raises(ValueError, match=_msg_pyarrow_requires_names):
+ parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
+ return
+
+ expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b"))
+
+ result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_index_col_conflict2(all_parsers):
+ # see gh-4201: test that index_col as integer reflects usecols
+ parser = all_parsers
+ data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
+
+ expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")})
+ expected = expected.set_index(["b", "c"])
+
+ result = parser.read_csv(
+ StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"]
+ )
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Expected 3 columns, got 4
+def test_usecols_implicit_index_col(all_parsers):
+ # see gh-2654
+ parser = all_parsers
+ data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
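+    # Each data row has one more field than the header, so the first
+    # column becomes the index (implicit index_col).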
+
+ result = parser.read_csv(StringIO(data), usecols=["a", "b"])
+ expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_index_col_middle(all_parsers):
+ # GH#9098
+ parser = all_parsers
+ data = """a,b,c,d
+1,2,3,4
+"""
+ result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="c")
+ expected = DataFrame({"b": [2], "d": [4]}, index=Index([3], name="c"))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_index_col_end(all_parsers):
+ # GH#9098
+ parser = all_parsers
+ data = """a,b,c,d
+1,2,3,4
+"""
+ result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="d")
+ expected = DataFrame({"b": [2], "c": [3]}, index=Index([4], name="d"))
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_regex_sep(all_parsers):
+ # see gh-2733
+ parser = all_parsers
+ data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
+
+ if parser.engine == "pyarrow":
+ msg = "the 'pyarrow' engine does not support regex separators"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
+ return
+
+ result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
+
+ expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_with_whitespace(all_parsers):
+ parser = all_parsers
+ data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
+
+ depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+
+ if parser.engine == "pyarrow":
+ msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"
+ with pytest.raises(ValueError, match=msg):
+ with tm.assert_produces_warning(
+ FutureWarning, match=depr_msg, check_stacklevel=False
+ ):
+ parser.read_csv(
+ StringIO(data), delim_whitespace=True, usecols=("a", "b")
+ )
+ return
+
+ with tm.assert_produces_warning(
+ FutureWarning, match=depr_msg, check_stacklevel=False
+ ):
+ result = parser.read_csv(
+ StringIO(data), delim_whitespace=True, usecols=("a", "b")
+ )
+ expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "usecols,expected",
+ [
+ # Column selection by index.
+ ([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])),
+ # Column selection by name.
+ (
+ ["0", "1"],
+ DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"]),
+ ),
+ ],
+)
+def test_usecols_with_integer_like_header(all_parsers, usecols, expected, request):
+ parser = all_parsers
+ data = """2,0,1
+1000,2000,3000
+4000,5000,6000"""
+
+ if parser.engine == "pyarrow" and isinstance(usecols[0], int):
+ with pytest.raises(ValueError, match=_msg_pyarrow_requires_names):
+ parser.read_csv(StringIO(data), usecols=usecols)
+ return
+
+ result = parser.read_csv(StringIO(data), usecols=usecols)
+ tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow # mismatched shape
+def test_empty_usecols(all_parsers):
+ data = "a,b,c\n1,2,3\n4,5,6"
+ expected = DataFrame(columns=Index([]))
+ parser = all_parsers
+
+ result = parser.read_csv(StringIO(data), usecols=set())
+ tm.assert_frame_equal(result, expected)
+
+
+def test_np_array_usecols(all_parsers):
+ # see gh-12546
+ parser = all_parsers
+ data = "a,b,c\n1,2,3"
+ usecols = np.array(["a", "b"])
+
+ expected = DataFrame([[1, 2]], columns=usecols)
+ result = parser.read_csv(StringIO(data), usecols=usecols)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "usecols,expected",
+ [
+ (
+ lambda x: x.upper() in ["AAA", "BBB", "DDD"],
+ DataFrame(
+ {
+ "AaA": {
+ 0: 0.056674972999999997,
+ 1: 2.6132309819999997,
+ 2: 3.5689350380000002,
+ },
+ "bBb": {0: 8, 1: 2, 2: 7},
+ "ddd": {0: "a", 1: "b", 2: "a"},
+ }
+ ),
+ ),
+ (lambda x: False, DataFrame(columns=Index([]))),
+ ],
+)
+def test_callable_usecols(all_parsers, usecols, expected):
+ # see gh-14154
+ data = """AaA,bBb,CCC,ddd
+0.056674973,8,True,a
+2.613230982,2,False,b
+3.568935038,7,False,a"""
+ parser = all_parsers
+
+ if parser.engine == "pyarrow":
+ msg = "The pyarrow engine does not allow 'usecols' to be a callable"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), usecols=usecols)
+ return
+
+ result = parser.read_csv(StringIO(data), usecols=usecols)
+ tm.assert_frame_equal(result, expected)
+
+
+# ArrowKeyError: Column 'fa' in include_columns does not exist in CSV file
+@skip_pyarrow
+@pytest.mark.parametrize("usecols", [["a", "c"], lambda x: x in ["a", "c"]])
+def test_incomplete_first_row(all_parsers, usecols):
+ # see gh-6710
+ data = "1,2\n1,2,3"
+ parser = all_parsers
+ names = ["a", "b", "c"]
+ expected = DataFrame({"a": [1, 1], "c": [np.nan, 3]})
+
+ result = parser.read_csv(StringIO(data), names=names, usecols=usecols)
+ tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow # CSV parse error: Expected 3 columns, got 4
+@pytest.mark.parametrize(
+ "data,usecols,kwargs,expected",
+ [
+ # see gh-8985
+ (
+ "19,29,39\n" * 2 + "10,20,30,40",
+ [0, 1, 2],
+ {"header": None},
+ DataFrame([[19, 29, 39], [19, 29, 39], [10, 20, 30]]),
+ ),
+ # see gh-9549
+ (
+ ("A,B,C\n1,2,3\n3,4,5\n1,2,4,5,1,6\n1,2,3,,,1,\n1,2,3\n5,6,7"),
+ ["A", "B", "C"],
+ {},
+ DataFrame(
+ {
+ "A": [1, 3, 1, 1, 1, 5],
+ "B": [2, 4, 2, 2, 2, 6],
+ "C": [3, 5, 4, 3, 3, 7],
+ }
+ ),
+ ),
+ ],
+)
+def test_uneven_length_cols(all_parsers, data, usecols, kwargs, expected):
+ # see gh-8985
+ parser = all_parsers
+ result = parser.read_csv(StringIO(data), usecols=usecols, **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "usecols,kwargs,expected,msg",
+ [
+ (
+ ["a", "b", "c", "d"],
+ {},
+ DataFrame({"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}),
+ None,
+ ),
+ (
+ ["a", "b", "c", "f"],
+ {},
+ None,
+ _msg_validate_usecols_names.format(r"\['f'\]"),
+ ),
+ (["a", "b", "f"], {}, None, _msg_validate_usecols_names.format(r"\['f'\]")),
+ (
+ ["a", "b", "f", "g"],
+ {},
+ None,
+ _msg_validate_usecols_names.format(r"\[('f', 'g'|'g', 'f')\]"),
+ ),
+ # see gh-14671
+ (
+ None,
+ {"header": 0, "names": ["A", "B", "C", "D"]},
+ DataFrame({"A": [1, 5], "B": [2, 6], "C": [3, 7], "D": [4, 8]}),
+ None,
+ ),
+ (
+ ["A", "B", "C", "f"],
+ {"header": 0, "names": ["A", "B", "C", "D"]},
+ None,
+ _msg_validate_usecols_names.format(r"\['f'\]"),
+ ),
+ (
+ ["A", "B", "f"],
+ {"names": ["A", "B", "C", "D"]},
+ None,
+ _msg_validate_usecols_names.format(r"\['f'\]"),
+ ),
+ ],
+)
+def test_raises_on_usecols_names_mismatch(
+ all_parsers, usecols, kwargs, expected, msg, request
+):
+ data = "a,b,c,d\n1,2,3,4\n5,6,7,8"
+ kwargs.update(usecols=usecols)
+ parser = all_parsers
+
+ if parser.engine == "pyarrow" and not (
+ usecols is not None and expected is not None
+ ):
+ # everything but the first case
+ # ArrowKeyError: Column 'f' in include_columns does not exist in CSV file
+ pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+
+ if expected is None:
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO(data), **kwargs)
+ else:
+ result = parser.read_csv(StringIO(data), **kwargs)
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("usecols", [["A", "C"], [0, 2]])
+def test_usecols_subset_names_mismatch_orig_columns(all_parsers, usecols, request):
+ data = "a,b,c,d\n1,2,3,4\n5,6,7,8"
+ names = ["A", "B", "C", "D"]
+ parser = all_parsers
+
+ if parser.engine == "pyarrow":
+ if isinstance(usecols[0], int):
+ with pytest.raises(ValueError, match=_msg_pyarrow_requires_names):
+ parser.read_csv(StringIO(data), header=0, names=names, usecols=usecols)
+ return
+ # "pyarrow.lib.ArrowKeyError: Column 'A' in include_columns does not exist"
+ pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+
+ result = parser.read_csv(StringIO(data), header=0, names=names, usecols=usecols)
+ expected = DataFrame({"A": [1, 5], "C": [3, 7]})
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("names", [None, ["a", "b"]])
+def test_usecols_indices_out_of_bounds(all_parsers, names):
+ # GH#25623 & GH 41130; enforced in 2.0
+ parser = all_parsers
+ data = """
+a,b
+1,2
+ """
+
+ err = ParserError
+ msg = "Defining usecols with out-of-bounds"
+ if parser.engine == "pyarrow":
+ err = ValueError
+ msg = _msg_pyarrow_requires_names
+
+ with pytest.raises(err, match=msg):
+ parser.read_csv(StringIO(data), usecols=[0, 2], names=names, header=0)
+
+
+def test_usecols_additional_columns(all_parsers):
+ # GH#46997
+ parser = all_parsers
+ usecols = lambda header: header.strip() in ["a", "b", "c"]
+
+ if parser.engine == "pyarrow":
+ msg = "The pyarrow engine does not allow 'usecols' to be a callable"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO("a,b\nx,y,z"), index_col=False, usecols=usecols)
+ return
+ result = parser.read_csv(StringIO("a,b\nx,y,z"), index_col=False, usecols=usecols)
+ expected = DataFrame({"a": ["x"], "b": "y"})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_additional_columns_integer_columns(all_parsers):
+ # GH#46997
+ parser = all_parsers
+ usecols = lambda header: header.strip() in ["0", "1"]
+ if parser.engine == "pyarrow":
+ msg = "The pyarrow engine does not allow 'usecols' to be a callable"
+ with pytest.raises(ValueError, match=msg):
+ parser.read_csv(StringIO("0,1\nx,y,z"), index_col=False, usecols=usecols)
+ return
+ result = parser.read_csv(StringIO("0,1\nx,y,z"), index_col=False, usecols=usecols)
+ expected = DataFrame({"0": ["x"], "1": "y"})
+ tm.assert_frame_equal(result, expected)
+
+
+def test_usecols_dtype(all_parsers):
+ parser = all_parsers
+ data = """
+col1,col2,col3
+a,1,x
+b,2,y
+"""
+ result = parser.read_csv(
+ StringIO(data),
+ usecols=["col1", "col2"],
+ dtype={"col1": "string", "col2": "uint8", "col3": "string"},
+ )
+ expected = DataFrame(
+ {"col1": array(["a", "b"]), "col2": np.array([1, 2], dtype="uint8")}
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe4c017837627c12d2dbf935c990895260f680f5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..383aeaa22c7518e838ccb492ec6ba4e77664717d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09f74c3225c6d8794c33bc63f47d13694f60de05
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a85eba28c4e0a61aa8d9d1d47b60557248a2ba22
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18cca613b99954dbaaf37b00663fbd86458fede4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff88fa9049e2bae2e0774777c964bd374084f9be
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b96f141a6b4674f70d98708abb34efa3e0be7b9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..809ae34203398c61a24c8d3ae0b15bc971abf160
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_byteswap.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_byteswap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d691c5cefff345bed100f430ce272f540ce3d922
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_byteswap.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04c54338c9b2cb0d8567795c7628d78a46cf2a0f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas7bdat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas7bdat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8292be8e9df10b1097c0f3d1de27a67548f976a2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_sas7bdat.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_xport.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_xport.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e71f55b98428d499928c0d9aa74aad6d2e4a0d56
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/__pycache__/test_xport.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_byteswap.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_byteswap.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d7f2f05d1b00b1855dd040bf08cebabf73f9b9a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_byteswap.py
@@ -0,0 +1,55 @@
+from hypothesis import (
+ assume,
+ example,
+ given,
+ strategies as st,
+)
+import numpy as np
+import pytest
+
+from pandas._libs.byteswap import (
+ read_double_with_byteswap,
+ read_float_with_byteswap,
+ read_uint16_with_byteswap,
+ read_uint32_with_byteswap,
+ read_uint64_with_byteswap,
+)
+
+import pandas._testing as tm
+
+
+@given(read_offset=st.integers(0, 11), number=st.integers(min_value=0))
+@example(number=2**16, read_offset=0)
+@example(number=2**32, read_offset=0)
+@example(number=2**64, read_offset=0)
+@pytest.mark.parametrize("int_type", [np.uint16, np.uint32, np.uint64])
+@pytest.mark.parametrize("should_byteswap", [True, False])
+def test_int_byteswap(read_offset, number, int_type, should_byteswap):
+ assume(number < 2 ** (8 * int_type(0).itemsize))
+ _test(number, int_type, read_offset, should_byteswap)
+
+
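+# hypothesis generates arbitrary floats; narrowing them to float32 can raise
+# a harmless overflow RuntimeWarning, hence the filter below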
+@pytest.mark.filterwarnings("ignore:overflow encountered:RuntimeWarning")
+@given(read_offset=st.integers(0, 11), number=st.floats())
+@pytest.mark.parametrize("float_type", [np.float32, np.float64])
+@pytest.mark.parametrize("should_byteswap", [True, False])
+def test_float_byteswap(read_offset, number, float_type, should_byteswap):
+ _test(number, float_type, read_offset, should_byteswap)
+
+
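+# Shared helper: plant `number` at `read_offset` inside a buffer of random
+# bytes, then read it back with the matching pandas byteswap reader and
+# compare against the (possibly byteswapped) original.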
+def _test(number, number_type, read_offset, should_byteswap):
+ number = number_type(number)
+ data = np.random.default_rng(2).integers(0, 256, size=20, dtype="uint8")
+ data[read_offset : read_offset + number.itemsize] = number[None].view("uint8")
+ swap_func = {
+ np.float32: read_float_with_byteswap,
+ np.float64: read_double_with_byteswap,
+ np.uint16: read_uint16_with_byteswap,
+ np.uint32: read_uint32_with_byteswap,
+ np.uint64: read_uint64_with_byteswap,
+ }[type(number)]
+ output_number = number_type(swap_func(bytes(data), read_offset, should_byteswap))
+ if should_byteswap:
+ tm.assert_equal(output_number, number.byteswap())
+ else:
+ tm.assert_equal(output_number, number)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e38baf4fc4093879b850f03d746ae5e67b477ee
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas.py
@@ -0,0 +1,34 @@
+from io import StringIO
+
+import pytest
+
+from pandas import read_sas
+import pandas._testing as tm
+
+
+class TestSas:
+ def test_sas_buffer_format(self):
+ # see gh-14947
+ b = StringIO("")
+
+ msg = (
+ "If this is a buffer object rather than a string "
+ "name, you must specify a format string"
+ )
+ with pytest.raises(ValueError, match=msg):
+ read_sas(b)
+
+ def test_sas_read_no_format_or_extension(self):
+ # see gh-24548
+ msg = "unable to infer format of SAS file.+"
+ with tm.ensure_clean("test_file_no_extension") as path:
+ with pytest.raises(ValueError, match=msg):
+ read_sas(path)
+
+
+def test_sas_archive(datapath):
+ fname_uncompressed = datapath("io", "sas", "data", "airline.sas7bdat")
+ df_uncompressed = read_sas(fname_uncompressed)
+ fname_compressed = datapath("io", "sas", "data", "airline.sas7bdat.gz")
+ df_compressed = read_sas(fname_compressed, format="sas7bdat")
+ tm.assert_frame_equal(df_uncompressed, df_compressed)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas7bdat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas7bdat.py
new file mode 100644
index 0000000000000000000000000000000000000000..b71896c77ffb5872beb98d3914e6195b69855703
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_sas7bdat.py
@@ -0,0 +1,419 @@
+import contextlib
+from datetime import datetime
+import io
+import os
+from pathlib import Path
+
+import numpy as np
+import pytest
+
+from pandas.compat import IS64
+from pandas.errors import EmptyDataError
+import pandas.util._test_decorators as td
+
+import pandas as pd
+import pandas._testing as tm
+
+from pandas.io.sas.sas7bdat import SAS7BDATReader
+
+
+@pytest.fixture
+def dirpath(datapath):
+ return datapath("io", "sas", "data")
+
+
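+# Expected frames come from CSV dumps of the same data; SAS stores dates as
+# day counts from the 1960-01-01 epoch, so Column4/Column12 are rebuilt as
+# datetimes and integer columns are upcast to float64 to match the reader.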
+@pytest.fixture(params=[(1, range(1, 16)), (2, [16])])
+def data_test_ix(request, dirpath):
+ i, test_ix = request.param
+ fname = os.path.join(dirpath, f"test_sas7bdat_{i}.csv")
+ df = pd.read_csv(fname)
+ epoch = datetime(1960, 1, 1)
+ t1 = pd.to_timedelta(df["Column4"], unit="d")
+ df["Column4"] = (epoch + t1).astype("M8[s]")
+ t2 = pd.to_timedelta(df["Column12"], unit="d")
+ df["Column12"] = (epoch + t2).astype("M8[s]")
+ for k in range(df.shape[1]):
+ col = df.iloc[:, k]
+ if col.dtype == np.int64:
+ df.isetitem(k, df.iloc[:, k].astype(np.float64))
+ return df, test_ix
+
+
+# https://github.com/cython/cython/issues/1720
+class TestSAS7BDAT:
+ @pytest.mark.slow
+ def test_from_file(self, dirpath, data_test_ix):
+ expected, test_ix = data_test_ix
+ for k in test_ix:
+ fname = os.path.join(dirpath, f"test{k}.sas7bdat")
+ df = pd.read_sas(fname, encoding="utf-8")
+ tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.slow
+ def test_from_buffer(self, dirpath, data_test_ix):
+ expected, test_ix = data_test_ix
+ for k in test_ix:
+ fname = os.path.join(dirpath, f"test{k}.sas7bdat")
+ with open(fname, "rb") as f:
+ byts = f.read()
+ buf = io.BytesIO(byts)
+ with pd.read_sas(
+ buf, format="sas7bdat", iterator=True, encoding="utf-8"
+ ) as rdr:
+ df = rdr.read()
+ tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.slow
+ def test_from_iterator(self, dirpath, data_test_ix):
+ expected, test_ix = data_test_ix
+ for k in test_ix:
+ fname = os.path.join(dirpath, f"test{k}.sas7bdat")
+ with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr:
+ df = rdr.read(2)
+ tm.assert_frame_equal(df, expected.iloc[0:2, :])
+ df = rdr.read(3)
+ tm.assert_frame_equal(df, expected.iloc[2:5, :])
+
+ @pytest.mark.slow
+ def test_path_pathlib(self, dirpath, data_test_ix):
+ expected, test_ix = data_test_ix
+ for k in test_ix:
+ fname = Path(os.path.join(dirpath, f"test{k}.sas7bdat"))
+ df = pd.read_sas(fname, encoding="utf-8")
+ tm.assert_frame_equal(df, expected)
+
+ @td.skip_if_no("py.path")
+ @pytest.mark.slow
+ def test_path_localpath(self, dirpath, data_test_ix):
+ from py.path import local as LocalPath
+
+ expected, test_ix = data_test_ix
+ for k in test_ix:
+ fname = LocalPath(os.path.join(dirpath, f"test{k}.sas7bdat"))
+ df = pd.read_sas(fname, encoding="utf-8")
+ tm.assert_frame_equal(df, expected)
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize("chunksize", (3, 5, 10, 11))
+ @pytest.mark.parametrize("k", range(1, 17))
+ def test_iterator_loop(self, dirpath, k, chunksize):
+ # github #13654
+ fname = os.path.join(dirpath, f"test{k}.sas7bdat")
+ with pd.read_sas(fname, chunksize=chunksize, encoding="utf-8") as rdr:
+ y = 0
+ for x in rdr:
+ y += x.shape[0]
+ assert y == rdr.row_count
+
+ def test_iterator_read_too_much(self, dirpath):
+ # github #14734
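+        # requesting more rows than the file contains should return the
+        # whole table, with or without an explicit format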
+ fname = os.path.join(dirpath, "test1.sas7bdat")
+ with pd.read_sas(
+ fname, format="sas7bdat", iterator=True, encoding="utf-8"
+ ) as rdr:
+ d1 = rdr.read(rdr.row_count + 20)
+
+ with pd.read_sas(fname, iterator=True, encoding="utf-8") as rdr:
+ d2 = rdr.read(rdr.row_count + 20)
+ tm.assert_frame_equal(d1, d2)
+
+
+def test_encoding_options(datapath):
+ fname = datapath("io", "sas", "data", "test1.sas7bdat")
+ df1 = pd.read_sas(fname)
+ df2 = pd.read_sas(fname, encoding="utf-8")
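+    # without an encoding the string columns come back as raw bytes;
+    # decode df1 so it can be compared with the utf-8 decoded df2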
+ for col in df1.columns:
+ try:
+ df1[col] = df1[col].str.decode("utf-8")
+ except AttributeError:
+ pass
+ tm.assert_frame_equal(df1, df2)
+
+ with contextlib.closing(SAS7BDATReader(fname, convert_header_text=False)) as rdr:
+ df3 = rdr.read()
+ for x, y in zip(df1.columns, df3.columns):
+ assert x == y.decode()
+
+
+def test_encoding_infer(datapath):
+ fname = datapath("io", "sas", "data", "test1.sas7bdat")
+
+ with pd.read_sas(fname, encoding="infer", iterator=True) as df1_reader:
+ # check: is encoding inferred correctly from file
+ assert df1_reader.inferred_encoding == "cp1252"
+ df1 = df1_reader.read()
+
+ with pd.read_sas(fname, encoding="cp1252", iterator=True) as df2_reader:
+ df2 = df2_reader.read()
+
+ # check: reader reads correct information
+ tm.assert_frame_equal(df1, df2)
+
+
+def test_productsales(datapath):
+ fname = datapath("io", "sas", "data", "productsales.sas7bdat")
+ df = pd.read_sas(fname, encoding="utf-8")
+ fname = datapath("io", "sas", "data", "productsales.csv")
+ df0 = pd.read_csv(fname, parse_dates=["MONTH"])
+ vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR"]
+ df0[vn] = df0[vn].astype(np.float64)
+
+ df0["MONTH"] = df0["MONTH"].astype("M8[s]")
+ tm.assert_frame_equal(df, df0)
+
+
+def test_12659(datapath):
+ fname = datapath("io", "sas", "data", "test_12659.sas7bdat")
+ df = pd.read_sas(fname)
+ fname = datapath("io", "sas", "data", "test_12659.csv")
+ df0 = pd.read_csv(fname)
+ df0 = df0.astype(np.float64)
+ tm.assert_frame_equal(df, df0)
+
+
+def test_airline(datapath):
+ fname = datapath("io", "sas", "data", "airline.sas7bdat")
+ df = pd.read_sas(fname)
+ fname = datapath("io", "sas", "data", "airline.csv")
+ df0 = pd.read_csv(fname)
+ df0 = df0.astype(np.float64)
+ tm.assert_frame_equal(df, df0)
+
+
+def test_date_time(datapath):
+ # Support of different SAS date/datetime formats (PR #15871)
+ fname = datapath("io", "sas", "data", "datetime.sas7bdat")
+ df = pd.read_sas(fname)
+ fname = datapath("io", "sas", "data", "datetime.csv")
+ df0 = pd.read_csv(
+ fname, parse_dates=["Date1", "Date2", "DateTime", "DateTimeHi", "Taiw"]
+ )
+ # GH 19732: Timestamps imported from sas will incur floating point errors
+ # See GH#56014 for discussion of the correct "expected" results
+ # We are really just testing that we are "close". This only seems to be
+ # an issue near the implementation bounds.
+
+ df[df.columns[3]] = df.iloc[:, 3].dt.round("us")
+ df0["Date1"] = df0["Date1"].astype("M8[s]")
+ df0["Date2"] = df0["Date2"].astype("M8[s]")
+ df0["DateTime"] = df0["DateTime"].astype("M8[ms]")
+ df0["Taiw"] = df0["Taiw"].astype("M8[s]")
+
+ res = df0["DateTimeHi"].astype("M8[us]").dt.round("ms")
+ df0["DateTimeHi"] = res.astype("M8[ms]")
+
+ if not IS64:
+ # No good reason for this, just what we get on the CI
+ df0.loc[0, "DateTimeHi"] += np.timedelta64(1, "ms")
+ df0.loc[[2, 3], "DateTimeHi"] -= np.timedelta64(1, "ms")
+ tm.assert_frame_equal(df, df0)
+
+
+@pytest.mark.parametrize("column", ["WGT", "CYL"])
+def test_compact_numerical_values(datapath, column):
+ # Regression test for #21616
+ fname = datapath("io", "sas", "data", "cars.sas7bdat")
+ df = pd.read_sas(fname, encoding="latin-1")
+ # The two columns CYL and WGT in cars.sas7bdat have column
+ # width < 8 and only contain integral values.
+ # Test that pandas doesn't corrupt the numbers by adding
+ # decimals.
+ result = df[column]
+ expected = df[column].round()
+ tm.assert_series_equal(result, expected, check_exact=True)
+
+
+def test_many_columns(datapath):
+ # Test for looking for column information in more places (PR #22628)
+ fname = datapath("io", "sas", "data", "many_columns.sas7bdat")
+
+ df = pd.read_sas(fname, encoding="latin-1")
+
+ fname = datapath("io", "sas", "data", "many_columns.csv")
+ df0 = pd.read_csv(fname, encoding="latin-1")
+ tm.assert_frame_equal(df, df0)
+
+
+def test_inconsistent_number_of_rows(datapath):
+ # Regression test for issue #16615. (PR #22628)
+ fname = datapath("io", "sas", "data", "load_log.sas7bdat")
+ df = pd.read_sas(fname, encoding="latin-1")
+ assert len(df) == 2097
+
+
+def test_zero_variables(datapath):
+ # Check if the SAS file has zero variables (PR #18184)
+ fname = datapath("io", "sas", "data", "zero_variables.sas7bdat")
+ with pytest.raises(EmptyDataError, match="No columns to parse from file"):
+ pd.read_sas(fname)
+
+
+def test_zero_rows(datapath):
+ # GH 18198
+ fname = datapath("io", "sas", "data", "zero_rows.sas7bdat")
+ result = pd.read_sas(fname)
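+    # build a one-row frame and slice it empty so expected keeps the
+    # object/float64 column dtypes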
+ expected = pd.DataFrame([{"char_field": "a", "num_field": 1.0}]).iloc[:0]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_corrupt_read(datapath):
+ # We don't really care about the exact failure, the important thing is
+ # that the resource should be cleaned up afterwards (BUG #35566)
+ fname = datapath("io", "sas", "data", "corrupt.sas7bdat")
+ msg = "'SAS7BDATReader' object has no attribute 'row_count'"
+ with pytest.raises(AttributeError, match=msg):
+ pd.read_sas(fname)
+
+
+def test_max_sas_date(datapath):
+ # GH 20927
+ # NB. max datetime in SAS dataset is 31DEC9999:23:59:59.999
+ # but this is read as 29DEC9999:23:59:59.998993 by a buggy
+ # sas7bdat module
+ # See also GH#56014 for discussion of the correct "expected" results.
+ fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat")
+ df = pd.read_sas(fname, encoding="iso-8859-1")
+
+ expected = pd.DataFrame(
+ {
+ "text": ["max", "normal"],
+ "dt_as_float": [253717747199.999, 1880323199.999],
+ "dt_as_dt": np.array(
+ [
+ datetime(9999, 12, 29, 23, 59, 59, 999000),
+ datetime(2019, 8, 1, 23, 59, 59, 999000),
+ ],
+ dtype="M8[ms]",
+ ),
+ "date_as_float": [2936547.0, 21762.0],
+ "date_as_date": np.array(
+ [
+ datetime(9999, 12, 29),
+ datetime(2019, 8, 1),
+ ],
+ dtype="M8[s]",
+ ),
+ },
+ columns=["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"],
+ )
+
+ if not IS64:
+ # No good reason for this, just what we get on the CI
+ expected.loc[:, "dt_as_dt"] -= np.timedelta64(1, "ms")
+
+ tm.assert_frame_equal(df, expected)
+
+
+def test_max_sas_date_iterator(datapath):
+ # GH 20927
+    # when called as an iterator, only chunks containing a date beyond
+    # pd.Timestamp.max are returned with datetime.datetime values; when that
+    # happens, the entire chunk is converted, not just the offending value
+ col_order = ["text", "dt_as_float", "dt_as_dt", "date_as_float", "date_as_date"]
+ fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat")
+ results = []
+ for df in pd.read_sas(fname, encoding="iso-8859-1", chunksize=1):
+ # GH 19732: Timestamps imported from sas will incur floating point errors
+ df.reset_index(inplace=True, drop=True)
+ results.append(df)
+ expected = [
+ pd.DataFrame(
+ {
+ "text": ["max"],
+ "dt_as_float": [253717747199.999],
+ "dt_as_dt": np.array(
+ [datetime(9999, 12, 29, 23, 59, 59, 999000)], dtype="M8[ms]"
+ ),
+ "date_as_float": [2936547.0],
+ "date_as_date": np.array([datetime(9999, 12, 29)], dtype="M8[s]"),
+ },
+ columns=col_order,
+ ),
+ pd.DataFrame(
+ {
+ "text": ["normal"],
+ "dt_as_float": [1880323199.999],
+ "dt_as_dt": np.array(["2019-08-01 23:59:59.999"], dtype="M8[ms]"),
+ "date_as_float": [21762.0],
+ "date_as_date": np.array(["2019-08-01"], dtype="M8[s]"),
+ },
+ columns=col_order,
+ ),
+ ]
+ if not IS64:
+ # No good reason for this, just what we get on the CI
+ expected[0].loc[0, "dt_as_dt"] -= np.timedelta64(1, "ms")
+ expected[1].loc[0, "dt_as_dt"] -= np.timedelta64(1, "ms")
+
+ tm.assert_frame_equal(results[0], expected[0])
+ tm.assert_frame_equal(results[1], expected[1])
+
+
+def test_null_date(datapath):
+ fname = datapath("io", "sas", "data", "dates_null.sas7bdat")
+ df = pd.read_sas(fname, encoding="utf-8")
+
+ expected = pd.DataFrame(
+ {
+ "datecol": np.array(
+ [
+ datetime(9999, 12, 29),
+ np.datetime64("NaT"),
+ ],
+ dtype="M8[s]",
+ ),
+ "datetimecol": np.array(
+ [
+ datetime(9999, 12, 29, 23, 59, 59, 999000),
+ np.datetime64("NaT"),
+ ],
+ dtype="M8[ms]",
+ ),
+ },
+ )
+ if not IS64:
+ # No good reason for this, just what we get on the CI
+ expected.loc[0, "datetimecol"] -= np.timedelta64(1, "ms")
+ tm.assert_frame_equal(df, expected)
+
+
+def test_meta2_page(datapath):
+ # GH 35545
+ fname = datapath("io", "sas", "data", "test_meta2_page.sas7bdat")
+ df = pd.read_sas(fname)
+ assert len(df) == 1000
+
+
+@pytest.mark.parametrize(
+ "test_file, override_offset, override_value, expected_msg",
+ [
+ ("test2.sas7bdat", 0x10000 + 55229, 0x80 | 0x0F, "Out of bounds"),
+ ("test2.sas7bdat", 0x10000 + 55229, 0x10, "unknown control byte"),
+ ("test3.sas7bdat", 118170, 184, "Out of bounds"),
+ ],
+)
+def test_rle_rdc_exceptions(
+ datapath, test_file, override_offset, override_value, expected_msg
+):
+ """Errors in RLE/RDC decompression should propagate."""
+ with open(datapath("io", "sas", "data", test_file), "rb") as fd:
+ data = bytearray(fd.read())
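+    # corrupt a single byte of the compressed stream so decompression fails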
+ data[override_offset] = override_value
+ with pytest.raises(Exception, match=expected_msg):
+ pd.read_sas(io.BytesIO(data), format="sas7bdat")
+
+
+def test_0x40_control_byte(datapath):
+ # GH 31243
+ fname = datapath("io", "sas", "data", "0x40controlbyte.sas7bdat")
+ df = pd.read_sas(fname, encoding="ascii")
+ fname = datapath("io", "sas", "data", "0x40controlbyte.csv")
+ df0 = pd.read_csv(fname, dtype="object")
+ tm.assert_frame_equal(df, df0)
+
+
+def test_0x00_control_byte(datapath):
+ # GH 47099
+ fname = datapath("io", "sas", "data", "0x00controlbyte.sas7bdat.bz2")
+ df = next(pd.read_sas(fname, chunksize=11_000))
+ assert df.shape == (11_000, 20)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_xport.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_xport.py
new file mode 100644
index 0000000000000000000000000000000000000000..766c9c37d55b9ee3fcb1d206b9ed100aaaf1d610
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/sas/test_xport.py
@@ -0,0 +1,167 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+from pandas.io.sas.sasreader import read_sas
+
+# CSV versions of test xpt files were obtained using the R foreign library
+
+# Numbers in a SAS xport file are always float64, so need to convert
+# before making comparisons.
+
+
+def numeric_as_float(data):
+ for v in data.columns:
+ if data[v].dtype is np.dtype("int64"):
+ data[v] = data[v].astype(np.float64)
+
+
+class TestXport:
+ @pytest.fixture
+ def file01(self, datapath):
+ return datapath("io", "sas", "data", "DEMO_G.xpt")
+
+ @pytest.fixture
+ def file02(self, datapath):
+ return datapath("io", "sas", "data", "SSHSV1_A.xpt")
+
+ @pytest.fixture
+ def file03(self, datapath):
+ return datapath("io", "sas", "data", "DRXFCD_G.xpt")
+
+ @pytest.fixture
+ def file04(self, datapath):
+ return datapath("io", "sas", "data", "paxraw_d_short.xpt")
+
+ @pytest.fixture
+ def file05(self, datapath):
+ return datapath("io", "sas", "data", "DEMO_PUF.cpt")
+
+ @pytest.mark.slow
+ def test1_basic(self, file01):
+ # Tests with DEMO_G.xpt (all numeric file)
+
+ # Compare to this
+ data_csv = pd.read_csv(file01.replace(".xpt", ".csv"))
+ numeric_as_float(data_csv)
+
+ # Read full file
+ data = read_sas(file01, format="xport")
+ tm.assert_frame_equal(data, data_csv)
+ num_rows = data.shape[0]
+
+ # Test reading beyond end of file
+ with read_sas(file01, format="xport", iterator=True) as reader:
+ data = reader.read(num_rows + 100)
+ assert data.shape[0] == num_rows
+
+ # Test incremental read with `read` method.
+ with read_sas(file01, format="xport", iterator=True) as reader:
+ data = reader.read(10)
+ tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
+
+ # Test incremental read with `get_chunk` method.
+ with read_sas(file01, format="xport", chunksize=10) as reader:
+ data = reader.get_chunk()
+ tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
+
+ # Test read in loop
+ m = 0
+ with read_sas(file01, format="xport", chunksize=100) as reader:
+ for x in reader:
+ m += x.shape[0]
+ assert m == num_rows
+
+ # Read full file with `read_sas` method
+ data = read_sas(file01)
+ tm.assert_frame_equal(data, data_csv)
+
+ def test1_index(self, file01):
+ # Tests with DEMO_G.xpt using index (all numeric file)
+
+ # Compare to this
+ data_csv = pd.read_csv(file01.replace(".xpt", ".csv"))
+ data_csv = data_csv.set_index("SEQN")
+ numeric_as_float(data_csv)
+
+ # Read full file
+ data = read_sas(file01, index="SEQN", format="xport")
+ tm.assert_frame_equal(data, data_csv, check_index_type=False)
+
+ # Test incremental read with `read` method.
+ with read_sas(file01, index="SEQN", format="xport", iterator=True) as reader:
+ data = reader.read(10)
+ tm.assert_frame_equal(data, data_csv.iloc[0:10, :], check_index_type=False)
+
+ # Test incremental read with `get_chunk` method.
+ with read_sas(file01, index="SEQN", format="xport", chunksize=10) as reader:
+ data = reader.get_chunk()
+ tm.assert_frame_equal(data, data_csv.iloc[0:10, :], check_index_type=False)
+
+ def test1_incremental(self, file01):
+ # Test with DEMO_G.xpt, reading full file incrementally
+
+ data_csv = pd.read_csv(file01.replace(".xpt", ".csv"))
+ data_csv = data_csv.set_index("SEQN")
+ numeric_as_float(data_csv)
+
+ with read_sas(file01, index="SEQN", chunksize=1000) as reader:
+ all_data = list(reader)
+ data = pd.concat(all_data, axis=0)
+
+ tm.assert_frame_equal(data, data_csv, check_index_type=False)
+
+ def test2(self, file02):
+ # Test with SSHSV1_A.xpt
+
+ # Compare to this
+ data_csv = pd.read_csv(file02.replace(".xpt", ".csv"))
+ numeric_as_float(data_csv)
+
+ data = read_sas(file02)
+ tm.assert_frame_equal(data, data_csv)
+
+ def test2_binary(self, file02):
+ # Test with SSHSV1_A.xpt, read as a binary file
+
+ # Compare to this
+ data_csv = pd.read_csv(file02.replace(".xpt", ".csv"))
+ numeric_as_float(data_csv)
+
+ with open(file02, "rb") as fd:
+ # GH#35693 ensure that if we pass an open file, we
+ # dont incorrectly close it in read_sas
+ data = read_sas(fd, format="xport")
+
+ tm.assert_frame_equal(data, data_csv)
+
+ def test_multiple_types(self, file03):
+ # Test with DRXFCD_G.xpt (contains text and numeric variables)
+
+ # Compare to this
+ data_csv = pd.read_csv(file03.replace(".xpt", ".csv"))
+
+ data = read_sas(file03, encoding="utf-8")
+ tm.assert_frame_equal(data, data_csv)
+
+ def test_truncated_float_support(self, file04):
+ # Test with paxraw_d_short.xpt, a shortened version of:
+ # http://wwwn.cdc.gov/Nchs/Nhanes/2005-2006/PAXRAW_D.ZIP
+ # This file has truncated floats (5 bytes in this case).
+
+ # GH 11713
+
+ data_csv = pd.read_csv(file04.replace(".xpt", ".csv"))
+
+ data = read_sas(file04, format="xport")
+ tm.assert_frame_equal(data.astype("int64"), data_csv)
+
+ def test_cport_header_found_raises(self, file05):
+ # Test with DEMO_PUF.cpt, the beginning of puf2019_1_fall.xpt
+ # from https://www.cms.gov/files/zip/puf2019.zip
+ # (despite the extension, it's a cpt file)
+ msg = "Header record indicates a CPORT file, which is not readable."
+ with pytest.raises(ValueError, match=msg):
+ read_sas(file05, format="xport")
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ccc675ecb0d63a8e97f6b11da825cfd6738750c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..893ee4f29ba360c0c759d4eadeb2cbdf45edf963
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/conftest.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1ef0b91311d15d91dcce6fda37f3b2f68f2bc88
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_to_xml.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fde386e79d73cb10f288697db973a912dadbb3dd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bb869fb6d83a4e9ce974bdf5136dd45e5070839
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/__pycache__/test_xml_dtypes.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py
new file mode 100644
index 0000000000000000000000000000000000000000..37251a58b0c119ef1da15c259e9e77a456b86ac9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/xml/test_to_xml.py
@@ -0,0 +1,1375 @@
+from __future__ import annotations
+
+from io import (
+ BytesIO,
+ StringIO,
+)
+import os
+
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+from pandas import (
+ NA,
+ DataFrame,
+ Index,
+)
+import pandas._testing as tm
+
+from pandas.io.common import get_handle
+from pandas.io.xml import read_xml
+
+# CHECKLIST
+
+# [x] - ValueError: "Values for parser can only be lxml or etree."
+
+# etree
+# [x] - ImportError: "lxml not found, please install or use the etree parser."
+# [X] - TypeError: "...is not a valid type for attr_cols"
+# [X] - TypeError: "...is not a valid type for elem_cols"
+# [X] - LookupError: "unknown encoding"
+# [X] - KeyError: "...is not included in namespaces"
+# [X] - KeyError: "no valid column"
+# [X] - ValueError: "To use stylesheet, you need lxml installed..."
+# [] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
+# [X] - FileNotFoundError: "No such file or directory"
+# [X] - PermissionError: "Forbidden"
+
+# lxml
+# [X] - TypeError: "...is not a valid type for attr_cols"
+# [X] - TypeError: "...is not a valid type for elem_cols"
+# [X] - LookupError: "unknown encoding"
+# [] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
+# [X] - FileNotFoundError: "No such file or directory"
+# [X] - KeyError: "...is not included in namespaces"
+# [X] - KeyError: "no valid column"
+# [X] - ValueError: "stylesheet is not a url, file, or xml string."
+# [] - LookupError: (NEED WRONG ENCODING FOR FILE OUTPUT)
+# [] - URLError: (USUALLY DUE TO NETWORKING)
+# [] - HTTPError: (NEED AN ONLINE STYLESHEET)
+# [X] - OSError: "failed to load external entity"
+# [X] - XMLSyntaxError: "Opening and ending tag mismatch"
+# [X] - XSLTApplyError: "Cannot resolve URI"
+# [X] - XSLTParseError: "failed to compile"
+# [X] - PermissionError: "Forbidden"
+
+
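+# Base frame for most tests below; the NaN in "sides" exercises na_rep and
+# empty-element output.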
+@pytest.fixture
+def geom_df():
+ return DataFrame(
+ {
+ "shape": ["square", "circle", "triangle"],
+ "degrees": [360, 360, 180],
+ "sides": [4, np.nan, 3],
+ }
+ )
+
+
+@pytest.fixture
+def planet_df():
+ return DataFrame(
+ {
+ "planet": [
+ "Mercury",
+ "Venus",
+ "Earth",
+ "Mars",
+ "Jupiter",
+ "Saturn",
+ "Uranus",
+ "Neptune",
+ ],
+ "type": [
+ "terrestrial",
+ "terrestrial",
+ "terrestrial",
+ "terrestrial",
+ "gas giant",
+ "gas giant",
+ "ice giant",
+ "ice giant",
+ ],
+ "location": [
+ "inner",
+ "inner",
+ "inner",
+ "inner",
+ "outer",
+ "outer",
+ "outer",
+ "outer",
+ ],
+ "mass": [
+ 0.330114,
+ 4.86747,
+ 5.97237,
+ 0.641712,
+ 1898.187,
+ 568.3174,
+ 86.8127,
+ 102.4126,
+ ],
+ }
+ )
+
+
+@pytest.fixture
+def from_file_expected():
+ return """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <index>0</index>
+    <category>cooking</category>
+    <title>Everyday Italian</title>
+    <author>Giada De Laurentiis</author>
+    <year>2005</year>
+    <price>30.0</price>
+  </row>
+  <row>
+    <index>1</index>
+    <category>children</category>
+    <title>Harry Potter</title>
+    <author>J K. Rowling</author>
+    <year>2005</year>
+    <price>29.99</price>
+  </row>
+  <row>
+    <index>2</index>
+    <category>web</category>
+    <title>Learning XML</title>
+    <author>Erik T. Ray</author>
+    <year>2003</year>
+    <price>39.95</price>
+  </row>
+</data>"""
+
+
+def equalize_decl(doc):
+ # etree and lxml differ on quotes and case in xml declaration
+ if doc is not None:
+ doc = doc.replace(
+            '<?xml version="1.0" encoding="utf-8"?',
+            "<?xml version='1.0' encoding='utf-8'?",
+        )
+    return doc
+
+
+def test_index_false(xml_books, parser):
+    expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <category>cooking</category>
+    <title>Everyday Italian</title>
+    <author>Giada De Laurentiis</author>
+    <year>2005</year>
+    <price>30.0</price>
+  </row>
+  <row>
+    <category>children</category>
+    <title>Harry Potter</title>
+    <author>J K. Rowling</author>
+    <year>2005</year>
+    <price>29.99</price>
+  </row>
+  <row>
+    <category>web</category>
+    <title>Learning XML</title>
+    <author>Erik T. Ray</author>
+    <year>2003</year>
+    <price>39.95</price>
+  </row>
+</data>"""
+
+ df_file = read_xml(xml_books, parser=parser)
+
+ with tm.ensure_clean("test.xml") as path:
+ df_file.to_xml(path, index=False, parser=parser)
+ with open(path, "rb") as f:
+ output = f.read().decode("utf-8").strip()
+
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+def test_index_false_rename_row_root(xml_books, parser):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<books>
+  <book>
+    <category>cooking</category>
+    <title>Everyday Italian</title>
+    <author>Giada De Laurentiis</author>
+    <year>2005</year>
+    <price>30.0</price>
+  </book>
+  <book>
+    <category>children</category>
+    <title>Harry Potter</title>
+    <author>J K. Rowling</author>
+    <year>2005</year>
+    <price>29.99</price>
+  </book>
+  <book>
+    <category>web</category>
+    <title>Learning XML</title>
+    <author>Erik T. Ray</author>
+    <year>2003</year>
+    <price>39.95</price>
+  </book>
+</books>"""
+
+ df_file = read_xml(xml_books, parser=parser)
+
+ with tm.ensure_clean("test.xml") as path:
+ df_file.to_xml(
+ path, index=False, root_name="books", row_name="book", parser=parser
+ )
+ with open(path, "rb") as f:
+ output = f.read().decode("utf-8").strip()
+
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+@pytest.mark.parametrize(
+ "offset_index", [list(range(10, 13)), [str(i) for i in range(10, 13)]]
+)
+def test_index_false_with_offset_input_index(parser, offset_index, geom_df):
+ """
+    Tests that the output does not contain the `<index>` field when the index of the
+    input DataFrame has an offset.
+
+ This is a regression test for issue #42458.
+ """
+
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <shape>square</shape>
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+  </row>
+  <row>
+    <shape>circle</shape>
+    <degrees>360</degrees>
+    <sides/>
+  </row>
+  <row>
+    <shape>triangle</shape>
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+  </row>
+</data>"""
+
+ offset_geom_df = geom_df.copy()
+ offset_geom_df.index = Index(offset_index)
+ output = offset_geom_df.to_xml(index=False, parser=parser)
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+# NA_REP
+
+na_expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <index>0</index>
+    <shape>square</shape>
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+  </row>
+  <row>
+    <index>1</index>
+    <shape>circle</shape>
+    <degrees>360</degrees>
+    <sides/>
+  </row>
+  <row>
+    <index>2</index>
+    <shape>triangle</shape>
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+  </row>
+</data>"""
+
+
+def test_na_elem_output(parser, geom_df):
+ output = geom_df.to_xml(parser=parser)
+ output = equalize_decl(output)
+
+ assert output == na_expected
+
+
+def test_na_empty_str_elem_option(parser, geom_df):
+ output = geom_df.to_xml(na_rep="", parser=parser)
+ output = equalize_decl(output)
+
+ assert output == na_expected
+
+
+def test_na_empty_elem_option(parser, geom_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <index>0</index>
+    <shape>square</shape>
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+  </row>
+  <row>
+    <index>1</index>
+    <shape>circle</shape>
+    <degrees>360</degrees>
+    <sides>0.0</sides>
+  </row>
+  <row>
+    <index>2</index>
+    <shape>triangle</shape>
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+  </row>
+</data>"""
+
+ output = geom_df.to_xml(na_rep="0.0", parser=parser)
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+# ATTR_COLS
+
+
+def test_attrs_cols_nan_output(parser, geom_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row index="0" shape="square" degrees="360" sides="4.0"/>
+  <row index="1" shape="circle" degrees="360"/>
+  <row index="2" shape="triangle" degrees="180" sides="3.0"/>
+</data>"""
+
+ output = geom_df.to_xml(attr_cols=["shape", "degrees", "sides"], parser=parser)
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+def test_attrs_cols_prefix(parser, geom_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<doc:data xmlns:doc="http://example.xom">
+  <doc:row doc:index="0" doc:shape="square" doc:degrees="360" doc:sides="4.0"/>
+  <doc:row doc:index="1" doc:shape="circle" doc:degrees="360"/>
+  <doc:row doc:index="2" doc:shape="triangle" doc:degrees="180" doc:sides="3.0"/>
+</doc:data>"""
+
+ output = geom_df.to_xml(
+ attr_cols=["index", "shape", "degrees", "sides"],
+ namespaces={"doc": "http://example.xom"},
+ prefix="doc",
+ parser=parser,
+ )
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+def test_attrs_unknown_column(parser, geom_df):
+ with pytest.raises(KeyError, match=("no valid column")):
+ geom_df.to_xml(attr_cols=["shape", "degree", "sides"], parser=parser)
+
+
+def test_attrs_wrong_type(parser, geom_df):
+ with pytest.raises(TypeError, match=("is not a valid type for attr_cols")):
+ geom_df.to_xml(attr_cols='"shape", "degree", "sides"', parser=parser)
+
+
+# ELEM_COLS
+
+
+def test_elems_cols_nan_output(parser, geom_df):
+ elems_cols_expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+    <shape>square</shape>
+  </row>
+  <row>
+    <degrees>360</degrees>
+    <sides/>
+    <shape>circle</shape>
+  </row>
+  <row>
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+    <shape>triangle</shape>
+  </row>
+</data>"""
+
+ output = geom_df.to_xml(
+ index=False, elem_cols=["degrees", "sides", "shape"], parser=parser
+ )
+ output = equalize_decl(output)
+
+ assert output == elems_cols_expected
+
+
+def test_elems_unknown_column(parser, geom_df):
+ with pytest.raises(KeyError, match=("no valid column")):
+ geom_df.to_xml(elem_cols=["shape", "degree", "sides"], parser=parser)
+
+
+def test_elems_wrong_type(parser, geom_df):
+ with pytest.raises(TypeError, match=("is not a valid type for elem_cols")):
+ geom_df.to_xml(elem_cols='"shape", "degree", "sides"', parser=parser)
+
+
+def test_elems_and_attrs_cols(parser, geom_df):
+ elems_cols_expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row shape="square">
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+  </row>
+  <row shape="circle">
+    <degrees>360</degrees>
+    <sides/>
+  </row>
+  <row shape="triangle">
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+  </row>
+</data>"""
+
+ output = geom_df.to_xml(
+ index=False,
+ elem_cols=["degrees", "sides"],
+ attr_cols=["shape"],
+ parser=parser,
+ )
+ output = equalize_decl(output)
+
+ assert output == elems_cols_expected
+
+
+# HIERARCHICAL COLUMNS
+
+
+def test_hierarchical_columns(parser, planet_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <location>inner</location>
+    <type>terrestrial</type>
+    <count_mass>4</count_mass>
+    <sum_mass>11.81</sum_mass>
+    <mean_mass>2.95</mean_mass>
+  </row>
+  <row>
+    <location>outer</location>
+    <type>gas giant</type>
+    <count_mass>2</count_mass>
+    <sum_mass>2466.5</sum_mass>
+    <mean_mass>1233.25</mean_mass>
+  </row>
+  <row>
+    <location>outer</location>
+    <type>ice giant</type>
+    <count_mass>2</count_mass>
+    <sum_mass>189.23</sum_mass>
+    <mean_mass>94.61</mean_mass>
+  </row>
+  <row>
+    <location>All</location>
+    <type/>
+    <count_mass>8</count_mass>
+    <sum_mass>2667.54</sum_mass>
+    <mean_mass>333.44</mean_mass>
+  </row>
+</data>"""
+
+ pvt = planet_df.pivot_table(
+ index=["location", "type"],
+ values="mass",
+ aggfunc=["count", "sum", "mean"],
+ margins=True,
+ ).round(2)
+
+ output = pvt.to_xml(parser=parser)
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+def test_hierarchical_attrs_columns(parser, planet_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row location="inner" type="terrestrial" count_mass="4" sum_mass="11.81" \
+mean_mass="2.95"/>
+  <row location="outer" type="gas giant" count_mass="2" sum_mass="2466.5" \
+mean_mass="1233.25"/>
+  <row location="outer" type="ice giant" count_mass="2" sum_mass="189.23" \
+mean_mass="94.61"/>
+  <row location="All" count_mass="8" sum_mass="2667.54" mean_mass="333.44"/>
+</data>"""
+
+ pvt = planet_df.pivot_table(
+ index=["location", "type"],
+ values="mass",
+ aggfunc=["count", "sum", "mean"],
+ margins=True,
+ ).round(2)
+
+ output = pvt.to_xml(attr_cols=list(pvt.reset_index().columns.values), parser=parser)
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+# MULTIINDEX
+
+
+def test_multi_index(parser, planet_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <location>inner</location>
+    <type>terrestrial</type>
+    <count>4</count>
+    <sum>11.81</sum>
+    <mean>2.95</mean>
+  </row>
+  <row>
+    <location>outer</location>
+    <type>gas giant</type>
+    <count>2</count>
+    <sum>2466.5</sum>
+    <mean>1233.25</mean>
+  </row>
+  <row>
+    <location>outer</location>
+    <type>ice giant</type>
+    <count>2</count>
+    <sum>189.23</sum>
+    <mean>94.61</mean>
+  </row>
+</data>"""
+
+ agg = (
+ planet_df.groupby(["location", "type"])["mass"]
+ .agg(["count", "sum", "mean"])
+ .round(2)
+ )
+
+ output = agg.to_xml(parser=parser)
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+def test_multi_index_attrs_cols(parser, planet_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row location="inner" type="terrestrial" count="4" sum="11.81" mean="2.95"/>
+  <row location="outer" type="gas giant" count="2" sum="2466.5" mean="1233.25"/>
+  <row location="outer" type="ice giant" count="2" sum="189.23" mean="94.61"/>
+</data>"""
+
+ agg = (
+ planet_df.groupby(["location", "type"])["mass"]
+ .agg(["count", "sum", "mean"])
+ .round(2)
+ )
+ output = agg.to_xml(attr_cols=list(agg.reset_index().columns.values), parser=parser)
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+# NAMESPACE
+
+
+def test_default_namespace(parser, geom_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data xmlns="http://example.com">
+  <row>
+    <index>0</index>
+    <shape>square</shape>
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+  </row>
+  <row>
+    <index>1</index>
+    <shape>circle</shape>
+    <degrees>360</degrees>
+    <sides/>
+  </row>
+  <row>
+    <index>2</index>
+    <shape>triangle</shape>
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+  </row>
+</data>"""
+
+ output = geom_df.to_xml(namespaces={"": "http://example.com"}, parser=parser)
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+def test_unused_namespaces(parser, geom_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data xmlns:oth="http://other.org" xmlns:ex="http://example.com">
+  <row>
+    <index>0</index>
+    <shape>square</shape>
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+  </row>
+  <row>
+    <index>1</index>
+    <shape>circle</shape>
+    <degrees>360</degrees>
+    <sides/>
+  </row>
+  <row>
+    <index>2</index>
+    <shape>triangle</shape>
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+  </row>
+</data>"""
+
+ output = geom_df.to_xml(
+ namespaces={"oth": "http://other.org", "ex": "http://example.com"},
+ parser=parser,
+ )
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+# PREFIX
+
+
+def test_namespace_prefix(parser, geom_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<doc:data xmlns:doc="http://example.com">
+  <doc:row>
+    <doc:index>0</doc:index>
+    <doc:shape>square</doc:shape>
+    <doc:degrees>360</doc:degrees>
+    <doc:sides>4.0</doc:sides>
+  </doc:row>
+  <doc:row>
+    <doc:index>1</doc:index>
+    <doc:shape>circle</doc:shape>
+    <doc:degrees>360</doc:degrees>
+    <doc:sides/>
+  </doc:row>
+  <doc:row>
+    <doc:index>2</doc:index>
+    <doc:shape>triangle</doc:shape>
+    <doc:degrees>180</doc:degrees>
+    <doc:sides>3.0</doc:sides>
+  </doc:row>
+</doc:data>"""
+
+ output = geom_df.to_xml(
+ namespaces={"doc": "http://example.com"}, prefix="doc", parser=parser
+ )
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+def test_missing_prefix_in_nmsp(parser, geom_df):
+ with pytest.raises(KeyError, match=("doc is not included in namespaces")):
+ geom_df.to_xml(
+ namespaces={"": "http://example.com"}, prefix="doc", parser=parser
+ )
+
+
+def test_namespace_prefix_and_default(parser, geom_df):
+ expected = """\
+<?xml version='1.0' encoding='utf-8'?>
+<doc:data xmlns="http://example.com" xmlns:doc="http://other.org">
+  <doc:row>
+    <doc:index>0</doc:index>
+    <doc:shape>square</doc:shape>
+    <doc:degrees>360</doc:degrees>
+    <doc:sides>4.0</doc:sides>
+  </doc:row>
+  <doc:row>
+    <doc:index>1</doc:index>
+    <doc:shape>circle</doc:shape>
+    <doc:degrees>360</doc:degrees>
+    <doc:sides/>
+  </doc:row>
+  <doc:row>
+    <doc:index>2</doc:index>
+    <doc:shape>triangle</doc:shape>
+    <doc:degrees>180</doc:degrees>
+    <doc:sides>3.0</doc:sides>
+  </doc:row>
+</doc:data>"""
+
+ output = geom_df.to_xml(
+ namespaces={"": "http://example.com", "doc": "http://other.org"},
+ prefix="doc",
+ parser=parser,
+ )
+ output = equalize_decl(output)
+
+ assert output == expected
+
+
+# ENCODING
+
+encoding_expected = """\
+<?xml version='1.0' encoding='ISO-8859-1'?>
+<data>
+  <row>
+    <index>0</index>
+    <rank>1</rank>
+    <malename>José</malename>
+    <femalename>Sofía</femalename>
+  </row>
+  <row>
+    <index>1</index>
+    <rank>2</rank>
+    <malename>Luis</malename>
+    <femalename>Valentina</femalename>
+  </row>
+  <row>
+    <index>2</index>
+    <rank>3</rank>
+    <malename>Carlos</malename>
+    <femalename>Isabella</femalename>
+  </row>
+  <row>
+    <index>3</index>
+    <rank>4</rank>
+    <malename>Juan</malename>
+    <femalename>Camila</femalename>
+  </row>
+  <row>
+    <index>4</index>
+    <rank>5</rank>
+    <malename>Jorge</malename>
+    <femalename>Valeria</femalename>
+  </row>
+</data>"""
+
+
+def test_encoding_option_str(xml_baby_names, parser):
+ df_file = read_xml(xml_baby_names, parser=parser, encoding="ISO-8859-1").head(5)
+
+ output = df_file.to_xml(encoding="ISO-8859-1", parser=parser)
+
+ if output is not None:
+ # etree and lxml differ on quotes and case in xml declaration
+ output = output.replace(
+            '<?xml version="1.0" encoding="ISO-8859-1"?',
+            "<?xml version='1.0' encoding='ISO-8859-1'?",
+        )
+
+    assert output == encoding_expected
+
+
+# XML DECLARATION
+
+
+def test_xml_declaration_off(geom_df):
+    expected = """\
+<data>
+  <row>
+    <index>0</index>
+    <shape>square</shape>
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+  </row>
+  <row>
+    <index>1</index>
+    <shape>circle</shape>
+    <degrees>360</degrees>
+    <sides/>
+  </row>
+  <row>
+    <index>2</index>
+    <shape>triangle</shape>
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+  </row>
+</data>"""
+
+ output = geom_df.to_xml(xml_declaration=False)
+
+ assert output == expected
+
+
+def test_no_pretty_print_with_decl(parser, geom_df):
+ expected = (
+        "<?xml version='1.0' encoding='utf-8'?>\n"
+        "<data><row><index>0</index><shape>square</shape>"
+        "<degrees>360</degrees><sides>4.0</sides></row><row>"
+        "<index>1</index><shape>circle</shape><degrees>360"
+        "</degrees><sides/></row><row><index>2</index><shape>"
+        "triangle</shape><degrees>180</degrees><sides>3.0</sides>"
+        "</row></data>"
+ )
+
+ output = geom_df.to_xml(pretty_print=False, parser=parser)
+ output = equalize_decl(output)
+
+ # etree adds space for closed tags
+ if output is not None:
+ output = output.replace(" />", "/>")
+
+ assert output == expected
+
+
+def test_no_pretty_print_no_decl(parser, geom_df):
+ expected = (
+        "<data><row><index>0</index><shape>square</shape>"
+        "<degrees>360</degrees><sides>4.0</sides></row><row>"
+        "<index>1</index><shape>circle</shape><degrees>360"
+        "</degrees><sides/></row><row><index>2</index><shape>"
+        "triangle</shape><degrees>180</degrees><sides>3.0</sides>"
+        "</row></data>"
+ )
+
+ output = geom_df.to_xml(xml_declaration=False, pretty_print=False, parser=parser)
+
+ # etree adds space for closed tags
+ if output is not None:
+ output = output.replace(" />", "/>")
+
+ assert output == expected
+
+
+# PARSER
+
+
+@td.skip_if_installed("lxml")
+def test_default_parser_no_lxml(geom_df):
+ with pytest.raises(
+ ImportError, match=("lxml not found, please install or use the etree parser.")
+ ):
+ geom_df.to_xml()
+
+
+def test_unknown_parser(geom_df):
+ with pytest.raises(
+ ValueError, match=("Values for parser can only be lxml or etree.")
+ ):
+ geom_df.to_xml(parser="bs4")
+
+
+# STYLESHEET
+
+xsl_expected = """\
+<?xml version="1.0" encoding="utf-8"?>
+<data>
+  <row>
+    <field field="index">0</field>
+    <field field="shape">square</field>
+    <field field="degrees">360</field>
+    <field field="sides">4.0</field>
+  </row>
+  <row>
+    <field field="index">1</field>
+    <field field="shape">circle</field>
+    <field field="degrees">360</field>
+    <field field="sides"/>
+  </row>
+  <row>
+    <field field="index">2</field>
+    <field field="shape">triangle</field>
+    <field field="degrees">180</field>
+    <field field="sides">3.0</field>
+  </row>
+</data>"""
+
+
+def test_stylesheet_file_like(xsl_row_field_output, mode, geom_df):
+ pytest.importorskip("lxml")
+ with open(
+ xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+ ) as f:
+ assert geom_df.to_xml(stylesheet=f) == xsl_expected
+
+
+def test_stylesheet_io(xsl_row_field_output, mode, geom_df):
+ # note: By default the bodies of untyped functions are not checked,
+ # consider using --check-untyped-defs
+ pytest.importorskip("lxml")
+ xsl_obj: BytesIO | StringIO # type: ignore[annotation-unchecked]
+
+ with open(
+ xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+ ) as f:
+ if mode == "rb":
+ xsl_obj = BytesIO(f.read())
+ else:
+ xsl_obj = StringIO(f.read())
+
+ output = geom_df.to_xml(stylesheet=xsl_obj)
+
+ assert output == xsl_expected
+
+
+def test_stylesheet_buffered_reader(xsl_row_field_output, mode, geom_df):
+ pytest.importorskip("lxml")
+ with open(
+ xsl_row_field_output, mode, encoding="utf-8" if mode == "r" else None
+ ) as f:
+ xsl_obj = f.read()
+
+ output = geom_df.to_xml(stylesheet=xsl_obj)
+
+ assert output == xsl_expected
+
+
+def test_stylesheet_wrong_path(geom_df):
+ lxml_etree = pytest.importorskip("lxml.etree")
+
+ xsl = os.path.join("data", "xml", "row_field_output.xslt")
+
+ with pytest.raises(
+ lxml_etree.XMLSyntaxError,
+ match=("Start tag expected, '<' not found"),
+ ):
+ geom_df.to_xml(stylesheet=xsl)
+
+
+@pytest.mark.parametrize("val", ["", b""])
+def test_empty_string_stylesheet(val, geom_df):
+ lxml_etree = pytest.importorskip("lxml.etree")
+
+ msg = "|".join(
+ [
+ "Document is empty",
+ "Start tag expected, '<' not found",
+ # Seen on Mac with lxml 4.9.1
+ r"None \(line 0\)",
+ ]
+ )
+
+ with pytest.raises(lxml_etree.XMLSyntaxError, match=msg):
+ geom_df.to_xml(stylesheet=val)
+
+
+def test_incorrect_xsl_syntax(geom_df):
+ lxml_etree = pytest.importorskip("lxml.etree")
+
+ xsl = """\
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+    <xsl:output method="xml" encoding="utf-8" indent="yes" >
+    <xsl:strip-space elements="*"/>
+
+    <xsl:template match="@*|node()">
+        <xsl:copy>
+            <xsl:apply-templates select="@*|node()"/>
+        </xsl:copy>
+    </xsl:template>
+
+    <xsl:template match="row/*">
+        <xsl:copy>
+            <xsl:apply-templates select="@*|node()"/>
+        </xsl:copy>
+    </xsl:template>
+</xsl:stylesheet>"""
+
+ with pytest.raises(
+ lxml_etree.XMLSyntaxError, match=("Opening and ending tag mismatch")
+ ):
+ geom_df.to_xml(stylesheet=xsl)
+
+
+def test_incorrect_xsl_eval(geom_df):
+ lxml_etree = pytest.importorskip("lxml.etree")
+
+ xsl = """\
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+    <xsl:output method="xml" encoding="utf-8" indent="yes" />
+    <xsl:strip-space elements="*"/>
+
+    <xsl:template match="@*|node(*)">
+        <xsl:copy>
+            <xsl:apply-templates select="@*|node()"/>
+        </xsl:copy>
+    </xsl:template>
+
+    <xsl:template match="row/*">
+        <xsl:copy>
+            <xsl:apply-templates select="@*|node()"/>
+        </xsl:copy>
+    </xsl:template>
+</xsl:stylesheet>"""
+
+ with pytest.raises(lxml_etree.XSLTParseError, match=("failed to compile")):
+ geom_df.to_xml(stylesheet=xsl)
+
+
+def test_incorrect_xsl_apply(geom_df):
+ lxml_etree = pytest.importorskip("lxml.etree")
+
+ xsl = """\
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+    <xsl:output method="xml" encoding="utf-8" indent="yes" />
+    <xsl:strip-space elements="*"/>
+
+    <xsl:template match="@*|node()">
+        <xsl:copy>
+            <xsl:copy-of select="document('non_existent.xml')/*"/>
+        </xsl:copy>
+    </xsl:template>
+</xsl:stylesheet>"""
+
+ with pytest.raises(lxml_etree.XSLTApplyError, match=("Cannot resolve URI")):
+ with tm.ensure_clean("test.xml") as path:
+ geom_df.to_xml(path, stylesheet=xsl)
+
+
+def test_stylesheet_with_etree(geom_df):
+ xsl = """\
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+    <xsl:output method="xml" encoding="utf-8" indent="yes" />
+    <xsl:template match="@*|node()">
+        <xsl:copy>
+            <xsl:apply-templates select="@*|node()"/>
+        </xsl:copy>
+    </xsl:template>
+</xsl:stylesheet>
+ """
+
+ with pytest.raises(
+ ValueError, match=("To use stylesheet, you need lxml installed")
+ ):
+ geom_df.to_xml(parser="etree", stylesheet=xsl)
+
+
+def test_style_to_csv(geom_df):
+ pytest.importorskip("lxml")
+ xsl = """\
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+    <xsl:output method="text" indent="yes"/>
+    <xsl:strip-space elements="*"/>
+
+    <xsl:param name="delim">,</xsl:param>
+    <xsl:template match="/data">
+        <xsl:text>,shape,degrees,sides</xsl:text>
+        <xsl:apply-templates select="row"/>
+    </xsl:template>
+
+    <xsl:template match="row">
+        <xsl:text>&#xa;</xsl:text>
+        <xsl:value-of select="concat(index, $delim, shape, $delim, degrees, $delim, sides)"/>
+    </xsl:template>
+</xsl:stylesheet>"""
+
+ out_csv = geom_df.to_csv(lineterminator="\n")
+
+ if out_csv is not None:
+ out_csv = out_csv.strip()
+ out_xml = geom_df.to_xml(stylesheet=xsl)
+
+ assert out_csv == out_xml
+
+
+def test_style_to_string(geom_df):
+ pytest.importorskip("lxml")
+ xsl = """\
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+    <xsl:output method="text" indent="yes"/>
+    <xsl:strip-space elements="*"/>
+
+    <xsl:param name="pad"><xsl:text>          </xsl:text></xsl:param>
+    <xsl:template match="/data">
+        <xsl:text>      shape  degrees  sides</xsl:text>
+        <xsl:apply-templates select="row"/>
+    </xsl:template>
+
+    <xsl:template match="row">
+        <xsl:text>&#xa;</xsl:text>
+        <xsl:value-of select="concat(index, substring($pad, 1, 10 - string-length(shape)), shape, '      ', degrees, '    ', sides)"/>
+    </xsl:template>
+</xsl:stylesheet>"""
+
+ out_str = geom_df.to_string()
+ out_xml = geom_df.to_xml(na_rep="NaN", stylesheet=xsl)
+
+ assert out_xml == out_str
+
+
+def test_style_to_json(geom_df):
+ pytest.importorskip("lxml")
+ xsl = """\
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+    <xsl:output method="text" indent="yes"/>
+    <xsl:strip-space elements="*"/>
+
+    <xsl:param name="quot">"</xsl:param>
+
+    <xsl:template match="/data">
+        <xsl:text>{"shape":{</xsl:text>
+        <xsl:apply-templates select="row/shape"/>
+        <xsl:text>},"degrees":{</xsl:text>
+        <xsl:apply-templates select="row/degrees"/>
+        <xsl:text>},"sides":{</xsl:text>
+        <xsl:apply-templates select="row/sides"/>
+        <xsl:text>}}</xsl:text>
+    </xsl:template>
+
+    <xsl:template match="row/*">
+        <xsl:value-of select="concat($quot, ../index, $quot, ':')"/>
+        <xsl:choose>
+            <xsl:when test="name() = 'shape'">
+                <xsl:value-of select="concat($quot, ., $quot)"/>
+            </xsl:when>
+            <xsl:when test=". = ''">
+                <xsl:text>null</xsl:text>
+            </xsl:when>
+            <xsl:otherwise>
+                <xsl:value-of select="."/>
+            </xsl:otherwise>
+        </xsl:choose>
+        <xsl:if test="position() != last()">
+            <xsl:text>,</xsl:text>
+        </xsl:if>
+    </xsl:template>
+</xsl:stylesheet>"""
+
+ out_json = geom_df.to_json()
+ out_xml = geom_df.to_xml(stylesheet=xsl)
+
+ assert out_json == out_xml
+
+
+# COMPRESSION
+
+
+geom_xml = """\
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <index>0</index>
+    <shape>square</shape>
+    <degrees>360</degrees>
+    <sides>4.0</sides>
+  </row>
+  <row>
+    <index>1</index>
+    <shape>circle</shape>
+    <degrees>360</degrees>
+    <sides/>
+  </row>
+  <row>
+    <index>2</index>
+    <shape>triangle</shape>
+    <degrees>180</degrees>
+    <sides>3.0</sides>
+  </row>
+</data>"""
+
+
+def test_compression_output(parser, compression_only, geom_df):
+ with tm.ensure_clean() as path:
+ geom_df.to_xml(path, parser=parser, compression=compression_only)
+
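+        # round-trip through get_handle so the same codec decompresses the
+        # file before comparing against the plain XML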
+ with get_handle(
+ path,
+ "r",
+ compression=compression_only,
+ ) as handle_obj:
+ output = handle_obj.handle.read()
+
+ output = equalize_decl(output)
+
+ assert geom_xml == output.strip()
+
+
+def test_filename_and_suffix_comp(
+ parser, compression_only, geom_df, compression_to_extension
+):
+ compfile = "xml." + compression_to_extension[compression_only]
+ with tm.ensure_clean(filename=compfile) as path:
+ geom_df.to_xml(path, parser=parser, compression=compression_only)
+
+ with get_handle(
+ path,
+ "r",
+ compression=compression_only,
+ ) as handle_obj:
+ output = handle_obj.handle.read()
+
+ output = equalize_decl(output)
+
+ assert geom_xml == output.strip()
+
+
+def test_ea_dtypes(any_numeric_ea_dtype, parser):
+ # GH#43903
+    expected = """<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <index>0</index>
+    <a/>
+  </row>
+</data>"""
+ df = DataFrame({"a": [NA]}).astype(any_numeric_ea_dtype)
+ result = df.to_xml(parser=parser)
+ assert equalize_decl(result).strip() == expected
+
+
+def test_unsupported_compression(parser, geom_df):
+ with pytest.raises(ValueError, match="Unrecognized compression type"):
+ with tm.ensure_clean() as path:
+ geom_df.to_xml(path, parser=parser, compression="7z")
+
+
+# STORAGE OPTIONS
+
+
+@pytest.mark.single_cpu
+def test_s3_permission_output(parser, s3_public_bucket, geom_df):
+ s3fs = pytest.importorskip("s3fs")
+ pytest.importorskip("lxml")
+
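+    # anonymous writes to the public bucket are expected to be rejected,
+    # surfacing as PermissionError or FileNotFoundError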
+ with tm.external_error_raised((PermissionError, FileNotFoundError)):
+ fs = s3fs.S3FileSystem(anon=True)
+ fs.ls(s3_public_bucket.name)
+
+ geom_df.to_xml(
+ f"s3://{s3_public_bucket.name}/geom.xml", compression="zip", parser=parser
+ )