diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_183_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_183_mp_rank_03_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f418c5ae169a5269a40063146842a2f55d246ab1
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_183_mp_rank_03_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f446402ba72c4e2c94858ef15fbd6f5e2593674bd5e04ca2367f5ba80774d84
+size 41830404
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/api/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2325821fc5f1782cf8c609c00f499c9415641cfe
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a17c49d7d76bd92989abfd59de2eaa947b714122
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_api.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_types.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ddf642b286d2bfdfcddef044d47a2107636a3dd1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/api/__pycache__/test_types.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/api/test_api.py b/venv/lib/python3.10/site-packages/pandas/tests/api/test_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..60bcb97aaa3642be064bcacd130edf2084c4a55c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/api/test_api.py
@@ -0,0 +1,383 @@
+from __future__ import annotations
+
+import pytest
+
+import pandas as pd
+from pandas import api
+import pandas._testing as tm
+from pandas.api import (
+ extensions as api_extensions,
+ indexers as api_indexers,
+ interchange as api_interchange,
+ types as api_types,
+ typing as api_typing,
+)
+
+
+class Base:
+ def check(self, namespace, expected, ignored=None):
+ # see which names are in the namespace, minus optional
+ # ignored ones
+ # compare vs the expected
+
+ result = sorted(
+ f for f in dir(namespace) if not f.startswith("__") and f != "annotations"
+ )
+ if ignored is not None:
+ result = sorted(set(result) - set(ignored))
+
+ expected = sorted(expected)
+ tm.assert_almost_equal(result, expected)
+
+
+class TestPDApi(Base):
+ # these are optionally imported based on testing
+ # & need to be ignored
+ ignored = ["tests", "locale", "conftest", "_version_meson"]
+
+ # top-level sub-packages
+ public_lib = [
+ "api",
+ "arrays",
+ "options",
+ "test",
+ "testing",
+ "errors",
+ "plotting",
+ "io",
+ "tseries",
+ ]
+ private_lib = ["compat", "core", "pandas", "util", "_built_with_meson"]
+
+ # misc
+ misc = ["IndexSlice", "NaT", "NA"]
+
+ # top-level classes
+ classes = [
+ "ArrowDtype",
+ "Categorical",
+ "CategoricalIndex",
+ "DataFrame",
+ "DateOffset",
+ "DatetimeIndex",
+ "ExcelFile",
+ "ExcelWriter",
+ "Flags",
+ "Grouper",
+ "HDFStore",
+ "Index",
+ "MultiIndex",
+ "Period",
+ "PeriodIndex",
+ "RangeIndex",
+ "Series",
+ "SparseDtype",
+ "StringDtype",
+ "Timedelta",
+ "TimedeltaIndex",
+ "Timestamp",
+ "Interval",
+ "IntervalIndex",
+ "CategoricalDtype",
+ "PeriodDtype",
+ "IntervalDtype",
+ "DatetimeTZDtype",
+ "BooleanDtype",
+ "Int8Dtype",
+ "Int16Dtype",
+ "Int32Dtype",
+ "Int64Dtype",
+ "UInt8Dtype",
+ "UInt16Dtype",
+ "UInt32Dtype",
+ "UInt64Dtype",
+ "Float32Dtype",
+ "Float64Dtype",
+ "NamedAgg",
+ ]
+
+ # these are already deprecated; awaiting removal
+ deprecated_classes: list[str] = []
+
+ # external modules exposed in pandas namespace
+ modules: list[str] = []
+
+ # top-level functions
+ funcs = [
+ "array",
+ "bdate_range",
+ "concat",
+ "crosstab",
+ "cut",
+ "date_range",
+ "interval_range",
+ "eval",
+ "factorize",
+ "get_dummies",
+ "from_dummies",
+ "infer_freq",
+ "isna",
+ "isnull",
+ "lreshape",
+ "melt",
+ "notna",
+ "notnull",
+ "offsets",
+ "merge",
+ "merge_ordered",
+ "merge_asof",
+ "period_range",
+ "pivot",
+ "pivot_table",
+ "qcut",
+ "show_versions",
+ "timedelta_range",
+ "unique",
+ "value_counts",
+ "wide_to_long",
+ ]
+
+ # top-level option funcs
+ funcs_option = [
+ "reset_option",
+ "describe_option",
+ "get_option",
+ "option_context",
+ "set_option",
+ "set_eng_float_format",
+ ]
+
+ # top-level read_* funcs
+ funcs_read = [
+ "read_clipboard",
+ "read_csv",
+ "read_excel",
+ "read_fwf",
+ "read_gbq",
+ "read_hdf",
+ "read_html",
+ "read_xml",
+ "read_json",
+ "read_pickle",
+ "read_sas",
+ "read_sql",
+ "read_sql_query",
+ "read_sql_table",
+ "read_stata",
+ "read_table",
+ "read_feather",
+ "read_parquet",
+ "read_orc",
+ "read_spss",
+ ]
+
+ # top-level json funcs
+ funcs_json = ["json_normalize"]
+
+ # top-level to_* funcs
+ funcs_to = ["to_datetime", "to_numeric", "to_pickle", "to_timedelta"]
+
+ # top-level to deprecate in the future
+ deprecated_funcs_in_future: list[str] = []
+
+ # these are already deprecated; awaiting removal
+ deprecated_funcs: list[str] = []
+
+ # private modules in pandas namespace
+ private_modules = [
+ "_config",
+ "_libs",
+ "_is_numpy_dev",
+ "_pandas_datetime_CAPI",
+ "_pandas_parser_CAPI",
+ "_testing",
+ "_typing",
+ ]
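+    # "_version" exists only in non-meson builds; meson builds expose "_version_meson" (ignored above)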
+ if not pd._built_with_meson:
+ private_modules.append("_version")
+
+ def test_api(self):
+ checkthese = (
+ self.public_lib
+ + self.private_lib
+ + self.misc
+ + self.modules
+ + self.classes
+ + self.funcs
+ + self.funcs_option
+ + self.funcs_read
+ + self.funcs_json
+ + self.funcs_to
+ + self.private_modules
+ )
+ self.check(namespace=pd, expected=checkthese, ignored=self.ignored)
+
+ def test_api_all(self):
+ expected = set(
+ self.public_lib
+ + self.misc
+ + self.modules
+ + self.classes
+ + self.funcs
+ + self.funcs_option
+ + self.funcs_read
+ + self.funcs_json
+ + self.funcs_to
+ ) - set(self.deprecated_classes)
+ actual = set(pd.__all__)
+
+ extraneous = actual - expected
+ assert not extraneous
+
+ missing = expected - actual
+ assert not missing
+
+ def test_depr(self):
+ deprecated_list = (
+ self.deprecated_classes
+ + self.deprecated_funcs
+ + self.deprecated_funcs_in_future
+ )
+ for depr in deprecated_list:
+ with tm.assert_produces_warning(FutureWarning):
+ _ = getattr(pd, depr)
+
+
+class TestApi(Base):
+ allowed_api_dirs = [
+ "types",
+ "extensions",
+ "indexers",
+ "interchange",
+ "typing",
+ ]
+ allowed_typing = [
+ "DataFrameGroupBy",
+ "DatetimeIndexResamplerGroupby",
+ "Expanding",
+ "ExpandingGroupby",
+ "ExponentialMovingWindow",
+ "ExponentialMovingWindowGroupby",
+ "JsonReader",
+ "NaTType",
+ "NAType",
+ "PeriodIndexResamplerGroupby",
+ "Resampler",
+ "Rolling",
+ "RollingGroupby",
+ "SeriesGroupBy",
+ "StataReader",
+ "TimedeltaIndexResamplerGroupby",
+ "TimeGrouper",
+ "Window",
+ ]
+ allowed_api_types = [
+ "is_any_real_numeric_dtype",
+ "is_array_like",
+ "is_bool",
+ "is_bool_dtype",
+ "is_categorical_dtype",
+ "is_complex",
+ "is_complex_dtype",
+ "is_datetime64_any_dtype",
+ "is_datetime64_dtype",
+ "is_datetime64_ns_dtype",
+ "is_datetime64tz_dtype",
+ "is_dict_like",
+ "is_dtype_equal",
+ "is_extension_array_dtype",
+ "is_file_like",
+ "is_float",
+ "is_float_dtype",
+ "is_hashable",
+ "is_int64_dtype",
+ "is_integer",
+ "is_integer_dtype",
+ "is_interval",
+ "is_interval_dtype",
+ "is_iterator",
+ "is_list_like",
+ "is_named_tuple",
+ "is_number",
+ "is_numeric_dtype",
+ "is_object_dtype",
+ "is_period_dtype",
+ "is_re",
+ "is_re_compilable",
+ "is_scalar",
+ "is_signed_integer_dtype",
+ "is_sparse",
+ "is_string_dtype",
+ "is_timedelta64_dtype",
+ "is_timedelta64_ns_dtype",
+ "is_unsigned_integer_dtype",
+ "pandas_dtype",
+ "infer_dtype",
+ "union_categoricals",
+ "CategoricalDtype",
+ "DatetimeTZDtype",
+ "IntervalDtype",
+ "PeriodDtype",
+ ]
+ allowed_api_interchange = ["from_dataframe", "DataFrame"]
+ allowed_api_indexers = [
+ "check_array_indexer",
+ "BaseIndexer",
+ "FixedForwardWindowIndexer",
+ "VariableOffsetWindowIndexer",
+ ]
+ allowed_api_extensions = [
+ "no_default",
+ "ExtensionDtype",
+ "register_extension_dtype",
+ "register_dataframe_accessor",
+ "register_index_accessor",
+ "register_series_accessor",
+ "take",
+ "ExtensionArray",
+ "ExtensionScalarOpsMixin",
+ ]
+
+ def test_api(self):
+ self.check(api, self.allowed_api_dirs)
+
+ def test_api_typing(self):
+ self.check(api_typing, self.allowed_typing)
+
+ def test_api_types(self):
+ self.check(api_types, self.allowed_api_types)
+
+ def test_api_interchange(self):
+ self.check(api_interchange, self.allowed_api_interchange)
+
+ def test_api_indexers(self):
+ self.check(api_indexers, self.allowed_api_indexers)
+
+ def test_api_extensions(self):
+ self.check(api_extensions, self.allowed_api_extensions)
+
+
+class TestTesting(Base):
+ funcs = [
+ "assert_frame_equal",
+ "assert_series_equal",
+ "assert_index_equal",
+ "assert_extension_array_equal",
+ ]
+
+ def test_testing(self):
+ from pandas import testing
+
+ self.check(testing, self.funcs)
+
+ def test_util_in_top_level(self):
+ with pytest.raises(AttributeError, match="foo"):
+ pd.util.foo
+
+
+def test_pandas_array_alias():
+ msg = "PandasArray has been renamed NumpyExtensionArray"
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ res = pd.arrays.PandasArray
+
+ assert res is pd.arrays.NumpyExtensionArray
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/api/test_types.py b/venv/lib/python3.10/site-packages/pandas/tests/api/test_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbaa6e7e18bcaa9a574b741b5361818f1be01ecf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/api/test_types.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import pandas._testing as tm
+from pandas.api import types
+from pandas.tests.api.test_api import Base
+
+
+class TestTypes(Base):
+ allowed = [
+ "is_any_real_numeric_dtype",
+ "is_bool",
+ "is_bool_dtype",
+ "is_categorical_dtype",
+ "is_complex",
+ "is_complex_dtype",
+ "is_datetime64_any_dtype",
+ "is_datetime64_dtype",
+ "is_datetime64_ns_dtype",
+ "is_datetime64tz_dtype",
+ "is_dtype_equal",
+ "is_float",
+ "is_float_dtype",
+ "is_int64_dtype",
+ "is_integer",
+ "is_integer_dtype",
+ "is_number",
+ "is_numeric_dtype",
+ "is_object_dtype",
+ "is_scalar",
+ "is_sparse",
+ "is_string_dtype",
+ "is_signed_integer_dtype",
+ "is_timedelta64_dtype",
+ "is_timedelta64_ns_dtype",
+ "is_unsigned_integer_dtype",
+ "is_period_dtype",
+ "is_interval",
+ "is_interval_dtype",
+ "is_re",
+ "is_re_compilable",
+ "is_dict_like",
+ "is_iterator",
+ "is_file_like",
+ "is_list_like",
+ "is_hashable",
+ "is_array_like",
+ "is_named_tuple",
+ "pandas_dtype",
+ "union_categoricals",
+ "infer_dtype",
+ "is_extension_array_dtype",
+ ]
+ deprecated: list[str] = []
+ dtypes = ["CategoricalDtype", "DatetimeTZDtype", "PeriodDtype", "IntervalDtype"]
+
+ def test_types(self):
+ self.check(types, self.allowed + self.dtypes + self.deprecated)
+
+ def test_deprecated_from_api_types(self):
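+        # currently a no-op since ``deprecated`` is empty; retained for future deprecations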
+ for t in self.deprecated:
+ with tm.assert_produces_warning(FutureWarning):
+ getattr(types, t)(1)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_astype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61f0d6ec6ed6b4a0c870b802b72dbb92a8e0c20d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_astype.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_combine_concat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_combine_concat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99074de813beb62b60c52916dadfdda2624967ef
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_combine_concat.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_reductions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_reductions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38ffd53be5b6bf2e4f8738da0f6ddb965a7a0359
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_reductions.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_unary.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_unary.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cda56d5ca47f2b167884ebc7ebf9b7d8cc8d2e9f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_unary.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6030a88f7093887ae223e632a0762df39d6acf7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b47590c158851cede8b37bf09fcbbf283aea9ce
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c56dff5a70e7f2775e786e4c7afc97bc0c9392d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_css.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38cb4cbe10460efebd2b8dc65aff46f890d359ee
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc83c37bedf7bfd9819892d76b88951e6a974e38
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_format.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..51d6ed4fe728938d03e5d87841f203d25768d0f2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_ipython_compat.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b9daf41fa9a64ee17cb0861c4f94c55a20c24b8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_printing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a341e73eeb604910d12333f2a0c3fa0c3a08ca5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_csv.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53bcac4db9d8b57feb830a7ebaea646393baf49c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_excel.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c0f8488371a7f4458698b32b37fc456f245f6ea
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_html.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45b3ef1987abfa75342b497feabee83ba9a0c695
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_latex.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88d2c4db38becb2d69b4715fbc19a268f393828d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3eadb96e6291c6e1651b8ec73752505313b7e17d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_string.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d506d71a0c5767d9a33871c2ae1c777d6bc454a9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5f2bb4c91b112e48e30517a44b3f9f51d665b0e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb554a7fd178cbefe46b4a185fccb433ed1df84d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c995e69febefbff9b2f0cebae7bee52923d16872
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0271d22e60c8e35feda65355faa329030f08169
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..863706f7046b2ed860b7411dde84495daf90e9d9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e4f06b1292e21e44f39cb84633365b48b2a8598
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8973443388dcc468f4eb5706c89eb14a054ee0da
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b53ed9b2ac595a2bf228398a44447a01846d782
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2908d9e03450ca31b1d7a1f051a409d987d5b88b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a32feec28f545d3ff957f242332eedba7718ba30
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ff6c1fe5d89826478cd4b5a77cd47eef7944c25
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e4712e8bb3d15959bddc0bd8697981b16bd8ef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_bar.py
@@ -0,0 +1,358 @@
+import io
+
+import numpy as np
+import pytest
+
+from pandas import (
+ NA,
+ DataFrame,
+ read_csv,
+)
+
+pytest.importorskip("jinja2")
+
+
+def bar_grad(a=None, b=None, c=None, d=None):
+ """Used in multiple tests to simplify formatting of expected result"""
+ ret = [("width", "10em")]
+ if all(x is None for x in [a, b, c, d]):
+ return ret
+ return ret + [
+ (
+ "background",
+ f"linear-gradient(90deg,{','.join([x for x in [a, b, c, d] if x])})",
+ )
+ ]
+
+
+def no_bar():
+ return bar_grad()
+
+
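+# expected CSS for a bar filled from 0% up to x% of the cell width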
+def bar_to(x, color="#d65f5f"):
+ return bar_grad(f" {color} {x:.1f}%", f" transparent {x:.1f}%")
+
+
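+# expected CSS for a bar filled between x% and y% of the cell width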
+def bar_from_to(x, y, color="#d65f5f"):
+ return bar_grad(
+ f" transparent {x:.1f}%",
+ f" {color} {x:.1f}%",
+ f" {color} {y:.1f}%",
+ f" transparent {y:.1f}%",
+ )
+
+
+@pytest.fixture
+def df_pos():
+ return DataFrame([[1], [2], [3]])
+
+
+@pytest.fixture
+def df_neg():
+ return DataFrame([[-1], [-2], [-3]])
+
+
+@pytest.fixture
+def df_mix():
+ return DataFrame([[-3], [1], [2]])
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(50), bar_to(100)]),
+ ("right", [bar_to(100), bar_from_to(50, 100), no_bar()]),
+ ("mid", [bar_to(33.33), bar_to(66.66), bar_to(100)]),
+ ("zero", [bar_from_to(50, 66.7), bar_from_to(50, 83.3), bar_from_to(50, 100)]),
+ ("mean", [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ (2.0, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ (np.median, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
+ ],
+)
+def test_align_positive_cases(df_pos, align, exp):
+ # test different align cases for all positive values
+ result = df_pos.style.bar(align=align)._compute().ctx
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [bar_to(100), bar_to(50), no_bar()]),
+ ("right", [no_bar(), bar_from_to(50, 100), bar_to(100)]),
+ ("mid", [bar_from_to(66.66, 100), bar_from_to(33.33, 100), bar_to(100)]),
+ ("zero", [bar_from_to(33.33, 50), bar_from_to(16.66, 50), bar_to(50)]),
+ ("mean", [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ (-2.0, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ (np.median, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
+ ],
+)
+def test_align_negative_cases(df_neg, align, exp):
+ # test different align cases for all negative values
+ result = df_neg.style.bar(align=align)._compute().ctx
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(80), bar_to(100)]),
+ ("right", [bar_to(100), bar_from_to(80, 100), no_bar()]),
+ ("mid", [bar_to(60), bar_from_to(60, 80), bar_from_to(60, 100)]),
+ ("zero", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ ("mean", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ (-0.0, [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
+ (np.nanmedian, [bar_to(50), no_bar(), bar_from_to(50, 62.5)]),
+ ],
+)
+@pytest.mark.parametrize("nans", [True, False])
+def test_align_mixed_cases(df_mix, align, exp, nans):
+ # test different align cases for mixed positive and negative values
+ # also test no impact of NaNs and no_bar
+ expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
+ if nans:
+ df_mix.loc[3, :] = np.nan
+ expected.update({(3, 0): no_bar()})
+ result = df_mix.style.bar(align=align)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ (
+ "left",
+ {
+ "index": [[no_bar(), no_bar()], [bar_to(100), bar_to(100)]],
+ "columns": [[no_bar(), bar_to(100)], [no_bar(), bar_to(100)]],
+ "none": [[no_bar(), bar_to(33.33)], [bar_to(66.66), bar_to(100)]],
+ },
+ ),
+ (
+ "mid",
+ {
+ "index": [[bar_to(33.33), bar_to(50)], [bar_to(100), bar_to(100)]],
+ "columns": [[bar_to(50), bar_to(100)], [bar_to(75), bar_to(100)]],
+ "none": [[bar_to(25), bar_to(50)], [bar_to(75), bar_to(100)]],
+ },
+ ),
+ (
+ "zero",
+ {
+ "index": [
+ [bar_from_to(50, 66.66), bar_from_to(50, 75)],
+ [bar_from_to(50, 100), bar_from_to(50, 100)],
+ ],
+ "columns": [
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ [bar_from_to(50, 87.5), bar_from_to(50, 100)],
+ ],
+ "none": [
+ [bar_from_to(50, 62.5), bar_from_to(50, 75)],
+ [bar_from_to(50, 87.5), bar_from_to(50, 100)],
+ ],
+ },
+ ),
+ (
+ 2,
+ {
+ "index": [
+ [bar_to(50), no_bar()],
+ [bar_from_to(50, 100), bar_from_to(50, 100)],
+ ],
+ "columns": [
+ [bar_to(50), no_bar()],
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ ],
+ "none": [
+ [bar_from_to(25, 50), no_bar()],
+ [bar_from_to(50, 75), bar_from_to(50, 100)],
+ ],
+ },
+ ),
+ ],
+)
+@pytest.mark.parametrize("axis", ["index", "columns", "none"])
+def test_align_axis(align, exp, axis):
+ # test all axis combinations with positive values and different aligns
+ data = DataFrame([[1, 2], [3, 4]])
+ result = (
+ data.style.bar(align=align, axis=None if axis == "none" else axis)
+ ._compute()
+ .ctx
+ )
+ expected = {
+ (0, 0): exp[axis][0][0],
+ (0, 1): exp[axis][0][1],
+ (1, 0): exp[axis][1][0],
+ (1, 1): exp[axis][1][1],
+ }
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "values, vmin, vmax",
+ [
+ ("positive", 1.5, 2.5),
+ ("negative", -2.5, -1.5),
+ ("mixed", -2.5, 1.5),
+ ],
+)
+@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately
+@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
+def test_vmin_vmax_clipping(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
+ # test that clipping occurs if any vmin > data_values or vmax < data_values
+ if align == "mid": # mid acts as left or right in each case
+ if values == "positive":
+ align = "left"
+ elif values == "negative":
+ align = "right"
+ df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
+ vmin = None if nullify == "vmin" else vmin
+ vmax = None if nullify == "vmax" else vmax
+
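+    # clip by hand: values above vmax become vmax and values below vmin become vmin;
+    # the 999/-999 sentinels disable a bound when it is None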
+ clip_df = df.where(df <= (vmax if vmax else 999), other=vmax)
+ clip_df = clip_df.where(clip_df >= (vmin if vmin else -999), other=vmin)
+
+ result = (
+ df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
+ ._compute()
+ .ctx
+ )
+ expected = clip_df.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "values, vmin, vmax",
+ [
+ ("positive", 0.5, 4.5),
+ ("negative", -4.5, -0.5),
+ ("mixed", -4.5, 4.5),
+ ],
+)
+@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately
+@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
+def test_vmin_vmax_widening(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
+ # test that widening occurs if any vmax > data_values or vmin < data_values
+ if align == "mid": # mid acts as left or right in each case
+ if values == "positive":
+ align = "left"
+ elif values == "negative":
+ align = "right"
+ df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
+ vmin = None if nullify == "vmin" else vmin
+ vmax = None if nullify == "vmax" else vmax
+
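+    # widen by hand: appending vmin and vmax as extra rows makes a plain bar() span the
+    # same range, so the original cells' CSS must be a subset of the expanded result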
+ expand_df = df.copy()
+ expand_df.loc[3, :], expand_df.loc[4, :] = vmin, vmax
+
+ result = (
+ df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
+ ._compute()
+ .ctx
+ )
+ expected = expand_df.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result.items() <= expected.items()
+
+
+def test_numerics():
+ # test data is pre-selected for numeric values
+ data = DataFrame([[1, "a"], [2, "b"]])
+ result = data.style.bar()._compute().ctx
+ assert (0, 1) not in result
+ assert (1, 1) not in result
+
+
+@pytest.mark.parametrize(
+ "align, exp",
+ [
+ ("left", [no_bar(), bar_to(100, "green")]),
+ ("right", [bar_to(100, "red"), no_bar()]),
+ ("mid", [bar_to(25, "red"), bar_from_to(25, 100, "green")]),
+ ("zero", [bar_from_to(33.33, 50, "red"), bar_from_to(50, 100, "green")]),
+ ],
+)
+def test_colors_mixed(align, exp):
+ data = DataFrame([[-1], [3]])
+ result = data.style.bar(align=align, color=["red", "green"])._compute().ctx
+ assert result == {(0, 0): exp[0], (1, 0): exp[1]}
+
+
+def test_bar_align_height():
+ # test when keyword height is used 'no-repeat center' and 'background-size' present
+ data = DataFrame([[1], [2]])
+ result = data.style.bar(align="left", height=50)._compute().ctx
+ bg_s = "linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%) no-repeat center"
+ expected = {
+ (0, 0): [("width", "10em")],
+ (1, 0): [
+ ("width", "10em"),
+ ("background", bg_s),
+ ("background-size", "100% 50.0%"),
+ ],
+ }
+ assert result == expected
+
+
+def test_bar_value_error_raises():
+ df = DataFrame({"A": [-100, -60, -30, -20]})
+
+ msg = "`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(align="poorly", color=["#d65f5f", "#5fba7d"]).to_html()
+
+ msg = r"`width` must be a value in \[0, 100\]"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(width=200).to_html()
+
+ msg = r"`height` must be a value in \[0, 100\]"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(height=200).to_html()
+
+
+def test_bar_color_and_cmap_error_raises():
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ msg = "`color` and `cmap` cannot both be given"
+ # Test that providing both color and cmap raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color="#d65f5f", cmap="viridis").to_html()
+
+
+def test_bar_invalid_color_type_error_raises():
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ msg = (
+ r"`color` must be string or list or tuple of 2 strings,"
+ r"\(eg: color=\['#d65f5f', '#5fba7d'\]\)"
+ )
+ # Test that providing an invalid color type raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color=123).to_html()
+
+ # Test that providing a color list with more than two elements raises a ValueError
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color=["#d65f5f", "#5fba7d", "#abcdef"]).to_html()
+
+
+def test_styler_bar_with_NA_values():
+ df1 = DataFrame({"A": [1, 2, NA, 4]})
+ df2 = DataFrame([[NA, NA], [NA, NA]])
+ expected_substring = "style type="
+ html_output1 = df1.style.bar(subset="A").to_html()
+ html_output2 = df2.style.bar(align="left", axis=None).to_html()
+ assert expected_substring in html_output1
+ assert expected_substring in html_output2
+
+
+def test_style_bar_with_pyarrow_NA_values():
+ data = """name,age,test1,test2,teacher
+ Adam,15,95.0,80,Ashby
+ Bob,16,81.0,82,Ashby
+ Dave,16,89.0,84,Jones
+ Fred,15,,88,Jones"""
+ df = read_csv(io.StringIO(data), dtype_backend="pyarrow")
+ expected_substring = "style type="
+ html_output = df.style.bar(subset="test1").to_html()
+ assert expected_substring in html_output
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..d52e3a37e7693dadce34f73fc03a0790c7a0b4d3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_exceptions.py
@@ -0,0 +1,44 @@
+import pytest
+
+jinja2 = pytest.importorskip("jinja2")
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+)
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ data=[[0, -0.609], [1, -1.228]],
+ columns=["A", "B"],
+ index=["x", "y"],
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+def test_concat_bad_columns(styler):
+ msg = "`other.data` must have same columns as `Styler.data"
+ with pytest.raises(ValueError, match=msg):
+ styler.concat(DataFrame([[1, 2]]).style)
+
+
+def test_concat_bad_type(styler):
+ msg = "`other` must be of type `Styler`"
+ with pytest.raises(TypeError, match=msg):
+ styler.concat(DataFrame([[1, 2]]))
+
+
+def test_concat_bad_index_levels(styler, df):
+ df = df.copy()
+ df.index = MultiIndex.from_tuples([(0, 0), (1, 1)])
+ msg = "number of index levels must be same in `other`"
+ with pytest.raises(ValueError, match=msg):
+ styler.concat(df.style)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c84816ead140b95f14df8dbeccc83b317ac239a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py
@@ -0,0 +1,562 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ NA,
+ DataFrame,
+ IndexSlice,
+ MultiIndex,
+ NaT,
+ Timestamp,
+ option_context,
+)
+
+pytest.importorskip("jinja2")
+from pandas.io.formats.style import Styler
+from pandas.io.formats.style_render import _str_escape
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ data=[[0, -0.609], [1, -1.228]],
+ columns=["A", "B"],
+ index=["x", "y"],
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+@pytest.fixture
+def df_multi():
+ return DataFrame(
+ data=np.arange(16).reshape(4, 4),
+ columns=MultiIndex.from_product([["A", "B"], ["a", "b"]]),
+ index=MultiIndex.from_product([["X", "Y"], ["x", "y"]]),
+ )
+
+
+@pytest.fixture
+def styler_multi(df_multi):
+ return Styler(df_multi, uuid_len=0)
+
+
+def test_display_format(styler):
+ ctx = styler.format("{:0.1f}")._translate(True, True)
+ assert all(["display_value" in c for c in row] for row in ctx["body"])
+ assert all([len(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"])
+ assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3
+
+
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize("columns", [True, False])
+def test_display_format_index(styler, index, columns):
+ exp_index = ["x", "y"]
+ if index:
+ styler.format_index(lambda v: v.upper(), axis=0) # test callable
+ exp_index = ["X", "Y"]
+
+ exp_columns = ["A", "B"]
+ if columns:
+ styler.format_index("*{}*", axis=1) # test string
+ exp_columns = ["*A*", "*B*"]
+
+ ctx = styler._translate(True, True)
+
+ for r, row in enumerate(ctx["body"]):
+ assert row[0]["display_value"] == exp_index[r]
+
+ for c, col in enumerate(ctx["head"][1:]):
+ assert col["display_value"] == exp_columns[c]
+
+
+def test_format_dict(styler):
+ ctx = styler.format({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "0.0"
+ assert ctx["body"][0][2]["display_value"] == "-60.90%"
+
+
+def test_format_index_dict(styler):
+ ctx = styler.format_index({0: lambda v: v.upper()})._translate(True, True)
+ for i, val in enumerate(["X", "Y"]):
+ assert ctx["body"][i][0]["display_value"] == val
+
+
+def test_format_string(styler):
+ ctx = styler.format("{:.2f}")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "0.00"
+ assert ctx["body"][0][2]["display_value"] == "-0.61"
+ assert ctx["body"][1][1]["display_value"] == "1.00"
+ assert ctx["body"][1][2]["display_value"] == "-1.23"
+
+
+def test_format_callable(styler):
+ ctx = styler.format(lambda v: "neg" if v < 0 else "pos")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "pos"
+ assert ctx["body"][0][2]["display_value"] == "neg"
+ assert ctx["body"][1][1]["display_value"] == "pos"
+ assert ctx["body"][1][2]["display_value"] == "neg"
+
+
+def test_format_with_na_rep():
+ # GH 21527 28358
+ df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
+
+ ctx = df.style.format(None, na_rep="-")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "-"
+ assert ctx["body"][0][2]["display_value"] == "-"
+
+ ctx = df.style.format("{:.2%}", na_rep="-")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "-"
+ assert ctx["body"][0][2]["display_value"] == "-"
+ assert ctx["body"][1][1]["display_value"] == "110.00%"
+ assert ctx["body"][1][2]["display_value"] == "120.00%"
+
+ ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate(True, True)
+ assert ctx["body"][0][2]["display_value"] == "-"
+ assert ctx["body"][1][2]["display_value"] == "120.00%"
+
+
+def test_format_index_with_na_rep():
+ df = DataFrame([[1, 2, 3, 4, 5]], columns=["A", None, np.nan, NaT, NA])
+ ctx = df.style.format_index(None, na_rep="--", axis=1)._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == "A"
+ for i in [2, 3, 4, 5]:
+ assert ctx["head"][0][i]["display_value"] == "--"
+
+
+def test_format_non_numeric_na():
+ # GH 21527 28358
+ df = DataFrame(
+ {
+ "object": [None, np.nan, "foo"],
+ "datetime": [None, NaT, Timestamp("20120101")],
+ }
+ )
+ ctx = df.style.format(None, na_rep="-")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "-"
+ assert ctx["body"][0][2]["display_value"] == "-"
+ assert ctx["body"][1][1]["display_value"] == "-"
+ assert ctx["body"][1][2]["display_value"] == "-"
+
+
+@pytest.mark.parametrize(
+ "func, attr, kwargs",
+ [
+ ("format", "_display_funcs", {}),
+ ("format_index", "_display_funcs_index", {"axis": 0}),
+ ("format_index", "_display_funcs_columns", {"axis": 1}),
+ ],
+)
+def test_format_clear(styler, func, attr, kwargs):
+ assert (0, 0) not in getattr(styler, attr) # using default
+ getattr(styler, func)("{:.2f}", **kwargs)
+ assert (0, 0) in getattr(styler, attr) # formatter is specified
+ getattr(styler, func)(**kwargs)
+ assert (0, 0) not in getattr(styler, attr) # formatter cleared to default
+
+
+@pytest.mark.parametrize(
+ "escape, exp",
+ [
+ ("html", "<>&"%$#_{}~^\\~ ^ \\ "),
+ (
+ "latex",
+ '<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum '
+ "\\textbackslash \\textasciitilde \\space \\textasciicircum \\space "
+ "\\textbackslash \\space ",
+ ),
+ ],
+)
+def test_format_escape_html(escape, exp):
+ chars = '<>&"%$#_{}~^\\~ ^ \\ '
+ df = DataFrame([[chars]])
+
+ s = Styler(df, uuid_len=0).format("&{0}&", escape=None)
+    expected = f'<td id="T__row0_col0" class="data row0 col0" >&{chars}&</td>'
+ assert expected in s.to_html()
+
+ # only the value should be escaped before passing to the formatter
+ s = Styler(df, uuid_len=0).format("&{0}&", escape=escape)
+    expected = f'<td id="T__row0_col0" class="data row0 col0" >&{exp}&</td>'
+ assert expected in s.to_html()
+
+ # also test format_index()
+ styler = Styler(DataFrame(columns=[chars]), uuid_len=0)
+ styler.format_index("&{0}&", escape=None, axis=1)
+ assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{chars}&"
+ styler.format_index("&{0}&", escape=escape, axis=1)
+ assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{exp}&"
+
+
+@pytest.mark.parametrize(
+ "chars, expected",
+ [
+ (
+ r"$ \$&%#_{}~^\ $ &%#_{}~^\ $",
+ "".join(
+ [
+ r"$ \$&%#_{}~^\ $ ",
+ r"\&\%\#\_\{\}\textasciitilde \textasciicircum ",
+ r"\textbackslash \space \$",
+ ]
+ ),
+ ),
+ (
+ r"\( &%#_{}~^\ \) &%#_{}~^\ \(",
+ "".join(
+ [
+ r"\( &%#_{}~^\ \) ",
+ r"\&\%\#\_\{\}\textasciitilde \textasciicircum ",
+ r"\textbackslash \space \textbackslash (",
+ ]
+ ),
+ ),
+ (
+ r"$\&%#_{}^\$",
+ r"\$\textbackslash \&\%\#\_\{\}\textasciicircum \textbackslash \$",
+ ),
+ (
+ r"$ \frac{1}{2} $ \( \frac{1}{2} \)",
+ "".join(
+ [
+ r"$ \frac{1}{2} $",
+ r" \textbackslash ( \textbackslash frac\{1\}\{2\} \textbackslash )",
+ ]
+ ),
+ ),
+ ],
+)
+def test_format_escape_latex_math(chars, expected):
+ # GH 51903
+ # latex-math escape works for each DataFrame cell separately. If we have
+ # a combination of dollar signs and brackets, the dollar sign would apply.
+ df = DataFrame([[chars]])
+ s = df.style.format("{0}", escape="latex-math")
+ assert s._translate(True, True)["body"][0][1]["display_value"] == expected
+
+
+def test_format_escape_na_rep():
+ # tests the na_rep is not escaped
+ df = DataFrame([['<>&"', None]])
+ s = Styler(df, uuid_len=0).format("X&{0}>X", escape="html", na_rep="&")
+    ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>'
+    expected2 = '<td id="T__row0_col1" class="data row0 col1" >&</td>'
+ assert ex in s.to_html()
+ assert expected2 in s.to_html()
+
+ # also test for format_index()
+ df = DataFrame(columns=['<>&"', None])
+ styler = Styler(df, uuid_len=0)
+ styler.format_index("X&{0}>X", escape="html", na_rep="&", axis=1)
+ ctx = styler._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == "X&<>&">X"
+ assert ctx["head"][0][2]["display_value"] == "&"
+
+
+def test_format_escape_floats(styler):
+ # test given formatter for number format is not impacted by escape
+ s = styler.format("{:.1f}", escape="html")
+ for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]:
+ assert expected in s.to_html()
+ # tests precision of floats is not impacted by escape
+ s = styler.format(precision=1, escape="html")
+ for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]:
+ assert expected in s.to_html()
+
+
+@pytest.mark.parametrize("formatter", [5, True, [2.0]])
+@pytest.mark.parametrize("func", ["format", "format_index"])
+def test_format_raises(styler, formatter, func):
+ with pytest.raises(TypeError, match="expected str or callable"):
+ getattr(styler, func)(formatter)
+
+
+@pytest.mark.parametrize(
+ "precision, expected",
+ [
+ (1, ["1.0", "2.0", "3.2", "4.6"]),
+ (2, ["1.00", "2.01", "3.21", "4.57"]),
+ (3, ["1.000", "2.009", "3.212", "4.566"]),
+ ],
+)
+def test_format_with_precision(precision, expected):
+ # Issue #13257
+ df = DataFrame([[1.0, 2.0090, 3.2121, 4.566]], columns=[1.0, 2.0090, 3.2121, 4.566])
+ styler = Styler(df)
+ styler.format(precision=precision)
+ styler.format_index(precision=precision, axis=1)
+
+ ctx = styler._translate(True, True)
+ for col, exp in enumerate(expected):
+ assert ctx["body"][0][col + 1]["display_value"] == exp # format test
+ assert ctx["head"][0][col + 1]["display_value"] == exp # format_index test
+
+
+@pytest.mark.parametrize("axis", [0, 1])
+@pytest.mark.parametrize(
+ "level, expected",
+ [
+ (0, ["X", "X", "_", "_"]), # level int
+ ("zero", ["X", "X", "_", "_"]), # level name
+ (1, ["_", "_", "X", "X"]), # other level int
+ ("one", ["_", "_", "X", "X"]), # other level name
+ ([0, 1], ["X", "X", "X", "X"]), # both levels
+ ([0, "zero"], ["X", "X", "_", "_"]), # level int and name simultaneous
+ ([0, "one"], ["X", "X", "X", "X"]), # both levels as int and name
+ (["one", "zero"], ["X", "X", "X", "X"]), # both level names, reversed
+ ],
+)
+def test_format_index_level(axis, level, expected):
+ midx = MultiIndex.from_arrays([["_", "_"], ["_", "_"]], names=["zero", "one"])
+ df = DataFrame([[1, 2], [3, 4]])
+ if axis == 0:
+ df.index = midx
+ else:
+ df.columns = midx
+
+ styler = df.style.format_index(lambda v: "X", level=level, axis=axis)
+ ctx = styler._translate(True, True)
+
+ if axis == 0: # compare index
+ result = [ctx["body"][s][0]["display_value"] for s in range(2)]
+ result += [ctx["body"][s][1]["display_value"] for s in range(2)]
+ else: # compare columns
+ result = [ctx["head"][0][s + 1]["display_value"] for s in range(2)]
+ result += [ctx["head"][1][s + 1]["display_value"] for s in range(2)]
+
+ assert expected == result
+
+
+def test_format_subset():
+ df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
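+    # cells outside the subset keep the default display (precision 6), e.g. "1.123400"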
+ ctx = df.style.format(
+ {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=IndexSlice[0, :]
+ )._translate(True, True)
+ expected = "0.1"
+ raw_11 = "1.123400"
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][1][1]["display_value"] == raw_11
+ assert ctx["body"][0][2]["display_value"] == "12.34%"
+
+ ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, :])._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][1][1]["display_value"] == raw_11
+
+ ctx = df.style.format("{:0.1f}", subset=IndexSlice["a"])._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][0][2]["display_value"] == "0.123400"
+
+ ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, "a"])._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][1][1]["display_value"] == raw_11
+
+ ctx = df.style.format("{:0.1f}", subset=IndexSlice[[0, 1], ["a"]])._translate(
+ True, True
+ )
+ assert ctx["body"][0][1]["display_value"] == expected
+ assert ctx["body"][1][1]["display_value"] == "1.1"
+ assert ctx["body"][0][2]["display_value"] == "0.123400"
+ assert ctx["body"][1][2]["display_value"] == raw_11
+
+
+@pytest.mark.parametrize("formatter", [None, "{:,.1f}"])
+@pytest.mark.parametrize("decimal", [".", "*"])
+@pytest.mark.parametrize("precision", [None, 2])
+@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)])
+def test_format_thousands(formatter, decimal, precision, func, col):
+ styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style
+ result = getattr(styler, func)( # testing float
+ thousands="_", formatter=formatter, decimal=decimal, precision=precision
+ )._translate(True, True)
+ assert "1_000_000" in result["body"][0][col]["display_value"]
+
+ styler = DataFrame([[1000000]], index=[1000000]).style
+ result = getattr(styler, func)( # testing int
+ thousands="_", formatter=formatter, decimal=decimal, precision=precision
+ )._translate(True, True)
+ assert "1_000_000" in result["body"][0][col]["display_value"]
+
+ styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style
+ result = getattr(styler, func)( # testing complex
+ thousands="_", formatter=formatter, decimal=decimal, precision=precision
+ )._translate(True, True)
+ assert "1_000_000" in result["body"][0][col]["display_value"]
+
+
+@pytest.mark.parametrize("formatter", [None, "{:,.4f}"])
+@pytest.mark.parametrize("thousands", [None, ",", "*"])
+@pytest.mark.parametrize("precision", [None, 4])
+@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)])
+def test_format_decimal(formatter, thousands, precision, func, col):
+ styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style
+ result = getattr(styler, func)( # testing float
+ decimal="_", formatter=formatter, thousands=thousands, precision=precision
+ )._translate(True, True)
+ assert "000_123" in result["body"][0][col]["display_value"]
+
+ styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style
+ result = getattr(styler, func)( # testing complex
+ decimal="_", formatter=formatter, thousands=thousands, precision=precision
+ )._translate(True, True)
+ assert "000_123" in result["body"][0][col]["display_value"]
+
+
+def test_str_escape_error():
+ msg = "`escape` only permitted in {'html', 'latex', 'latex-math'}, got "
+ with pytest.raises(ValueError, match=msg):
+ _str_escape("text", "bad_escape")
+
+ with pytest.raises(ValueError, match=msg):
+ _str_escape("text", [])
+
+ _str_escape(2.00, "bad_escape") # OK since dtype is float
+
+
+def test_long_int_formatting():
+ df = DataFrame(data=[[1234567890123456789]], columns=["test"])
+ styler = df.style
+ ctx = styler._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "1234567890123456789"
+
+ styler = df.style.format(thousands="_")
+ ctx = styler._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "1_234_567_890_123_456_789"
+
+
+def test_format_options():
+ df = DataFrame({"int": [2000, 1], "float": [1.009, None], "str": ["&<", "&~"]})
+ ctx = df.style._translate(True, True)
+
+ # test option: na_rep
+ assert ctx["body"][1][2]["display_value"] == "nan"
+ with option_context("styler.format.na_rep", "MISSING"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][1][2]["display_value"] == "MISSING"
+
+ # test option: decimal and precision
+ assert ctx["body"][0][2]["display_value"] == "1.009000"
+ with option_context("styler.format.decimal", "_"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][2]["display_value"] == "1_009000"
+ with option_context("styler.format.precision", 2):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][2]["display_value"] == "1.01"
+
+ # test option: thousands
+ assert ctx["body"][0][1]["display_value"] == "2000"
+ with option_context("styler.format.thousands", "_"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][1]["display_value"] == "2_000"
+
+ # test option: escape
+ assert ctx["body"][0][3]["display_value"] == "&<"
+ assert ctx["body"][1][3]["display_value"] == "&~"
+ with option_context("styler.format.escape", "html"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][3]["display_value"] == "&<"
+ with option_context("styler.format.escape", "latex"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde "
+ with option_context("styler.format.escape", "latex-math"):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde "
+
+ # test option: formatter
+ with option_context("styler.format.formatter", {"int": "{:,.2f}"}):
+ ctx_with_op = df.style._translate(True, True)
+ assert ctx_with_op["body"][0][1]["display_value"] == "2,000.00"
+
+
+def test_precision_zero(df):
+ styler = Styler(df, precision=0)
+ ctx = styler._translate(True, True)
+ assert ctx["body"][0][2]["display_value"] == "-1"
+ assert ctx["body"][1][2]["display_value"] == "-1"
+
+
+@pytest.mark.parametrize(
+ "formatter, exp",
+ [
+ (lambda x: f"{x:.3f}", "9.000"),
+ ("{:.2f}", "9.00"),
+ ({0: "{:.1f}"}, "9.0"),
+ (None, "9"),
+ ],
+)
+def test_formatter_options_validator(formatter, exp):
+ df = DataFrame([[9]])
+ with option_context("styler.format.formatter", formatter):
+ assert f" {exp} " in df.style.to_latex()
+
+
+def test_formatter_options_raises():
+ msg = "Value must be an instance of"
+ with pytest.raises(ValueError, match=msg):
+ with option_context("styler.format.formatter", ["bad", "type"]):
+ DataFrame().style.to_latex()
+
+
+def test_1level_multiindex():
+ # GH 43383
+ midx = MultiIndex.from_product([[1, 2]], names=[""])
+ df = DataFrame(-1, index=midx, columns=[0, 1])
+ ctx = df.style._translate(True, True)
+ assert ctx["body"][0][0]["display_value"] == "1"
+ assert ctx["body"][0][0]["is_visible"] is True
+ assert ctx["body"][1][0]["display_value"] == "2"
+ assert ctx["body"][1][0]["is_visible"] is True
+
+
+def test_boolean_format():
+ # gh 46384: booleans do not collapse to integer representation on display
+ df = DataFrame([[True, False]])
+ ctx = df.style._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] is True
+ assert ctx["body"][0][2]["display_value"] is False
+
+
+@pytest.mark.parametrize(
+ "hide, labels",
+ [
+ (False, [1, 2]),
+ (True, [1, 2, 3, 4]),
+ ],
+)
+def test_relabel_raise_length(styler_multi, hide, labels):
+ if hide:
+ styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")])
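+    # ``labels`` must match the number of visible rows: 4 by default, 2 after hiding two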
+ with pytest.raises(ValueError, match="``labels`` must be of length equal"):
+ styler_multi.relabel_index(labels=labels)
+
+
+def test_relabel_index(styler_multi):
+ labels = [(1, 2), (3, 4)]
+ styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")])
+ styler_multi.relabel_index(labels=labels)
+ ctx = styler_multi._translate(True, True)
+ assert {"value": "X", "display_value": 1}.items() <= ctx["body"][0][0].items()
+ assert {"value": "y", "display_value": 2}.items() <= ctx["body"][0][1].items()
+ assert {"value": "Y", "display_value": 3}.items() <= ctx["body"][1][0].items()
+ assert {"value": "x", "display_value": 4}.items() <= ctx["body"][1][1].items()
+
+
+def test_relabel_columns(styler_multi):
+ labels = [(1, 2), (3, 4)]
+ styler_multi.hide(axis=1, subset=[("A", "a"), ("B", "b")])
+ styler_multi.relabel_index(axis=1, labels=labels)
+ ctx = styler_multi._translate(True, True)
+ assert {"value": "A", "display_value": 1}.items() <= ctx["head"][0][3].items()
+ assert {"value": "B", "display_value": 3}.items() <= ctx["head"][0][4].items()
+ assert {"value": "b", "display_value": 2}.items() <= ctx["head"][1][3].items()
+ assert {"value": "a", "display_value": 4}.items() <= ctx["head"][1][4].items()
+
+
+def test_relabel_roundtrip(styler):
+ styler.relabel_index(["{}", "{}"])
+ ctx = styler._translate(True, True)
+ assert {"value": "x", "display_value": "x"}.items() <= ctx["body"][0][0].items()
+ assert {"value": "y", "display_value": "y"}.items() <= ctx["body"][1][0].items()
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d59719010ee03cc53373a1c96f5f8c5611d7681
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py
@@ -0,0 +1,218 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ NA,
+ DataFrame,
+ IndexSlice,
+)
+
+pytest.importorskip("jinja2")
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture(params=[(None, "float64"), (NA, "Int64")])
+def df(request):
+ # GH 45804
+ return DataFrame(
+ {"A": [0, np.nan, 10], "B": [1, request.param[0], 2]}, dtype=request.param[1]
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+def test_highlight_null(styler):
+ result = styler.highlight_null()._compute().ctx
+ expected = {
+ (1, 0): [("background-color", "red")],
+ (1, 1): [("background-color", "red")],
+ }
+ assert result == expected
+
+
+def test_highlight_null_subset(styler):
+ # GH 31345
+ result = (
+ styler.highlight_null(color="red", subset=["A"])
+ .highlight_null(color="green", subset=["B"])
+ ._compute()
+ .ctx
+ )
+ expected = {
+ (1, 0): [("background-color", "red")],
+ (1, 1): [("background-color", "green")],
+ }
+ assert result == expected
+
+
+@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"])
+def test_highlight_minmax_basic(df, f):
+ expected = {
+ (0, 1): [("background-color", "red")],
+ # ignores NaN row,
+ (2, 0): [("background-color", "red")],
+ }
+ if f == "highlight_min":
+ df = -df
+ result = getattr(df.style, f)(axis=1, color="red")._compute().ctx
+ assert result == expected
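+    # Note: the ``df = -df`` negation above lets one ``expected`` dict serve
+    # both methods: negating the data turns the maxima into minima at the same
+    # positions, so the same cells are highlighted either way.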
+
+
+@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"])
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"axis": None, "color": "red"}, # test axis
+ {"axis": 0, "subset": ["A"], "color": "red"}, # test subset and ignores NaN
+ {"axis": None, "props": "background-color: red"}, # test props
+ ],
+)
+def test_highlight_minmax_ext(df, f, kwargs):
+ expected = {(2, 0): [("background-color", "red")]}
+ if f == "highlight_min":
+ df = -df
+ result = getattr(df.style, f)(**kwargs)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"])
+@pytest.mark.parametrize("axis", [None, 0, 1])
+def test_highlight_minmax_nulls(f, axis):
+ # GH 42750
+ expected = {
+ (1, 0): [("background-color", "yellow")],
+ (1, 1): [("background-color", "yellow")],
+ }
+ if axis == 1:
+ expected.update({(2, 1): [("background-color", "yellow")]})
+
+ if f == "highlight_max":
+ df = DataFrame({"a": [NA, 1, None], "b": [np.nan, 1, -1]})
+ else:
+ df = DataFrame({"a": [NA, -1, None], "b": [np.nan, -1, 1]})
+
+ result = getattr(df.style, f)(axis=axis)._compute().ctx
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"left": 0, "right": 1}, # test basic range
+ {"left": 0, "right": 1, "props": "background-color: yellow"}, # test props
+ {"left": -100, "right": 100, "subset": IndexSlice[[0, 1], :]}, # test subset
+ {"left": 0, "subset": IndexSlice[[0, 1], :]}, # test no right
+ {"right": 1}, # test no left
+ {"left": [0, 0, 11], "axis": 0}, # test left as sequence
+ {"left": DataFrame({"A": [0, 0, 11], "B": [1, 1, 11]}), "axis": None}, # axis
+ {"left": 0, "right": [0, 1], "axis": 1}, # test sequence right
+ ],
+)
+def test_highlight_between(styler, kwargs):
+ expected = {
+ (0, 0): [("background-color", "yellow")],
+ (0, 1): [("background-color", "yellow")],
+ }
+ result = styler.highlight_between(**kwargs)._compute().ctx
+ assert result == expected
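+    # Note (illustrative): ``left``/``right`` accept scalars, sequences or
+    # DataFrames.  A sequence is aligned along ``axis`` (e.g. ``left=[0, 0, 11]``
+    # with ``axis=0`` gives each of the three rows its own lower bound), while a
+    # DataFrame with ``axis=None`` supplies a per-cell bound.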
+
+
+@pytest.mark.parametrize(
+ "arg, map, axis",
+ [
+ ("left", [1, 2], 0), # 0 axis has 3 elements not 2
+ ("left", [1, 2, 3], 1), # 1 axis has 2 elements not 3
+ ("left", np.array([[1, 2], [1, 2]]), None), # df is (2,3) not (2,2)
+ ("right", [1, 2], 0), # same tests as above for 'right' not 'left'
+ ("right", [1, 2, 3], 1), # ..
+ ("right", np.array([[1, 2], [1, 2]]), None), # ..
+ ],
+)
+def test_highlight_between_raises(arg, styler, map, axis):
+ msg = f"supplied '{arg}' is not correct shape"
+ with pytest.raises(ValueError, match=msg):
+ styler.highlight_between(**{arg: map, "axis": axis})._compute()
+
+
+def test_highlight_between_raises2(styler):
+ msg = "values can be 'both', 'left', 'right', or 'neither'"
+ with pytest.raises(ValueError, match=msg):
+ styler.highlight_between(inclusive="badstring")._compute()
+
+ with pytest.raises(ValueError, match=msg):
+ styler.highlight_between(inclusive=1)._compute()
+
+
+@pytest.mark.parametrize(
+ "inclusive, expected",
+ [
+ (
+ "both",
+ {
+ (0, 0): [("background-color", "yellow")],
+ (0, 1): [("background-color", "yellow")],
+ },
+ ),
+ ("neither", {}),
+ ("left", {(0, 0): [("background-color", "yellow")]}),
+ ("right", {(0, 1): [("background-color", "yellow")]}),
+ ],
+)
+def test_highlight_between_inclusive(styler, inclusive, expected):
+ kwargs = {"left": 0, "right": 1, "subset": IndexSlice[[0, 1], :]}
+ result = styler.highlight_between(**kwargs, inclusive=inclusive)._compute()
+ assert result.ctx == expected
+
+
+@pytest.mark.parametrize(
+ "kwargs",
+ [
+ {"q_left": 0.5, "q_right": 1, "axis": 0}, # base case
+ {"q_left": 0.5, "q_right": 1, "axis": None}, # test axis
+ {"q_left": 0, "q_right": 1, "subset": IndexSlice[2, :]}, # test subset
+ {"q_left": 0.5, "axis": 0}, # test no high
+ {"q_right": 1, "subset": IndexSlice[2, :], "axis": 1}, # test no low
+ {"q_left": 0.5, "axis": 0, "props": "background-color: yellow"}, # tst prop
+ ],
+)
+def test_highlight_quantile(styler, kwargs):
+ expected = {
+ (2, 0): [("background-color", "yellow")],
+ (2, 1): [("background-color", "yellow")],
+ }
+ result = styler.highlight_quantile(**kwargs)._compute().ctx
+ assert result == expected
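+    # Note: ``q_left``/``q_right`` are quantile cut-offs rather than values;
+    # row 2 holds the largest entries of the fixture, so any band covering the
+    # upper quantiles selects cells (2, 0) and (2, 1).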
+
+
+@pytest.mark.parametrize(
+ "f,kwargs",
+ [
+ ("highlight_min", {"axis": 1, "subset": IndexSlice[1, :]}),
+ ("highlight_max", {"axis": 0, "subset": [0]}),
+ ("highlight_quantile", {"axis": None, "q_left": 0.6, "q_right": 0.8}),
+ ("highlight_between", {"subset": [0]}),
+ ],
+)
+@pytest.mark.parametrize(
+ "df",
+ [
+ DataFrame([[0, 10], [20, 30]], dtype=int),
+ DataFrame([[0, 10], [20, 30]], dtype=float),
+ DataFrame([[0, 10], [20, 30]], dtype="datetime64[ns]"),
+ DataFrame([[0, 10], [20, 30]], dtype=str),
+ DataFrame([[0, 10], [20, 30]], dtype="timedelta64[ns]"),
+ ],
+)
+def test_all_highlight_dtypes(f, kwargs, df):
+ if f == "highlight_quantile" and isinstance(df.iloc[0, 0], (str)):
+ return None # quantile incompatible with str
+ if f == "highlight_between":
+ kwargs["left"] = df.iloc[1, 0] # set the range low for testing
+
+ expected = {(1, 0): [("background-color", "yellow")]}
+ result = getattr(df.style, f)(**kwargs)._compute().ctx
+ assert result == expected
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e345eb82ed3c31e7a5e0f89fa574aea84923dd7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py
@@ -0,0 +1,1009 @@
+from textwrap import (
+ dedent,
+ indent,
+)
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+ option_context,
+)
+
+jinja2 = pytest.importorskip("jinja2")
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def env():
+ loader = jinja2.PackageLoader("pandas", "io/formats/templates")
+ env = jinja2.Environment(loader=loader, trim_blocks=True)
+ return env
+
+
+@pytest.fixture
+def styler():
+ return Styler(DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"]))
+
+
+@pytest.fixture
+def styler_mi():
+ midx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ return Styler(DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx))
+
+
+@pytest.fixture
+def tpl_style(env):
+ return env.get_template("html_style.tpl")
+
+
+@pytest.fixture
+def tpl_table(env):
+ return env.get_template("html_table.tpl")
+
+
+def test_html_template_extends_options():
+ # make sure if templates are edited tests are updated as are setup fixtures
+ # to understand the dependency
+ with open("pandas/io/formats/templates/html.tpl", encoding="utf-8") as file:
+ result = file.read()
+ assert "{% include html_style_tpl %}" in result
+ assert "{% include html_table_tpl %}" in result
+
+
+def test_exclude_styles(styler):
+ result = styler.to_html(exclude_styles=True, doctype_html=True)
+    expected = dedent(
+        """\
+        <!DOCTYPE html>
+        <html>
+        <head>
+        <meta charset="utf-8">
+        </head>
+        <body>
+        <table>
+          <thead>
+            <tr>
+              <th >&nbsp;</th>
+              <th >A</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <th >a</th>
+              <td >2.610000</td>
+            </tr>
+            <tr>
+              <th >b</th>
+              <td >2.690000</td>
+            </tr>
+          </tbody>
+        </table>
+        </body>
+        </html>
+        """
+    )
+ assert result == expected
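+    # ``exclude_styles=True`` is what strips the <style> block and the id/class
+    # attributes from the markup above, leaving bare semantic HTML.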
+
+
+def test_w3_html_format(styler):
+ styler.set_uuid("").set_table_styles([{"selector": "th", "props": "att2:v2;"}]).map(
+ lambda x: "att1:v1;"
+ ).set_table_attributes('class="my-cls1" style="attr3:v3;"').set_td_classes(
+ DataFrame(["my-cls2"], index=["a"], columns=["A"])
+ ).format(
+ "{:.1f}"
+ ).set_caption(
+ "A comprehensive test"
+ )
+    expected = dedent(
+        """\
+        <style type="text/css">
+        #T_ th {
+          att2: v2;
+        }
+        #T__row0_col0, #T__row1_col0 {
+          att1: v1;
+        }
+        </style>
+        <table id="T_" class="my-cls1" style="attr3:v3;">
+          <caption>A comprehensive test</caption>
+          <thead>
+            <tr>
+              <th class="blank level0" >&nbsp;</th>
+              <th id="T__level0_col0" class="col_heading level0 col0" >A</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <th id="T__level0_row0" class="row_heading level0 row0" >a</th>
+              <td id="T__row0_col0" class="data row0 col0 my-cls2" >2.6</td>
+            </tr>
+            <tr>
+              <th id="T__level0_row1" class="row_heading level0 row1" >b</th>
+              <td id="T__row1_col0" class="data row1 col0" >2.7</td>
+            </tr>
+          </tbody>
+        </table>
+        """
+    )
+ assert expected == styler.to_html()
+
+
+def test_colspan_w3():
+ # GH 36223
+ df = DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]])
+ styler = Styler(df, uuid="_", cell_ids=False)
+    assert (
+        '<th id="T___level0_col0" class="col_heading level0 col0" colspan="2">l0</th>'
+        in styler.to_html()
+    )
+
+
+def test_rowspan_w3():
+ # GH 38533
+ df = DataFrame(data=[[1, 2]], index=[["l0", "l0"], ["l1a", "l1b"]])
+ styler = Styler(df, uuid="_", cell_ids=False)
+    assert (
+        '<th id="T___level0_row0" class="row_heading level0 row0" rowspan="2">l0</th>'
+        in styler.to_html()
+    )
+
+
+def test_styles(styler):
+ styler.set_uuid("abc")
+ styler.set_table_styles([{"selector": "td", "props": "color: red;"}])
+ result = styler.to_html(doctype_html=True)
+    expected = dedent(
+        """\
+        <!DOCTYPE html>
+        <html>
+        <head>
+        <meta charset="utf-8">
+        <style type="text/css">
+        #T_abc td {
+          color: red;
+        }
+        </style>
+        </head>
+        <body>
+        <table id="T_abc">
+          <thead>
+            <tr>
+              <th class="blank level0" >&nbsp;</th>
+              <th id="T_abc_level0_col0" class="col_heading level0 col0" >A</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <th id="T_abc_level0_row0" class="row_heading level0 row0" >a</th>
+              <td id="T_abc_row0_col0" class="data row0 col0" >2.610000</td>
+            </tr>
+            <tr>
+              <th id="T_abc_level0_row1" class="row_heading level0 row1" >b</th>
+              <td id="T_abc_row1_col0" class="data row1 col0" >2.690000</td>
+            </tr>
+          </tbody>
+        </table>
+        </body>
+        </html>
+        """
+    )
+ assert result == expected
+
+
+def test_doctype(styler):
+ result = styler.to_html(doctype_html=False)
+ assert "" not in result
+ assert "" not in result
+ assert "" not in result
+ assert "" not in result
+
+
+def test_doctype_encoding(styler):
+ with option_context("styler.render.encoding", "ASCII"):
+ result = styler.to_html(doctype_html=True)
+        assert '<meta charset="ASCII">' in result
+ result = styler.to_html(doctype_html=True, encoding="ANSI")
+        assert '<meta charset="ANSI">' in result
+
+
+def test_bold_headers_arg(styler):
+ result = styler.to_html(bold_headers=True)
+ assert "th {\n font-weight: bold;\n}" in result
+ result = styler.to_html()
+ assert "th {\n font-weight: bold;\n}" not in result
+
+
+def test_caption_arg(styler):
+ result = styler.to_html(caption="foo bar")
+ assert "foo bar " in result
+ result = styler.to_html()
+ assert "foo bar " not in result
+
+
+def test_block_names(tpl_style, tpl_table):
+ # catch accidental removal of a block
+ expected_style = {
+ "before_style",
+ "style",
+ "table_styles",
+ "before_cellstyle",
+ "cellstyle",
+ }
+ expected_table = {
+ "before_table",
+ "table",
+ "caption",
+ "thead",
+ "tbody",
+ "after_table",
+ "before_head_rows",
+ "head_tr",
+ "after_head_rows",
+ "before_rows",
+ "tr",
+ "after_rows",
+ }
+ result1 = set(tpl_style.blocks)
+ assert result1 == expected_style
+
+ result2 = set(tpl_table.blocks)
+ assert result2 == expected_table
+
+
+def test_from_custom_template_table(tmpdir):
+ p = tmpdir.mkdir("tpl").join("myhtml_table.tpl")
+ p.write(
+ dedent(
+ """\
+ {% extends "html_table.tpl" %}
+ {% block table %}
+            <h1>{{custom_title}}</h1>
+ {{ super() }}
+ {% endblock table %}"""
+ )
+ )
+ result = Styler.from_custom_template(str(tmpdir.join("tpl")), "myhtml_table.tpl")
+ assert issubclass(result, Styler)
+ assert result.env is not Styler.env
+ assert result.template_html_table is not Styler.template_html_table
+ styler = result(DataFrame({"A": [1, 2]}))
+ assert "My Title \n\n\n
+ {{ super() }}
+ {% endblock style %}"""
+ )
+ )
+ result = Styler.from_custom_template(
+ str(tmpdir.join("tpl")), html_style="myhtml_style.tpl"
+ )
+ assert issubclass(result, Styler)
+ assert result.env is not Styler.env
+ assert result.template_html_style is not Styler.template_html_style
+ styler = result(DataFrame({"A": [1, 2]}))
+    assert '<link rel="stylesheet" href="mystyle.css">\n\n<style' in styler.to_html()
+
+
+
+
+ n1
+ a
+
+
+
+ n2
+ c
+
+
+ n1
+ n2
+
+
+
+
+
+ a
+ c
+ 0
+
+
+
+ """
+ )
+ result = styler_mi.to_html()
+ assert result == expected
+
+
+def test_include_css_style_rules_only_for_visible_cells(styler_mi):
+ # GH 43619
+ result = (
+ styler_mi.set_uuid("")
+ .map(lambda v: "color: blue;")
+ .hide(styler_mi.data.columns[1:], axis="columns")
+ .hide(styler_mi.data.index[1:], axis="index")
+ .to_html()
+ )
+    expected_styles = dedent(
+        """\
+        <style type="text/css">
+        #T__row0_col0 {
+          color: blue;
+        }
+        </style>
+        """
+    )
+ assert expected_styles in result
+
+
+def test_include_css_style_rules_only_for_visible_index_labels(styler_mi):
+ # GH 43619
+ result = (
+ styler_mi.set_uuid("")
+ .map_index(lambda v: "color: blue;", axis="index")
+ .hide(styler_mi.data.columns, axis="columns")
+ .hide(styler_mi.data.index[1:], axis="index")
+ .to_html()
+ )
+    expected_styles = dedent(
+        """\
+        <style type="text/css">
+        #T__level0_row0, #T__level1_row0 {
+          color: blue;
+        }
+        </style>
+        """
+    )
+ assert expected_styles in result
+
+
+def test_include_css_style_rules_only_for_visible_column_labels(styler_mi):
+ # GH 43619
+ result = (
+ styler_mi.set_uuid("")
+ .map_index(lambda v: "color: blue;", axis="columns")
+ .hide(styler_mi.data.columns[1:], axis="columns")
+ .hide(styler_mi.data.index, axis="index")
+ .to_html()
+ )
+    expected_styles = dedent(
+        """\
+        <style type="text/css">
+        #T__level0_col0, #T__level1_col0 {
+          color: blue;
+        }
+        </style>
+        """
+    )
+ assert expected_styles in result
+
+
+def test_hiding_index_columns_multiindex_alignment():
+ # gh 43644
+ midx = MultiIndex.from_product(
+ [["i0", "j0"], ["i1"], ["i2", "j2"]], names=["i-0", "i-1", "i-2"]
+ )
+ cidx = MultiIndex.from_product(
+ [["c0"], ["c1", "d1"], ["c2", "d2"]], names=["c-0", "c-1", "c-2"]
+ )
+ df = DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=cidx)
+ styler = Styler(df, uuid_len=0)
+ styler.hide(level=1, axis=0).hide(level=0, axis=1)
+ styler.hide([("j0", "i1", "j2")], axis=0)
+ styler.hide([("c0", "d1", "d2")], axis=1)
+ result = styler.to_html()
+    expected = dedent(
+        """\
+        <style type="text/css">
+        </style>
+        <table id="T_">
+          <thead>
+            <tr>
+              <th class="blank" >&nbsp;</th>
+              <th class="index_name level1" >c-1</th>
+              <th id="T__level1_col0" class="col_heading level1 col0" colspan="2">c1</th>
+              <th id="T__level1_col2" class="col_heading level1 col2" >d1</th>
+            </tr>
+            <tr>
+              <th class="blank" >&nbsp;</th>
+              <th class="index_name level2" >c-2</th>
+              <th id="T__level2_col0" class="col_heading level2 col0" >c2</th>
+              <th id="T__level2_col1" class="col_heading level2 col1" >d2</th>
+              <th id="T__level2_col2" class="col_heading level2 col2" >c2</th>
+            </tr>
+            <tr>
+              <th class="index_name level0" >i-0</th>
+              <th class="index_name level2" >i-2</th>
+              <th class="blank col0" >&nbsp;</th>
+              <th class="blank col1" >&nbsp;</th>
+              <th class="blank col2" >&nbsp;</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <th id="T__level0_row0" class="row_heading level0 row0" rowspan="2">i0</th>
+              <th id="T__level2_row0" class="row_heading level2 row0" >i2</th>
+              <td id="T__row0_col0" class="data row0 col0" >0</td>
+              <td id="T__row0_col1" class="data row0 col1" >1</td>
+              <td id="T__row0_col2" class="data row0 col2" >2</td>
+            </tr>
+            <tr>
+              <th id="T__level2_row1" class="row_heading level2 row1" >j2</th>
+              <td id="T__row1_col0" class="data row1 col0" >4</td>
+              <td id="T__row1_col1" class="data row1 col1" >5</td>
+              <td id="T__row1_col2" class="data row1 col2" >6</td>
+            </tr>
+            <tr>
+              <th id="T__level0_row2" class="row_heading level0 row2" >j0</th>
+              <th id="T__level2_row2" class="row_heading level2 row2" >i2</th>
+              <td id="T__row2_col0" class="data row2 col0" >8</td>
+              <td id="T__row2_col1" class="data row2 col1" >9</td>
+              <td id="T__row2_col2" class="data row2 col2" >10</td>
+            </tr>
+          </tbody>
+        </table>
+        """
+    )
+ assert result == expected
+
+
+def test_hiding_index_columns_multiindex_trimming():
+ # gh 44272
+ df = DataFrame(np.arange(64).reshape(8, 8))
+ df.columns = MultiIndex.from_product([[0, 1, 2, 3], [0, 1]])
+ df.index = MultiIndex.from_product([[0, 1, 2, 3], [0, 1]])
+ df.index.names, df.columns.names = ["a", "b"], ["c", "d"]
+ styler = Styler(df, cell_ids=False, uuid_len=0)
+ styler.hide([(0, 0), (0, 1), (1, 0)], axis=1).hide([(0, 0), (0, 1), (1, 0)], axis=0)
+ with option_context("styler.render.max_rows", 4, "styler.render.max_columns", 4):
+ result = styler.to_html()
+
+ expected = dedent(
+ """\
+
+
+
+
+
+ c
+ 1
+ 2
+ 3
+
+
+
+ d
+ 1
+ 0
+ 1
+ 0
+ ...
+
+
+ a
+ b
+
+
+
+
+
+
+
+
+
+ 1
+ 1
+ 27
+ 28
+ 29
+ 30
+ ...
+
+
+ 2
+ 0
+ 35
+ 36
+ 37
+ 38
+ ...
+
+
+ 1
+ 43
+ 44
+ 45
+ 46
+ ...
+
+
+ 3
+ 0
+ 51
+ 52
+ 53
+ 54
+ ...
+
+
+ ...
+ ...
+ ...
+ ...
+ ...
+ ...
+ ...
+
+
+
+ """
+ )
+
+ assert result == expected
+
+
+@pytest.mark.parametrize("type", ["data", "index"])
+@pytest.mark.parametrize(
+ "text, exp, found",
+ [
+ ("no link, just text", False, ""),
+ ("subdomain not www: sub.web.com", False, ""),
+ ("www subdomain: www.web.com other", True, "www.web.com"),
+ ("scheme full structure: http://www.web.com", True, "http://www.web.com"),
+ ("scheme no top-level: http://www.web", True, "http://www.web"),
+ ("no scheme, no top-level: www.web", False, "www.web"),
+ ("https scheme: https://www.web.com", True, "https://www.web.com"),
+ ("ftp scheme: ftp://www.web", True, "ftp://www.web"),
+ ("ftps scheme: ftps://www.web", True, "ftps://www.web"),
+ ("subdirectories: www.web.com/directory", True, "www.web.com/directory"),
+ ("Multiple domains: www.1.2.3.4", True, "www.1.2.3.4"),
+ ("with port: http://web.com:80", True, "http://web.com:80"),
+ (
+ "full net_loc scheme: http://user:pass@web.com",
+ True,
+ "http://user:pass@web.com",
+ ),
+ (
+ "with valid special chars: http://web.com/,.':;~!@#$*()[]",
+ True,
+ "http://web.com/,.':;~!@#$*()[]",
+ ),
+ ],
+)
+def test_rendered_links(type, text, exp, found):
+ if type == "data":
+ df = DataFrame([text])
+ styler = df.style.format(hyperlinks="html")
+ else:
+ df = DataFrame([0], index=[text])
+ styler = df.style.format_index(hyperlinks="html")
+
+    rendered = f'<a href="{found}" target="_blank">{found}</a>'
+ result = styler.to_html()
+ assert (rendered in result) is exp
+ assert (text in result) is not exp # test conversion done when expected and not
+
+
+def test_multiple_rendered_links():
+ links = ("www.a.b", "http://a.c", "https://a.d", "ftp://a.e")
+ # pylint: disable-next=consider-using-f-string
+ df = DataFrame(["text {} {} text {} {}".format(*links)])
+ result = df.style.format(hyperlinks="html").to_html()
+    href = '<a href="{0}" target="_blank">{0}</a>'
+ for link in links:
+ assert href.format(link) in result
+ assert href.format("text") not in result
+
+
+def test_concat(styler):
+ other = styler.data.agg(["mean"]).style
+ styler.concat(other).set_uuid("X")
+ result = styler.to_html()
+ fp = "foot0_"
+    expected = dedent(
+        f"""\
+    <tr>
+      <th id="T_X_level0_row1" class="row_heading level0 row1" >b</th>
+      <td id="T_X_row1_col0" class="data row1 col0" >2.690000</td>
+    </tr>
+    <tr>
+      <th id="T_X_level0_{fp}row0" class="{fp}row_heading level0 {fp}row0" >mean</th>
+      <td id="T_X_{fp}row0_col0" class="{fp}data {fp}row0 col0" >2.650000</td>
+    </tr>
+  </tbody>
+</table>
+"""
+    )
+ assert expected in result
+
+
+def test_concat_recursion(styler):
+ df = styler.data
+ styler1 = styler
+ styler2 = Styler(df.agg(["mean"]), precision=3)
+ styler3 = Styler(df.agg(["mean"]), precision=4)
+ styler1.concat(styler2.concat(styler3)).set_uuid("X")
+ result = styler.to_html()
+ # notice that the second concat (last of the output html),
+ # there are two `foot_` in the id and class
+ fp1 = "foot0_"
+ fp2 = "foot0_foot0_"
+    expected = dedent(
+        f"""\
+    <tr>
+      <th id="T_X_level0_row1" class="row_heading level0 row1" >b</th>
+      <td id="T_X_row1_col0" class="data row1 col0" >2.690000</td>
+    </tr>
+    <tr>
+      <th id="T_X_level0_{fp1}row0" class="{fp1}row_heading level0 {fp1}row0" >mean</th>
+      <td id="T_X_{fp1}row0_col0" class="{fp1}data {fp1}row0 col0" >2.650</td>
+    </tr>
+    <tr>
+      <th id="T_X_level0_{fp2}row0" class="{fp2}row_heading level0 {fp2}row0" >mean</th>
+      <td id="T_X_{fp2}row0_col0" class="{fp2}data {fp2}row0 col0" >2.6500</td>
+    </tr>
+  </tbody>
+</table>
+"""
+    )
+ assert expected in result
+
+
+def test_concat_chain(styler):
+ df = styler.data
+ styler1 = styler
+ styler2 = Styler(df.agg(["mean"]), precision=3)
+ styler3 = Styler(df.agg(["mean"]), precision=4)
+ styler1.concat(styler2).concat(styler3).set_uuid("X")
+ result = styler.to_html()
+ fp1 = "foot0_"
+ fp2 = "foot1_"
+    expected = dedent(
+        f"""\
+    <tr>
+      <th id="T_X_level0_row1" class="row_heading level0 row1" >b</th>
+      <td id="T_X_row1_col0" class="data row1 col0" >2.690000</td>
+    </tr>
+    <tr>
+      <th id="T_X_level0_{fp1}row0" class="{fp1}row_heading level0 {fp1}row0" >mean</th>
+      <td id="T_X_{fp1}row0_col0" class="{fp1}data {fp1}row0 col0" >2.650</td>
+    </tr>
+    <tr>
+      <th id="T_X_level0_{fp2}row0" class="{fp2}row_heading level0 {fp2}row0" >mean</th>
+      <td id="T_X_{fp2}row0_col0" class="{fp2}data {fp2}row0 col0" >2.6500</td>
+    </tr>
+  </tbody>
+</table>
+"""
+    )
+ assert expected in result
+
+
+def test_concat_combined():
+ def html_lines(foot_prefix: str):
+ assert foot_prefix.endswith("_") or foot_prefix == ""
+ fp = foot_prefix
+ return indent(
+            dedent(
+                f"""\
+                <tr>
+                  <th id="T_X_level0_{fp}row0" class="{fp}row_heading level0 {fp}row0" >a</th>
+                  <td id="T_X_{fp}row0_col0" class="{fp}data {fp}row0 col0" >2.610000</td>
+                </tr>
+                <tr>
+                  <th id="T_X_level0_{fp}row1" class="{fp}row_heading level0 {fp}row1" >b</th>
+                  <td id="T_X_{fp}row1_col0" class="{fp}data {fp}row1 col0" >2.690000</td>
+                </tr>
+                """
+            ),
+ prefix=" " * 4,
+ )
+
+ df = DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"])
+ s1 = df.style.highlight_max(color="red")
+ s2 = df.style.highlight_max(color="green")
+ s3 = df.style.highlight_max(color="blue")
+ s4 = df.style.highlight_max(color="yellow")
+
+ result = s1.concat(s2).concat(s3.concat(s4)).set_uuid("X").to_html()
+    expected_css = dedent(
+        """\
+        <style type="text/css">
+        #T_X_row1_col0 {
+          background-color: red;
+        }
+        #T_X_foot0_row1_col0 {
+          background-color: green;
+        }
+        #T_X_foot1_row1_col0 {
+          background-color: blue;
+        }
+        #T_X_foot1_foot0_row1_col0 {
+          background-color: yellow;
+        }
+        </style>
+        """
+    )
+    expected_table = (
+        dedent(
+            """\
+            <table id="T_X">
+              <thead>
+                <tr>
+                  <th class="blank level0" >&nbsp;</th>
+                  <th id="T_X_level0_col0" class="col_heading level0 col0" >A</th>
+                </tr>
+              </thead>
+              <tbody>
+            """
+        )
+        + html_lines("")
+        + html_lines("foot0_")
+        + html_lines("foot1_")
+        + html_lines("foot1_foot0_")
+        + dedent(
+            """\
+              </tbody>
+            </table>
+            """
+        )
+    )
+ assert expected_css + expected_table == result
+
+
+def test_to_html_na_rep_non_scalar_data(datapath):
+ # GH47103
+ df = DataFrame([{"a": 1, "b": [1, 2, 3], "c": np.nan}])
+ result = df.style.format(na_rep="-").to_html(table_uuid="test")
+ expected = """\
+
+
+
+
+
+ a
+ b
+ c
+
+
+
+
+ 0
+ 1
+ [1, 2, 3]
+ -
+
+
+
+"""
+ assert result == expected
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb7a77f1ddb27db66a847fc1a1d87d14d95822aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py
@@ -0,0 +1,335 @@
+import gc
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ IndexSlice,
+ Series,
+)
+
+pytest.importorskip("matplotlib")
+pytest.importorskip("jinja2")
+
+import matplotlib as mpl
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture(autouse=True)
+def mpl_cleanup():
+ # matplotlib/testing/decorators.py#L24
+ # 1) Resets units registry
+ # 2) Resets rc_context
+ # 3) Closes all figures
+ mpl = pytest.importorskip("matplotlib")
+ mpl_units = pytest.importorskip("matplotlib.units")
+ plt = pytest.importorskip("matplotlib.pyplot")
+ orig_units_registry = mpl_units.registry.copy()
+ with mpl.rc_context():
+ mpl.use("template")
+ yield
+ mpl_units.registry.clear()
+ mpl_units.registry.update(orig_units_registry)
+ plt.close("all")
+ # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501
+ gc.collect(1)
+
+
+@pytest.fixture
+def df():
+ return DataFrame([[1, 2], [2, 4]], columns=["A", "B"])
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+@pytest.fixture
+def df_blank():
+ return DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"])
+
+
+@pytest.fixture
+def styler_blank(df_blank):
+ return Styler(df_blank, uuid_len=0)
+
+
+@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"])
+def test_function_gradient(styler, f):
+ for c_map in [None, "YlOrRd"]:
+ result = getattr(styler, f)(cmap=c_map)._compute().ctx
+ assert all("#" in x[0][1] for x in result.values())
+ assert result[(0, 0)] == result[(0, 1)]
+ assert result[(1, 0)] == result[(1, 1)]
+
+
+@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"])
+def test_background_gradient_color(styler, f):
+ result = getattr(styler, f)(subset=IndexSlice[1, "A"])._compute().ctx
+ if f == "background_gradient":
+ assert result[(1, 0)] == [("background-color", "#fff7fb"), ("color", "#000000")]
+ elif f == "text_gradient":
+ assert result[(1, 0)] == [("color", "#fff7fb")]
+
+
+@pytest.mark.parametrize(
+ "axis, expected",
+ [
+ (0, ["low", "low", "high", "high"]),
+ (1, ["low", "high", "low", "high"]),
+ (None, ["low", "mid", "mid", "high"]),
+ ],
+)
+@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"])
+def test_background_gradient_axis(styler, axis, expected, f):
+ if f == "background_gradient":
+ colors = {
+ "low": [("background-color", "#f7fbff"), ("color", "#000000")],
+ "mid": [("background-color", "#abd0e6"), ("color", "#000000")],
+ "high": [("background-color", "#08306b"), ("color", "#f1f1f1")],
+ }
+ elif f == "text_gradient":
+ colors = {
+ "low": [("color", "#f7fbff")],
+ "mid": [("color", "#abd0e6")],
+ "high": [("color", "#08306b")],
+ }
+ result = getattr(styler, f)(cmap="Blues", axis=axis)._compute().ctx
+ for i, cell in enumerate([(0, 0), (0, 1), (1, 0), (1, 1)]):
+ assert result[cell] == colors[expected[i]]
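+    # Note: ``axis=0`` normalises the colormap within each column, ``axis=1``
+    # within each row, and ``axis=None`` over the whole table, which is why the
+    # "mid" shades only appear in the ``axis=None`` case for this 2x2 data.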
+
+
+@pytest.mark.parametrize(
+ "cmap, expected",
+ [
+ (
+ "PuBu",
+ {
+ (4, 5): [("background-color", "#86b0d3"), ("color", "#000000")],
+ (4, 6): [("background-color", "#83afd3"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ "YlOrRd",
+ {
+ (4, 8): [("background-color", "#fd913e"), ("color", "#000000")],
+ (4, 9): [("background-color", "#fd8f3d"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ None,
+ {
+ (7, 0): [("background-color", "#48c16e"), ("color", "#f1f1f1")],
+ (7, 1): [("background-color", "#4cc26c"), ("color", "#000000")],
+ },
+ ),
+ ],
+)
+def test_text_color_threshold(cmap, expected):
+ # GH 39888
+ df = DataFrame(np.arange(100).reshape(10, 10))
+ result = df.style.background_gradient(cmap=cmap, axis=None)._compute().ctx
+ for k in expected.keys():
+ assert result[k] == expected[k]
+
+
+def test_background_gradient_vmin_vmax():
+ # GH 12145
+ df = DataFrame(range(5))
+ ctx = df.style.background_gradient(vmin=1, vmax=3)._compute().ctx
+ assert ctx[(0, 0)] == ctx[(1, 0)]
+ assert ctx[(4, 0)] == ctx[(3, 0)]
+
+
+def test_background_gradient_int64():
+ # GH 28869
+ df1 = Series(range(3)).to_frame()
+ df2 = Series(range(3), dtype="Int64").to_frame()
+ ctx1 = df1.style.background_gradient()._compute().ctx
+ ctx2 = df2.style.background_gradient()._compute().ctx
+ assert ctx2[(0, 0)] == ctx1[(0, 0)]
+ assert ctx2[(1, 0)] == ctx1[(1, 0)]
+ assert ctx2[(2, 0)] == ctx1[(2, 0)]
+
+
+@pytest.mark.parametrize(
+ "axis, gmap, expected",
+ [
+ (
+ 0,
+ [1, 2],
+ {
+ (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 0): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ 1,
+ [1, 2],
+ {
+ (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (0, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ (
+ None,
+ np.array([[2, 1], [1, 2]]),
+ {
+ (0, 0): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")],
+ (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")],
+ },
+ ),
+ ],
+)
+def test_background_gradient_gmap_array(styler_blank, axis, gmap, expected):
+ # tests when gmap is given as a sequence and converted to ndarray
+ result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute().ctx
+ assert result == expected
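+    # Note (illustrative): ``gmap`` decouples the shading from the displayed
+    # values - the colormap is driven by the gmap entries instead of the data.
+    # E.g. on an all-zero frame:
+    #
+    #   DataFrame([[0, 0], [0, 0]]).style.background_gradient(
+    #       axis=None, gmap=np.array([[2, 1], [1, 2]])
+    #   )
+    #
+    # shades the diagonal darkest even though every displayed value is 0.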
+
+
+@pytest.mark.parametrize(
+ "gmap, axis", [([1, 2, 3], 0), ([1, 2], 1), (np.array([[1, 2], [1, 2]]), None)]
+)
+def test_background_gradient_gmap_array_raises(gmap, axis):
+ # test when gmap as converted ndarray is bad shape
+ df = DataFrame([[0, 0, 0], [0, 0, 0]])
+ msg = "supplied 'gmap' is not correct shape"
+ with pytest.raises(ValueError, match=msg):
+ df.style.background_gradient(gmap=gmap, axis=axis)._compute()
+
+
+@pytest.mark.parametrize(
+ "gmap",
+ [
+ DataFrame( # reverse the columns
+ [[2, 1], [1, 2]], columns=["B", "A"], index=["X", "Y"]
+ ),
+ DataFrame( # reverse the index
+ [[2, 1], [1, 2]], columns=["A", "B"], index=["Y", "X"]
+ ),
+ DataFrame( # reverse the index and columns
+ [[1, 2], [2, 1]], columns=["B", "A"], index=["Y", "X"]
+ ),
+ DataFrame( # add unnecessary columns
+ [[1, 2, 3], [2, 1, 3]], columns=["A", "B", "C"], index=["X", "Y"]
+ ),
+ DataFrame( # add unnecessary index
+ [[1, 2], [2, 1], [3, 3]], columns=["A", "B"], index=["X", "Y", "Z"]
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "subset, exp_gmap", # exp_gmap is underlying map DataFrame should conform to
+ [
+ (None, [[1, 2], [2, 1]]),
+ (["A"], [[1], [2]]), # slice only column "A" in data and gmap
+ (["B", "A"], [[2, 1], [1, 2]]), # reverse the columns in data
+ (IndexSlice["X", :], [[1, 2]]), # slice only index "X" in data and gmap
+ (IndexSlice[["Y", "X"], :], [[2, 1], [1, 2]]), # reverse the index in data
+ ],
+)
+def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, exp_gmap):
+ # test gmap given as DataFrame that it aligns to the data including subset
+ expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap, subset=subset)
+ result = styler_blank.background_gradient(axis=None, gmap=gmap, subset=subset)
+ assert expected._compute().ctx == result._compute().ctx
+
+
+@pytest.mark.parametrize(
+ "gmap, axis, exp_gmap",
+ [
+ (Series([2, 1], index=["Y", "X"]), 0, [[1, 1], [2, 2]]), # revrse the index
+ (Series([2, 1], index=["B", "A"]), 1, [[1, 2], [1, 2]]), # revrse the cols
+ (Series([1, 2, 3], index=["X", "Y", "Z"]), 0, [[1, 1], [2, 2]]), # add idx
+ (Series([1, 2, 3], index=["A", "B", "C"]), 1, [[1, 2], [1, 2]]), # add col
+ ],
+)
+def test_background_gradient_gmap_series_align(styler_blank, gmap, axis, exp_gmap):
+ # test gmap given as Series that it aligns to the data including subset
+ expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap)._compute()
+ result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute()
+ assert expected.ctx == result.ctx
+
+
+@pytest.mark.parametrize(
+ "gmap, axis",
+ [
+ (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 1),
+ (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 0),
+ ],
+)
+def test_background_gradient_gmap_wrong_dataframe(styler_blank, gmap, axis):
+ # test giving a gmap in DataFrame but with wrong axis
+ msg = "'gmap' is a DataFrame but underlying data for operations is a Series"
+ with pytest.raises(ValueError, match=msg):
+ styler_blank.background_gradient(gmap=gmap, axis=axis)._compute()
+
+
+def test_background_gradient_gmap_wrong_series(styler_blank):
+ # test giving a gmap in Series form but with wrong axis
+ msg = "'gmap' is a Series but underlying data for operations is a DataFrame"
+ gmap = Series([1, 2], index=["X", "Y"])
+ with pytest.raises(ValueError, match=msg):
+ styler_blank.background_gradient(gmap=gmap, axis=None)._compute()
+
+
+def test_background_gradient_nullable_dtypes():
+ # GH 50712
+ df1 = DataFrame([[1], [0], [np.nan]], dtype=float)
+ df2 = DataFrame([[1], [0], [None]], dtype="Int64")
+
+ ctx1 = df1.style.background_gradient()._compute().ctx
+ ctx2 = df2.style.background_gradient()._compute().ctx
+ assert ctx1 == ctx2
+
+
+@pytest.mark.parametrize(
+ "cmap",
+ ["PuBu", mpl.colormaps["PuBu"]],
+)
+def test_bar_colormap(cmap):
+ data = DataFrame([[1, 2], [3, 4]])
+ ctx = data.style.bar(cmap=cmap, axis=None)._compute().ctx
+ pubu_colors = {
+ (0, 0): "#d0d1e6",
+ (1, 0): "#056faf",
+ (0, 1): "#73a9cf",
+ (1, 1): "#023858",
+ }
+ for k, v in pubu_colors.items():
+ assert v in ctx[k][1][1]
+
+
+def test_bar_color_raises(df):
+ msg = "`color` must be string or list or tuple of 2 strings"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color={"a", "b"}).to_html()
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color=["a", "b", "c"]).to_html()
+
+ msg = "`color` and `cmap` cannot both be given"
+ with pytest.raises(ValueError, match=msg):
+ df.style.bar(color="something", cmap="something else").to_html()
+
+
+@pytest.mark.parametrize(
+ "plot_method",
+ ["scatter", "hexbin"],
+)
+def test_pass_colormap_instance(df, plot_method):
+ # https://github.com/pandas-dev/pandas/issues/49374
+ cmap = mpl.colors.ListedColormap([[1, 1, 1], [0, 0, 0]])
+ df["c"] = df.A + df.B
+ kwargs = {"x": "A", "y": "B", "c": "c", "colormap": cmap}
+ if plot_method == "hexbin":
+ kwargs["C"] = kwargs.pop("c")
+ getattr(df.plot, plot_method)(**kwargs)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4d31fe21f2c9cf3454a67f8c7443382f7f1c0ef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_non_unique.py
@@ -0,0 +1,140 @@
+from textwrap import dedent
+
+import pytest
+
+from pandas import (
+ DataFrame,
+ IndexSlice,
+)
+
+pytest.importorskip("jinja2")
+
+from pandas.io.formats.style import Styler
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+ index=["i", "j", "j"],
+ columns=["c", "d", "d"],
+ dtype=float,
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0)
+
+
+def test_format_non_unique(df):
+ # GH 41269
+
+ # test dict
+ html = df.style.format({"d": "{:.1f}"}).to_html()
+ for val in ["1.000000<", "4.000000<", "7.000000<"]:
+ assert val in html
+ for val in ["2.0<", "3.0<", "5.0<", "6.0<", "8.0<", "9.0<"]:
+ assert val in html
+
+ # test subset
+ html = df.style.format(precision=1, subset=IndexSlice["j", "d"]).to_html()
+ for val in ["1.000000<", "4.000000<", "7.000000<", "2.000000<", "3.000000<"]:
+ assert val in html
+ for val in ["5.0<", "6.0<", "8.0<", "9.0<"]:
+ assert val in html
+
+
+@pytest.mark.parametrize("func", ["apply", "map"])
+def test_apply_map_non_unique_raises(df, func):
+ # GH 41269
+ if func == "apply":
+ op = lambda s: ["color: red;"] * len(s)
+ else:
+ op = lambda v: "color: red;"
+
+ with pytest.raises(KeyError, match="`Styler.apply` and `.map` are not"):
+ getattr(df.style, func)(op)._compute()
+
+
+def test_table_styles_dict_non_unique_index(styler):
+ styles = styler.set_table_styles(
+ {"j": [{"selector": "td", "props": "a: v;"}]}, axis=1
+ ).table_styles
+ assert styles == [
+ {"selector": "td.row1", "props": [("a", "v")]},
+ {"selector": "td.row2", "props": [("a", "v")]},
+ ]
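+    # Note: with duplicate labels, a single dict key expands to one CSS
+    # selector per matching position - "j" appears at rows 1 and 2, hence
+    # both ``td.row1`` and ``td.row2`` selectors.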
+
+
+def test_table_styles_dict_non_unique_columns(styler):
+ styles = styler.set_table_styles(
+ {"d": [{"selector": "td", "props": "a: v;"}]}, axis=0
+ ).table_styles
+ assert styles == [
+ {"selector": "td.col1", "props": [("a", "v")]},
+ {"selector": "td.col2", "props": [("a", "v")]},
+ ]
+
+
+def test_tooltips_non_unique_raises(styler):
+ # ttips has unique keys
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"])
+ styler.set_tooltips(ttips=ttips) # OK
+
+ # ttips has non-unique columns
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "c"], index=["a", "b"])
+ with pytest.raises(KeyError, match="Tooltips render only if `ttips` has unique"):
+ styler.set_tooltips(ttips=ttips)
+
+ # ttips has non-unique index
+ ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "a"])
+ with pytest.raises(KeyError, match="Tooltips render only if `ttips` has unique"):
+ styler.set_tooltips(ttips=ttips)
+
+
+def test_set_td_classes_non_unique_raises(styler):
+ # classes has unique keys
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"])
+ styler.set_td_classes(classes=classes) # OK
+
+ # classes has non-unique columns
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "c"], index=["a", "b"])
+ with pytest.raises(KeyError, match="Classes render only if `classes` has unique"):
+ styler.set_td_classes(classes=classes)
+
+ # classes has non-unique index
+ classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "a"])
+ with pytest.raises(KeyError, match="Classes render only if `classes` has unique"):
+ styler.set_td_classes(classes=classes)
+
+
+def test_hide_columns_non_unique(styler):
+ ctx = styler.hide(["d"], axis="columns")._translate(True, True)
+
+ assert ctx["head"][0][1]["display_value"] == "c"
+ assert ctx["head"][0][1]["is_visible"] is True
+
+ assert ctx["head"][0][2]["display_value"] == "d"
+ assert ctx["head"][0][2]["is_visible"] is False
+
+ assert ctx["head"][0][3]["display_value"] == "d"
+ assert ctx["head"][0][3]["is_visible"] is False
+
+ assert ctx["body"][0][1]["is_visible"] is True
+ assert ctx["body"][0][2]["is_visible"] is False
+ assert ctx["body"][0][3]["is_visible"] is False
+
+
+def test_latex_non_unique(styler):
+ result = styler.to_latex()
+ assert result == dedent(
+ """\
+ \\begin{tabular}{lrrr}
+ & c & d & d \\\\
+ i & 1.000000 & 2.000000 & 3.000000 \\\\
+ j & 4.000000 & 5.000000 & 6.000000 \\\\
+ j & 7.000000 & 8.000000 & 9.000000 \\\\
+ \\end{tabular}
+ """
+ )
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fa72bd48031cca999b81cccfcedafcd3abcd924
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_style.py
@@ -0,0 +1,1588 @@
+import contextlib
+import copy
+import re
+from textwrap import dedent
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ IndexSlice,
+ MultiIndex,
+ Series,
+ option_context,
+)
+import pandas._testing as tm
+
+jinja2 = pytest.importorskip("jinja2")
+from pandas.io.formats.style import ( # isort:skip
+ Styler,
+)
+from pandas.io.formats.style_render import (
+ _get_level_lengths,
+ _get_trimming_maximums,
+ maybe_convert_css_to_tuples,
+ non_reducing_slice,
+)
+
+
+@pytest.fixture
+def mi_df():
+ return DataFrame(
+ [[1, 2], [3, 4]],
+ index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
+ columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
+ dtype=int,
+ )
+
+
+@pytest.fixture
+def mi_styler(mi_df):
+ return Styler(mi_df, uuid_len=0)
+
+
+@pytest.fixture
+def mi_styler_comp(mi_styler):
+ # comprehensively add features to mi_styler
+ mi_styler = mi_styler._copy(deepcopy=True)
+ mi_styler.css = {**mi_styler.css, "row": "ROW", "col": "COL"}
+ mi_styler.uuid_len = 5
+ mi_styler.uuid = "abcde"
+ mi_styler.set_caption("capt")
+ mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
+ mi_styler.hide(axis="columns")
+ mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
+ mi_styler.hide(axis="index")
+ mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
+ mi_styler.set_table_attributes('class="box"')
+ other = mi_styler.data.agg(["mean"])
+ other.index = MultiIndex.from_product([[""], other.index])
+ mi_styler.concat(other.style)
+ mi_styler.format(na_rep="MISSING", precision=3)
+ mi_styler.format_index(precision=2, axis=0)
+ mi_styler.format_index(precision=4, axis=1)
+ mi_styler.highlight_max(axis=None)
+ mi_styler.map_index(lambda x: "color: white;", axis=0)
+ mi_styler.map_index(lambda x: "color: black;", axis=1)
+ mi_styler.set_td_classes(
+ DataFrame(
+ [["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
+ )
+ )
+ mi_styler.set_tooltips(
+ DataFrame(
+ [["a2", "b2"], ["a2", "c2"]],
+ index=mi_styler.index,
+ columns=mi_styler.columns,
+ )
+ )
+ return mi_styler
+
+
+@pytest.fixture
+def blank_value():
+ return " "
+
+
+@pytest.fixture
+def df():
+ df = DataFrame({"A": [0, 1], "B": np.random.default_rng(2).standard_normal(2)})
+ return df
+
+
+@pytest.fixture
+def styler(df):
+ df = DataFrame({"A": [0, 1], "B": np.random.default_rng(2).standard_normal(2)})
+ return Styler(df)
+
+
+@pytest.mark.parametrize(
+ "sparse_columns, exp_cols",
+ [
+ (
+ True,
+ [
+ {"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
+ {"is_visible": False, "attributes": "", "value": "c0"},
+ ],
+ ),
+ (
+ False,
+ [
+ {"is_visible": True, "attributes": "", "value": "c0"},
+ {"is_visible": True, "attributes": "", "value": "c0"},
+ ],
+ ),
+ ],
+)
+def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
+ exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
+ exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
+
+ ctx = mi_styler._translate(True, sparse_columns)
+
+ assert exp_cols[0].items() <= ctx["head"][0][2].items()
+ assert exp_cols[1].items() <= ctx["head"][0][3].items()
+ assert exp_l1_c0.items() <= ctx["head"][1][2].items()
+ assert exp_l1_c1.items() <= ctx["head"][1][3].items()
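+    # Note: "sparsifying" collapses repeated MultiIndex labels into one cell
+    # spanning several columns - hence the ``colspan="2"`` attribute and the
+    # invisible duplicate when ``sparse_columns=True``.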
+
+
+@pytest.mark.parametrize(
+ "sparse_index, exp_rows",
+ [
+ (
+ True,
+ [
+ {"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
+ {"is_visible": False, "attributes": "", "value": "i0"},
+ ],
+ ),
+ (
+ False,
+ [
+ {"is_visible": True, "attributes": "", "value": "i0"},
+ {"is_visible": True, "attributes": "", "value": "i0"},
+ ],
+ ),
+ ],
+)
+def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
+ exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
+ exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
+
+ ctx = mi_styler._translate(sparse_index, True)
+
+ assert exp_rows[0].items() <= ctx["body"][0][0].items()
+ assert exp_rows[1].items() <= ctx["body"][1][0].items()
+ assert exp_l1_r0.items() <= ctx["body"][0][1].items()
+ assert exp_l1_r1.items() <= ctx["body"][1][1].items()
+
+
+def test_mi_styler_sparsify_options(mi_styler):
+ with option_context("styler.sparse.index", False):
+ html1 = mi_styler.to_html()
+ with option_context("styler.sparse.index", True):
+ html2 = mi_styler.to_html()
+
+ assert html1 != html2
+
+ with option_context("styler.sparse.columns", False):
+ html1 = mi_styler.to_html()
+ with option_context("styler.sparse.columns", True):
+ html2 = mi_styler.to_html()
+
+ assert html1 != html2
+
+
+@pytest.mark.parametrize(
+ "rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
+ [
+ (100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
+ (1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
+ (4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
+ (1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
+ (4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
+ (100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts
+ ],
+)
+def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
+ rn, cn = _get_trimming_maximums(
+ rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
+ )
+ assert (rn, cn) == (exp_rn, exp_cn)
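+    # Note (approximate description): ``_get_trimming_maximums`` iteratively
+    # scales the row/column counts by ``scaling_factor`` until ``rn * cn`` fits
+    # under ``max_elements``; explicit ``max_rows``/``max_cols`` caps are
+    # applied first and take precedence over the dynamic reduction.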
+
+
+@pytest.mark.parametrize(
+ "option, val",
+ [
+ ("styler.render.max_elements", 6),
+ ("styler.render.max_rows", 3),
+ ],
+)
+def test_render_trimming_rows(option, val):
+ # test auto and specific trimming of rows
+ df = DataFrame(np.arange(120).reshape(60, 2))
+ with option_context(option, val):
+ ctx = df.style._translate(True, True)
+ assert len(ctx["head"][0]) == 3 # index + 2 data cols
+ assert len(ctx["body"]) == 4 # 3 data rows + trimming row
+ assert len(ctx["body"][0]) == 3 # index + 2 data cols
+
+
+@pytest.mark.parametrize(
+ "option, val",
+ [
+ ("styler.render.max_elements", 6),
+ ("styler.render.max_columns", 2),
+ ],
+)
+def test_render_trimming_cols(option, val):
+ # test auto and specific trimming of cols
+ df = DataFrame(np.arange(30).reshape(3, 10))
+ with option_context(option, val):
+ ctx = df.style._translate(True, True)
+ assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
+ assert len(ctx["body"]) == 3 # 3 data rows
+ assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
+
+
+def test_render_trimming_mi():
+ midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
+ df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
+ with option_context("styler.render.max_elements", 4):
+ ctx = df.style._translate(True, True)
+
+ assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
+ assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
+ assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
+ assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
+ assert len(ctx["body"]) == 3 # 2 data rows + trimming row
+
+
+def test_render_empty_mi():
+ # GH 43305
+ df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
+ expected = dedent(
+ """\
+    >
+      <thead>
+        <tr>
+          <th class="index_name level0" >&nbsp;</th>
+          <th class="index_name level1" >one</th>
+        </tr>
+      </thead>
+ """
+ )
+ assert expected in df.style.to_html()
+
+
+@pytest.mark.parametrize("comprehensive", [True, False])
+@pytest.mark.parametrize("render", [True, False])
+@pytest.mark.parametrize("deepcopy", [True, False])
+def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
+ styler = mi_styler_comp if comprehensive else mi_styler
+ styler.uuid_len = 5
+
+ s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
+ assert s2 is not styler
+
+ if render:
+ styler.to_html()
+
+ excl = [
+ "cellstyle_map", # render time vars..
+ "cellstyle_map_columns",
+ "cellstyle_map_index",
+ "template_latex", # render templates are class level
+ "template_html",
+ "template_html_style",
+ "template_html_table",
+ ]
+ if not deepcopy: # check memory locations are equal for all included attributes
+ for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
+ assert id(getattr(s2, attr)) == id(getattr(styler, attr))
+ else: # check memory locations are different for nested or mutable vars
+ shallow = [
+ "data",
+ "columns",
+ "index",
+ "uuid_len",
+ "uuid",
+ "caption",
+ "cell_ids",
+ "hide_index_",
+ "hide_columns_",
+ "hide_index_names",
+ "hide_column_names",
+ "table_attributes",
+ ]
+ for attr in shallow:
+ assert id(getattr(s2, attr)) == id(getattr(styler, attr))
+
+ for attr in [
+ a
+ for a in styler.__dict__
+ if (not callable(a) and a not in excl and a not in shallow)
+ ]:
+ if getattr(s2, attr) is None:
+ assert id(getattr(s2, attr)) == id(getattr(styler, attr))
+ else:
+ assert id(getattr(s2, attr)) != id(getattr(styler, attr))
+
+
+@pytest.mark.parametrize("deepcopy", [True, False])
+def test_inherited_copy(mi_styler, deepcopy):
+ # Ensure that the inherited class is preserved when a Styler object is copied.
+ # GH 52728
+ class CustomStyler(Styler):
+ pass
+
+ custom_styler = CustomStyler(mi_styler.data)
+ custom_styler_copy = (
+ copy.deepcopy(custom_styler) if deepcopy else copy.copy(custom_styler)
+ )
+ assert isinstance(custom_styler_copy, CustomStyler)
+
+
+def test_clear(mi_styler_comp):
+ # NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
+ # to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
+ # GH 40675
+ styler = mi_styler_comp
+ styler._compute() # execute applied methods
+
+ clean_copy = Styler(styler.data, uuid=styler.uuid)
+
+ excl = [
+ "data",
+ "index",
+ "columns",
+ "uuid",
+ "uuid_len", # uuid is set to be the same on styler and clean_copy
+ "cell_ids",
+ "cellstyle_map", # execution time only
+ "cellstyle_map_columns", # execution time only
+ "cellstyle_map_index", # execution time only
+ "template_latex", # render templates are class level
+ "template_html",
+ "template_html_style",
+ "template_html_table",
+ ]
+ # tests vars are not same vals on obj and clean copy before clear (except for excl)
+ for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
+ res = getattr(styler, attr) == getattr(clean_copy, attr)
+ if hasattr(res, "__iter__") and len(res) > 0:
+ assert not all(res) # some element in iterable differs
+ elif hasattr(res, "__iter__") and len(res) == 0:
+ pass # empty array
+ else:
+ assert not res # explicit var differs
+
+ # test vars have same vales on obj and clean copy after clearing
+ styler.clear()
+ for attr in [a for a in styler.__dict__ if not callable(a)]:
+ res = getattr(styler, attr) == getattr(clean_copy, attr)
+ assert all(res) if hasattr(res, "__iter__") else res
+
+
+def test_export(mi_styler_comp, mi_styler):
+ exp_attrs = [
+ "_todo",
+ "hide_index_",
+ "hide_index_names",
+ "hide_columns_",
+ "hide_column_names",
+ "table_attributes",
+ "table_styles",
+ "css",
+ ]
+ for attr in exp_attrs:
+ check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
+ assert not (
+ all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
+ )
+
+ export = mi_styler_comp.export()
+ used = mi_styler.use(export)
+ for attr in exp_attrs:
+ check = getattr(used, attr) == getattr(mi_styler_comp, attr)
+ assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
+
+ used.to_html()
+
+
+def test_hide_raises(mi_styler):
+ msg = "`subset` and `level` cannot be passed simultaneously"
+ with pytest.raises(ValueError, match=msg):
+ mi_styler.hide(axis="index", subset="something", level="something else")
+
+ msg = "`level` must be of type `int`, `str` or list of such"
+ with pytest.raises(ValueError, match=msg):
+ mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
+
+
+@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
+def test_hide_index_level(mi_styler, level):
+ mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
+ ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
+ assert len(ctx["head"][0]) == 3
+ assert len(ctx["head"][1]) == 3
+ assert len(ctx["head"][2]) == 4
+ assert ctx["head"][2][0]["is_visible"]
+ assert not ctx["head"][2][1]["is_visible"]
+
+ assert ctx["body"][0][0]["is_visible"]
+ assert not ctx["body"][0][1]["is_visible"]
+ assert ctx["body"][1][0]["is_visible"]
+ assert not ctx["body"][1][1]["is_visible"]
+
+
+@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
+@pytest.mark.parametrize("names", [True, False])
+def test_hide_columns_level(mi_styler, level, names):
+ mi_styler.columns.names = ["zero", "one"]
+ if names:
+ mi_styler.index.names = ["zero", "one"]
+ ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
+ assert len(ctx["head"]) == (2 if names else 1)
+
+
+@pytest.mark.parametrize("method", ["map", "apply"])
+@pytest.mark.parametrize("axis", ["index", "columns"])
+def test_apply_map_header(method, axis):
+ # GH 41893
+ df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
+ func = {
+ "apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
+ "map": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
+ }
+
+ # test execution added to todo
+ result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
+ assert len(result._todo) == 1
+ assert len(getattr(result, f"ctx_{axis}")) == 0
+
+ # test ctx object on compute
+ result._compute()
+ expected = {
+ (0, 0): [("attr", "val")],
+ }
+ assert getattr(result, f"ctx_{axis}") == expected
+
+
+@pytest.mark.parametrize("method", ["apply", "map"])
+@pytest.mark.parametrize("axis", ["index", "columns"])
+def test_apply_map_header_mi(mi_styler, method, axis):
+ # GH 41893
+ func = {
+ "apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
+ "map": lambda v: "attr: val" if "b" in v else "",
+ }
+ result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
+ expected = {(1, 1): [("attr", "val")]}
+ assert getattr(result, f"ctx_{axis}") == expected
+
+
+def test_apply_map_header_raises(mi_styler):
+ # GH 41893
+ with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
+ mi_styler.map_index(lambda v: "attr: val;", axis="bad")._compute()
+
+
+class TestStyler:
+ def test_init_non_pandas(self):
+ msg = "``data`` must be a Series or DataFrame"
+ with pytest.raises(TypeError, match=msg):
+ Styler([1, 2, 3])
+
+ def test_init_series(self):
+ result = Styler(Series([1, 2]))
+ assert result.data.ndim == 2
+
+ def test_repr_html_ok(self, styler):
+ styler._repr_html_()
+
+ def test_repr_html_mathjax(self, styler):
+ # gh-19824 / 41395
+ assert "tex2jax_ignore" not in styler._repr_html_()
+
+ with option_context("styler.html.mathjax", False):
+ assert "tex2jax_ignore" in styler._repr_html_()
+
+ def test_update_ctx(self, styler):
+ styler._update_ctx(DataFrame({"A": ["color: red", "color: blue"]}))
+ expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
+ assert styler.ctx == expected
+
+ def test_update_ctx_flatten_multi_and_trailing_semi(self, styler):
+ attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
+ styler._update_ctx(attrs)
+ expected = {
+ (0, 0): [("color", "red"), ("foo", "bar")],
+ (1, 0): [("color", "blue"), ("foo", "baz")],
+ }
+ assert styler.ctx == expected
+
+ def test_render(self):
+ df = DataFrame({"A": [0, 1]})
+ style = lambda x: Series(["color: red", "color: blue"], name=x.name)
+ s = Styler(df, uuid="AB").apply(style)
+ s.to_html()
+ # it worked?
+
+ def test_multiple_render(self, df):
+ # GH 39396
+ s = Styler(df, uuid_len=0).map(lambda x: "color: red;", subset=["A"])
+ s.to_html() # do 2 renders to ensure css styles not duplicated
+        assert (
+            '<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
+            "  color: red;\n}\n</style>" in s.to_html()
+        )
+
+ def test_render_empty_dfs(self):
+ empty_df = DataFrame()
+ es = Styler(empty_df)
+ es.to_html()
+ # An index but no columns
+ DataFrame(columns=["a"]).style.to_html()
+ # A column but no index
+ DataFrame(index=["a"]).style.to_html()
+ # No IndexError raised?
+
+ def test_render_double(self):
+ df = DataFrame({"A": [0, 1]})
+ style = lambda x: Series(
+ ["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
+ )
+ s = Styler(df, uuid="AB").apply(style)
+ s.to_html()
+ # it worked?
+
+ def test_set_properties(self):
+ df = DataFrame({"A": [0, 1]})
+ result = df.style.set_properties(color="white", size="10px")._compute().ctx
+ # order is deterministic
+ v = [("color", "white"), ("size", "10px")]
+ expected = {(0, 0): v, (1, 0): v}
+ assert result.keys() == expected.keys()
+ for v1, v2 in zip(result.values(), expected.values()):
+ assert sorted(v1) == sorted(v2)
+
+ def test_set_properties_subset(self):
+ df = DataFrame({"A": [0, 1]})
+ result = (
+ df.style.set_properties(subset=IndexSlice[0, "A"], color="white")
+ ._compute()
+ .ctx
+ )
+ expected = {(0, 0): [("color", "white")]}
+ assert result == expected
+
+ def test_empty_index_name_doesnt_display(self, blank_value):
+ # https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
+ df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
+ result = df.style._translate(True, True)
+ assert len(result["head"]) == 1
+ expected = {
+ "class": "blank level0",
+ "type": "th",
+ "value": blank_value,
+ "is_visible": True,
+ "display_value": blank_value,
+ }
+ assert expected.items() <= result["head"][0][0].items()
+
+ def test_index_name(self):
+ # https://github.com/pandas-dev/pandas/issues/11655
+ df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
+ result = df.set_index("A").style._translate(True, True)
+ expected = {
+ "class": "index_name level0",
+ "type": "th",
+ "value": "A",
+ "is_visible": True,
+ "display_value": "A",
+ }
+ assert expected.items() <= result["head"][1][0].items()
+
+ def test_numeric_columns(self):
+ # https://github.com/pandas-dev/pandas/issues/12125
+ # smoke test for _translate
+ df = DataFrame({0: [1, 2, 3]})
+ df.style._translate(True, True)
+
+ def test_apply_axis(self):
+ df = DataFrame({"A": [0, 0], "B": [1, 1]})
+ f = lambda x: [f"val: {x.max()}" for v in x]
+ result = df.style.apply(f, axis=1)
+ assert len(result._todo) == 1
+ assert len(result.ctx) == 0
+ result._compute()
+ expected = {
+ (0, 0): [("val", "1")],
+ (0, 1): [("val", "1")],
+ (1, 0): [("val", "1")],
+ (1, 1): [("val", "1")],
+ }
+ assert result.ctx == expected
+
+ result = df.style.apply(f, axis=0)
+ expected = {
+ (0, 0): [("val", "0")],
+ (0, 1): [("val", "1")],
+ (1, 0): [("val", "0")],
+ (1, 1): [("val", "1")],
+ }
+ result._compute()
+ assert result.ctx == expected
+ result = df.style.apply(f) # default
+ result._compute()
+ assert result.ctx == expected
+
+ @pytest.mark.parametrize("axis", [0, 1])
+ def test_apply_series_return(self, axis):
+ # GH 42014
+ df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
+
+ # test Series return where len(Series) < df.index or df.columns but labels OK
+ func = lambda s: Series(["color: red;"], index=["Y"])
+ result = df.style.apply(func, axis=axis)._compute().ctx
+ assert result[(1, 1)] == [("color", "red")]
+ assert result[(1 - axis, axis)] == [("color", "red")]
+
+ # test Series return where labels align but different order
+ func = lambda s: Series(["color: red;", "color: blue;"], index=["Y", "X"])
+ result = df.style.apply(func, axis=axis)._compute().ctx
+ assert result[(0, 0)] == [("color", "blue")]
+ assert result[(1, 1)] == [("color", "red")]
+ assert result[(1 - axis, axis)] == [("color", "red")]
+ assert result[(axis, 1 - axis)] == [("color", "blue")]
+
+ @pytest.mark.parametrize("index", [False, True])
+ @pytest.mark.parametrize("columns", [False, True])
+ def test_apply_dataframe_return(self, index, columns):
+ # GH 42014
+ df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
+ idxs = ["X", "Y"] if index else ["Y"]
+ cols = ["X", "Y"] if columns else ["Y"]
+ df_styles = DataFrame("color: red;", index=idxs, columns=cols)
+ result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx
+
+ assert result[(1, 1)] == [("color", "red")] # (Y,Y) styles always present
+ assert (result[(0, 1)] == [("color", "red")]) is index # (X,Y) only if index
+ assert (result[(1, 0)] == [("color", "red")]) is columns # (Y,X) only if cols
+ assert (result[(0, 0)] == [("color", "red")]) is (index and columns) # (X,X)
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:],
+ IndexSlice[:, ["A"]],
+ IndexSlice[[1], :],
+ IndexSlice[[1], ["A"]],
+ IndexSlice[:2, ["A", "B"]],
+ ],
+ )
+ @pytest.mark.parametrize("axis", [0, 1])
+ def test_apply_subset(self, slice_, axis, df):
+ def h(x, color="bar"):
+ return Series(f"color: {color}", index=x.index, name=x.name)
+
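+ # the extra ``color="baz"`` kwarg is forwarded to ``h`` by ``Styler.apply``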
+ result = df.style.apply(h, axis=axis, subset=slice_, color="baz")._compute().ctx
+ expected = {
+ (r, c): [("color", "baz")]
+ for r, row in enumerate(df.index)
+ for c, col in enumerate(df.columns)
+ if row in df.loc[slice_].index and col in df.loc[slice_].columns
+ }
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:],
+ IndexSlice[:, ["A"]],
+ IndexSlice[[1], :],
+ IndexSlice[[1], ["A"]],
+ IndexSlice[:2, ["A", "B"]],
+ ],
+ )
+ def test_map_subset(self, slice_, df):
+ result = df.style.map(lambda x: "color:baz;", subset=slice_)._compute().ctx
+ expected = {
+ (r, c): [("color", "baz")]
+ for r, row in enumerate(df.index)
+ for c, col in enumerate(df.columns)
+ if row in df.loc[slice_].index and col in df.loc[slice_].columns
+ }
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:, IndexSlice["x", "A"]],
+ IndexSlice[:, IndexSlice[:, "A"]],
+ IndexSlice[:, IndexSlice[:, ["A", "C"]]], # missing col element
+ IndexSlice[IndexSlice["a", 1], :],
+ IndexSlice[IndexSlice[:, 1], :],
+ IndexSlice[IndexSlice[:, [1, 3]], :], # missing row element
+ IndexSlice[:, ("x", "A")],
+ IndexSlice[("a", 1), :],
+ ],
+ )
+ def test_map_subset_multiindex(self, slice_):
+ # GH 19861
+ # edited for GH 33562
+ if (
+ isinstance(slice_[-1], tuple)
+ and isinstance(slice_[-1][-1], list)
+ and "C" in slice_[-1][-1]
+ ):
+ ctx = pytest.raises(KeyError, match="C")
+ elif (
+ isinstance(slice_[0], tuple)
+ and isinstance(slice_[0][1], list)
+ and 3 in slice_[0][1]
+ ):
+ ctx = pytest.raises(KeyError, match="3")
+ else:
+ ctx = contextlib.nullcontext()
+
+ idx = MultiIndex.from_product([["a", "b"], [1, 2]])
+ col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
+ df = DataFrame(np.random.default_rng(2).random((4, 4)), columns=col, index=idx)
+
+ with ctx:
+ df.style.map(lambda x: "color: red;", subset=slice_).to_html()
+
+ def test_map_subset_multiindex_code(self):
+ # https://github.com/pandas-dev/pandas/issues/25858
+ # Checks styler.map works with multiindex when codes are provided
+ codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
+ columns = MultiIndex(
+ levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
+ )
+ df = DataFrame(
+ [[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
+ )
+ pct_subset = IndexSlice[:, IndexSlice[:, "%":"%"]]
+
+ def color_negative_red(val):
+ color = "red" if val < 0 else "black"
+ return f"color: {color}"
+
+ df.loc[pct_subset]
+ df.style.map(color_negative_red, subset=pct_subset)
+
+ @pytest.mark.parametrize(
+ "stylefunc", ["background_gradient", "bar", "text_gradient"]
+ )
+ def test_subset_for_boolean_cols(self, stylefunc):
+ # GH47838
+ df = DataFrame(
+ [
+ [1, 2],
+ [3, 4],
+ ],
+ columns=[False, True],
+ )
+ styled = getattr(df.style, stylefunc)()
+ styled._compute()
+ assert set(styled.ctx) == {(0, 0), (0, 1), (1, 0), (1, 1)}
+
+ def test_empty(self):
+ df = DataFrame({"A": [1, 0]})
+ s = df.style
+ s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
+
+ result = s._translate(True, True)["cellstyle"]
+ expected = [
+ {"props": [("color", "red")], "selectors": ["row0_col0"]},
+ {"props": [("", "")], "selectors": ["row1_col0"]},
+ ]
+ assert result == expected
+
+ def test_duplicate(self):
+ df = DataFrame({"A": [1, 0]})
+ s = df.style
+ s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
+
+ result = s._translate(True, True)["cellstyle"]
+ expected = [
+ {"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
+ ]
+ assert result == expected
+
+ def test_init_with_na_rep(self):
+ # GH 21527 28358
+ df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
+
+ ctx = Styler(df, na_rep="NA")._translate(True, True)
+ assert ctx["body"][0][1]["display_value"] == "NA"
+ assert ctx["body"][0][2]["display_value"] == "NA"
+
+ def test_caption(self, df):
+ styler = Styler(df, caption="foo")
+ result = styler.to_html()
+ assert all(["caption" in result, "foo" in result])
+
+ styler = df.style
+ result = styler.set_caption("baz")
+ assert styler is result
+ assert styler.caption == "baz"
+
+ def test_uuid(self, df):
+ styler = Styler(df, uuid="abc123")
+ result = styler.to_html()
+ assert "abc123" in result
+
+ styler = df.style
+ result = styler.set_uuid("aaa")
+ assert result is styler
+ assert result.uuid == "aaa"
+
+ def test_unique_id(self):
+ # See https://github.com/pandas-dev/pandas/issues/16780
+ df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
+ result = df.style.to_html(uuid="test")
+ assert "test" in result
+ ids = re.findall('id="(.*?)"', result)
+ assert np.unique(ids).size == len(ids)
+
+ def test_table_styles(self, df):
+ style = [{"selector": "th", "props": [("foo", "bar")]}] # default format
+ styler = Styler(df, table_styles=style)
+ result = " ".join(styler.to_html().split())
+ assert "th { foo: bar; }" in result
+
+ styler = df.style
+ result = styler.set_table_styles(style)
+ assert styler is result
+ assert styler.table_styles == style
+
+ # GH 39563
+ style = [{"selector": "th", "props": "foo:bar;"}] # css string format
+ styler = df.style.set_table_styles(style)
+ result = " ".join(styler.to_html().split())
+ assert "th { foo: bar; }" in result
+
+ def test_table_styles_multiple(self, df):
+ ctx = df.style.set_table_styles(
+ [
+ {"selector": "th,td", "props": "color:red;"},
+ {"selector": "tr", "props": "color:green;"},
+ ]
+ )._translate(True, True)["table_styles"]
+ assert ctx == [
+ {"selector": "th", "props": [("color", "red")]},
+ {"selector": "td", "props": [("color", "red")]},
+ {"selector": "tr", "props": [("color", "green")]},
+ ]
+
+ def test_table_styles_dict_multiple_selectors(self, df):
+ # GH 44011
+ result = df.style.set_table_styles(
+ {
+ "B": [
+ {"selector": "th,td", "props": [("border-left", "2px solid black")]}
+ ]
+ }
+ )._translate(True, True)["table_styles"]
+
+ expected = [
+ {"selector": "th.col1", "props": [("border-left", "2px solid black")]},
+ {"selector": "td.col1", "props": [("border-left", "2px solid black")]},
+ ]
+
+ assert result == expected
+
+ def test_maybe_convert_css_to_tuples(self):
+ expected = [("a", "b"), ("c", "d e")]
+ assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
+ assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected
+ expected = []
+ assert maybe_convert_css_to_tuples("") == expected
+
+ def test_maybe_convert_css_to_tuples_err(self):
+ msg = "Styles supplied as string must follow CSS rule formats"
+ with pytest.raises(ValueError, match=msg):
+ maybe_convert_css_to_tuples("err")
+
+ def test_table_attributes(self, df):
+ attributes = 'class="foo" data-bar'
+ styler = Styler(df, table_attributes=attributes)
+ result = styler.to_html()
+ assert 'class="foo" data-bar' in result
+
+ result = df.style.set_table_attributes(attributes).to_html()
+ assert 'class="foo" data-bar' in result
+
+ def test_apply_none(self):
+ def f(x):
+ return DataFrame(
+ np.where(x == x.max(), "color: red", ""),
+ index=x.index,
+ columns=x.columns,
+ )
+
+ result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
+ assert result[(1, 1)] == [("color", "red")]
+
+ def test_trim(self, df):
+ result = df.style.to_html() # trim=True
+ assert result.count("#") == 0
+
+ result = df.style.highlight_max().to_html()
+ assert result.count("#") == len(df.columns)
+
+ def test_export(self, df, styler):
+ f = lambda x: "color: red" if x > 0 else "color: blue"
+ g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
+ style1 = styler
+ style1.map(f).map(g, z="b").highlight_max()._compute() # = render
+ result = style1.export()
+ style2 = df.style
+ style2.use(result)
+ assert style1._todo == style2._todo
+ style2.to_html()
+
+ def test_bad_apply_shape(self):
+ df = DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])
+
+ msg = "resulted in the apply method collapsing to a Series."
+ with pytest.raises(ValueError, match=msg):
+ df.style._apply(lambda x: "x")
+
+ msg = "created invalid {} labels"
+ with pytest.raises(ValueError, match=msg.format("index")):
+ df.style._apply(lambda x: [""])
+
+ with pytest.raises(ValueError, match=msg.format("index")):
+ df.style._apply(lambda x: ["", "", "", ""])
+
+ with pytest.raises(ValueError, match=msg.format("index")):
+ df.style._apply(lambda x: Series(["a:v;", ""], index=["A", "C"]), axis=0)
+
+ with pytest.raises(ValueError, match=msg.format("columns")):
+ df.style._apply(lambda x: ["", "", ""], axis=1)
+
+ with pytest.raises(ValueError, match=msg.format("columns")):
+ df.style._apply(lambda x: Series(["a:v;", ""], index=["X", "Z"]), axis=1)
+
+ msg = "returned ndarray with wrong shape"
+ with pytest.raises(ValueError, match=msg):
+ df.style._apply(lambda x: np.array([[""], [""]]), axis=None)
+
+ def test_apply_bad_return(self):
+ def f(x):
+ return ""
+
+ df = DataFrame([[1, 2], [3, 4]])
+ msg = (
+ "must return a DataFrame or ndarray when passed to `Styler.apply` "
+ "with axis=None"
+ )
+ with pytest.raises(TypeError, match=msg):
+ df.style._apply(f, axis=None)
+
+ @pytest.mark.parametrize("axis", ["index", "columns"])
+ def test_apply_bad_labels(self, axis):
+ def f(x):
+ return DataFrame(**{axis: ["bad", "labels"]})
+
+ df = DataFrame([[1, 2], [3, 4]])
+ msg = f"created invalid {axis} labels."
+ with pytest.raises(ValueError, match=msg):
+ df.style._apply(f, axis=None)
+
+ def test_get_level_lengths(self):
+ index = MultiIndex.from_product([["a", "b"], [0, 1, 2]])
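+ # keys are (level, element position); values are the computed rowspan lengths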
+ expected = {
+ (0, 0): 3,
+ (0, 3): 3,
+ (1, 0): 1,
+ (1, 1): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ (1, 4): 1,
+ (1, 5): 1,
+ }
+ result = _get_level_lengths(index, sparsify=True, max_index=100)
+ tm.assert_dict_equal(result, expected)
+
+ expected = {
+ (0, 0): 1,
+ (0, 1): 1,
+ (0, 2): 1,
+ (0, 3): 1,
+ (0, 4): 1,
+ (0, 5): 1,
+ (1, 0): 1,
+ (1, 1): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ (1, 4): 1,
+ (1, 5): 1,
+ }
+ result = _get_level_lengths(index, sparsify=False, max_index=100)
+ tm.assert_dict_equal(result, expected)
+
+ def test_get_level_lengths_un_sorted(self):
+ index = MultiIndex.from_arrays([[1, 1, 2, 1], ["a", "b", "b", "d"]])
+ expected = {
+ (0, 0): 2,
+ (0, 2): 1,
+ (0, 3): 1,
+ (1, 0): 1,
+ (1, 1): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ }
+ result = _get_level_lengths(index, sparsify=True, max_index=100)
+ tm.assert_dict_equal(result, expected)
+
+ expected = {
+ (0, 0): 1,
+ (0, 1): 1,
+ (0, 2): 1,
+ (0, 3): 1,
+ (1, 0): 1,
+ (1, 1): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ }
+ result = _get_level_lengths(index, sparsify=False, max_index=100)
+ tm.assert_dict_equal(result, expected)
+
+ def test_mi_sparse_index_names(self, blank_value):
+ # Test that class names and displayed values are correct when rendering MI names
+ df = DataFrame(
+ {"A": [1, 2]},
+ index=MultiIndex.from_arrays(
+ [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
+ ),
+ )
+ result = df.style._translate(True, True)
+ head = result["head"][1]
+ expected = [
+ {
+ "class": "index_name level0",
+ "display_value": "idx_level_0",
+ "is_visible": True,
+ },
+ {
+ "class": "index_name level1",
+ "display_value": "idx_level_1",
+ "is_visible": True,
+ },
+ {
+ "class": "blank col0",
+ "display_value": blank_value,
+ "is_visible": True,
+ },
+ ]
+ for i, expected_dict in enumerate(expected):
+ assert expected_dict.items() <= head[i].items()
+
+ def test_mi_sparse_column_names(self, blank_value):
+ df = DataFrame(
+ np.arange(16).reshape(4, 4),
+ index=MultiIndex.from_arrays(
+ [["a", "a", "b", "a"], [0, 1, 1, 2]],
+ names=["idx_level_0", "idx_level_1"],
+ ),
+ columns=MultiIndex.from_arrays(
+ [["C1", "C1", "C2", "C2"], [1, 0, 1, 0]], names=["colnam_0", "colnam_1"]
+ ),
+ )
+ result = Styler(df, cell_ids=False)._translate(True, True)
+
+ for level in [0, 1]:
+ head = result["head"][level]
+ expected = [
+ {
+ "class": "blank",
+ "display_value": blank_value,
+ "is_visible": True,
+ },
+ {
+ "class": f"index_name level{level}",
+ "display_value": f"colnam_{level}",
+ "is_visible": True,
+ },
+ ]
+ for i, expected_dict in enumerate(expected):
+ assert expected_dict.items() <= head[i].items()
+
+ def test_hide_column_headers(self, df, styler):
+ ctx = styler.hide(axis="columns")._translate(True, True)
+ assert len(ctx["head"]) == 0 # no header entries with an unnamed index
+
+ df.index.name = "some_name"
+ ctx = df.style.hide(axis="columns")._translate(True, True)
+ assert len(ctx["head"]) == 1
+ # index names still visible, changed in #42101, reverted in #43404
+
+ def test_hide_single_index(self, df):
+ # GH 14194
+ # single unnamed index
+ ctx = df.style._translate(True, True)
+ assert ctx["body"][0][0]["is_visible"]
+ assert ctx["head"][0][0]["is_visible"]
+ ctx2 = df.style.hide(axis="index")._translate(True, True)
+ assert not ctx2["body"][0][0]["is_visible"]
+ assert not ctx2["head"][0][0]["is_visible"]
+
+ # single named index
+ ctx3 = df.set_index("A").style._translate(True, True)
+ assert ctx3["body"][0][0]["is_visible"]
+ assert len(ctx3["head"]) == 2 # 2 header levels
+ assert ctx3["head"][0][0]["is_visible"]
+
+ ctx4 = df.set_index("A").style.hide(axis="index")._translate(True, True)
+ assert not ctx4["body"][0][0]["is_visible"]
+ assert len(ctx4["head"]) == 1 # only 1 header levels
+ assert not ctx4["head"][0][0]["is_visible"]
+
+ def test_hide_multiindex(self):
+ # GH 14194
+ df = DataFrame(
+ {"A": [1, 2], "B": [1, 2]},
+ index=MultiIndex.from_arrays(
+ [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
+ ),
+ )
+ ctx1 = df.style._translate(True, True)
+ # tests for 'a' and '0'
+ assert ctx1["body"][0][0]["is_visible"]
+ assert ctx1["body"][0][1]["is_visible"]
+ # check for blank header rows
+ assert len(ctx1["head"][0]) == 4 # two visible indexes and two data columns
+
+ ctx2 = df.style.hide(axis="index")._translate(True, True)
+ # tests for 'a' and '0'
+ assert not ctx2["body"][0][0]["is_visible"]
+ assert not ctx2["body"][0][1]["is_visible"]
+ # check for blank header rows
+ assert len(ctx2["head"][0]) == 3 # one hidden (col name) and two data columns
+ assert not ctx2["head"][0][0]["is_visible"]
+
+ def test_hide_columns_single_level(self, df):
+ # GH 14194
+ # test hiding single column
+ ctx = df.style._translate(True, True)
+ assert ctx["head"][0][1]["is_visible"]
+ assert ctx["head"][0][1]["display_value"] == "A"
+ assert ctx["head"][0][2]["is_visible"]
+ assert ctx["head"][0][2]["display_value"] == "B"
+ assert ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ ctx = df.style.hide("A", axis="columns")._translate(True, True)
+ assert not ctx["head"][0][1]["is_visible"]
+ assert not ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ # test hiding multiple columns
+ ctx = df.style.hide(["A", "B"], axis="columns")._translate(True, True)
+ assert not ctx["head"][0][1]["is_visible"]
+ assert not ctx["head"][0][2]["is_visible"]
+ assert not ctx["body"][0][1]["is_visible"] # col A, row 1
+ assert not ctx["body"][1][2]["is_visible"] # col B, row 1
+
+ def test_hide_columns_index_mult_levels(self):
+ # GH 14194
+ # setup dataframe with multiple column levels and indices
+ i1 = MultiIndex.from_arrays(
+ [["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
+ )
+ i2 = MultiIndex.from_arrays(
+ [["b", "b"], [0, 1]], names=["col_level_0", "col_level_1"]
+ )
+ df = DataFrame([[1, 2], [3, 4]], index=i1, columns=i2)
+ ctx = df.style._translate(True, True)
+ # column headers
+ assert ctx["head"][0][2]["is_visible"]
+ assert ctx["head"][1][2]["is_visible"]
+ assert ctx["head"][1][3]["display_value"] == "1"
+ # indices
+ assert ctx["body"][0][0]["is_visible"]
+ # data
+ assert ctx["body"][1][2]["is_visible"]
+ assert ctx["body"][1][2]["display_value"] == "3"
+ assert ctx["body"][1][3]["is_visible"]
+ assert ctx["body"][1][3]["display_value"] == "4"
+
+ # hide top column level, which hides both columns
+ ctx = df.style.hide("b", axis="columns")._translate(True, True)
+ assert not ctx["head"][0][2]["is_visible"] # b
+ assert not ctx["head"][1][2]["is_visible"] # 0
+ assert not ctx["body"][1][2]["is_visible"] # 3
+ assert ctx["body"][0][0]["is_visible"] # index
+
+ # hide first column only
+ ctx = df.style.hide([("b", 0)], axis="columns")._translate(True, True)
+ assert not ctx["head"][0][2]["is_visible"] # b
+ assert ctx["head"][0][3]["is_visible"] # b
+ assert not ctx["head"][1][2]["is_visible"] # 0
+ assert not ctx["body"][1][2]["is_visible"] # 3
+ assert ctx["body"][1][3]["is_visible"]
+ assert ctx["body"][1][3]["display_value"] == "4"
+
+ # hide second column and index
+ ctx = df.style.hide([("b", 1)], axis=1).hide(axis=0)._translate(True, True)
+ assert not ctx["body"][0][0]["is_visible"] # index
+ assert len(ctx["head"][0]) == 3
+ assert ctx["head"][0][1]["is_visible"] # b
+ assert ctx["head"][1][1]["is_visible"] # 0
+ assert not ctx["head"][1][2]["is_visible"] # 1
+ assert not ctx["body"][1][3]["is_visible"] # 4
+ assert ctx["body"][1][2]["is_visible"]
+ assert ctx["body"][1][2]["display_value"] == "3"
+
+ # hide top row level, which hides both rows so body empty
+ ctx = df.style.hide("a", axis="index")._translate(True, True)
+ assert ctx["body"] == []
+
+ # hide first row only
+ ctx = df.style.hide(("a", 0), axis="index")._translate(True, True)
+ for i in [0, 1, 2, 3]:
+ assert "row1" in ctx["body"][0][i]["class"] # row0 not included in body
+ assert ctx["body"][0][i]["is_visible"]
+
+ def test_pipe(self, df):
+ def set_caption_from_template(styler, a, b):
+ return styler.set_caption(f"Dataframe with a = {a} and b = {b}")
+
+ styler = df.style.pipe(set_caption_from_template, "A", b="B")
+ assert "Dataframe with a = A and b = B" in styler.to_html()
+
+ # Test with an argument that is a (callable, keyword_name) pair.
+ def f(a, b, styler):
+ return (a, b, styler)
+
+ styler = df.style
+ result = styler.pipe((f, "styler"), a=1, b=2)
+ assert result == (1, 2, styler)
+
+ def test_no_cell_ids(self):
+ # GH 35588
+ # GH 35663
+ df = DataFrame(data=[[0]])
+ styler = Styler(df, uuid="_", cell_ids=False)
+ styler.to_html()
+ s = styler.to_html() # render twice to ensure ctx is not updated
+ assert s.find('<td class="data row0 col0" >') != -1
+
+ @pytest.mark.parametrize(
+ "classes",
+ [
+ DataFrame(
+ data=[["", "test-class"], [np.nan, None]],
+ columns=["A", "B"],
+ index=["a", "b"],
+ ),
+ DataFrame(data=[["test-class"]], columns=["B"], index=["a"]),
+ DataFrame(data=[["test-class", "unused"]], columns=["B", "C"], index=["a"]),
+ ],
+ )
+ def test_set_data_classes(self, classes):
+ # GH 36159
+ df = DataFrame(data=[[0, 1], [2, 3]], columns=["A", "B"], index=["a", "b"])
+ s = Styler(df, uuid_len=0, cell_ids=False).set_td_classes(classes).to_html()
+ assert '<td class="data row0 col0" >0</td>' in s
+ assert '<td class="data row0 col1 test-class" >1</td>' in s
+ assert '<td class="data row1 col0" >2</td>' in s
+ assert '<td class="data row1 col1" >3</td>' in s
+ # GH 39317
+ s = Styler(df, uuid_len=0, cell_ids=True).set_td_classes(classes).to_html()
+ assert '<td id="T__row0_col0" class="data row0 col0" >0</td>' in s
+ assert '<td id="T__row0_col1" class="data row0 col1 test-class" >1</td>' in s
+ assert '<td id="T__row1_col0" class="data row1 col0" >2</td>' in s
+ assert '<td id="T__row1_col1" class="data row1 col1" >3</td>' in s
+
+ def test_set_data_classes_reindex(self):
+ # GH 39317
+ df = DataFrame(
+ data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=[0, 1, 2], index=[0, 1, 2]
+ )
+ classes = DataFrame(
+ data=[["mi", "ma"], ["mu", "mo"]],
+ columns=[0, 2],
+ index=[0, 2],
+ )
+ s = Styler(df, uuid_len=0).set_td_classes(classes).to_html()
+ assert '<td id="T__row0_col0" class="data row0 col0 mi" >0</td>' in s
+ assert '<td id="T__row0_col2" class="data row0 col2 ma" >2</td>' in s
+ assert '<td id="T__row1_col1" class="data row1 col1" >4</td>' in s
+ assert '<td id="T__row2_col0" class="data row2 col0 mu" >6</td>' in s
+ assert '<td id="T__row2_col2" class="data row2 col2 mo" >8</td>' in s
+
+ def test_chaining_table_styles(self):
+ # GH 35607
+ df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
+ styler = df.style.set_table_styles(
+ [{"selector": "", "props": [("background-color", "yellow")]}]
+ ).set_table_styles(
+ [{"selector": ".col0", "props": [("background-color", "blue")]}],
+ overwrite=False,
+ )
+ assert len(styler.table_styles) == 2
+
+ def test_column_and_row_styling(self):
+ # GH 35607
+ df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
+ s = Styler(df, uuid_len=0)
+ s = s.set_table_styles({"A": [{"selector": "", "props": [("color", "blue")]}]})
+ assert "#T_ .col0 {\n color: blue;\n}" in s.to_html()
+ s = s.set_table_styles(
+ {0: [{"selector": "", "props": [("color", "blue")]}]}, axis=1
+ )
+ assert "#T_ .row0 {\n color: blue;\n}" in s.to_html()
+
+ @pytest.mark.parametrize("len_", [1, 5, 32, 33, 100])
+ def test_uuid_len(self, len_):
+ # GH 36345
+ df = DataFrame(data=[["A"]])
+ s = Styler(df, uuid_len=len_, cell_ids=False).to_html()
+ strt = s.find('id="T_')
+ end = s[strt + 6 :].find('"')
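+ # ids render as 'T_<uuid>'; uuids longer than 32 characters are clipped to 32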
+ if len_ > 32:
+ assert end == 32
+ else:
+ assert end == len_
+
+ @pytest.mark.parametrize("len_", [-2, "bad", None])
+ def test_uuid_len_raises(self, len_):
+ # GH 36345
+ df = DataFrame(data=[["A"]])
+ msg = "``uuid_len`` must be an integer in range \\[0, 32\\]."
+ with pytest.raises(TypeError, match=msg):
+ Styler(df, uuid_len=len_, cell_ids=False).to_html()
+
+ @pytest.mark.parametrize(
+ "slc",
+ [
+ IndexSlice[:, :],
+ IndexSlice[:, 1],
+ IndexSlice[1, :],
+ IndexSlice[[1], [1]],
+ IndexSlice[1, [1]],
+ IndexSlice[[1], 1],
+ IndexSlice[1],
+ IndexSlice[1, 1],
+ slice(None, None, None),
+ [0, 1],
+ np.array([0, 1]),
+ Series([0, 1]),
+ ],
+ )
+ def test_non_reducing_slice(self, slc):
+ df = DataFrame([[0, 1], [2, 3]])
+
+ tslice_ = non_reducing_slice(slc)
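+ # non_reducing_slice wraps scalar indexers so .loc returns a DataFrame, never a Series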
+ assert isinstance(df.loc[tslice_], DataFrame)
+
+ @pytest.mark.parametrize("box", [list, Series, np.array])
+ def test_list_slice(self, box):
+ # like dataframe getitem
+ subset = box(["A"])
+
+ df = DataFrame({"A": [1, 2], "B": [3, 4]}, index=["A", "B"])
+ expected = IndexSlice[:, ["A"]]
+
+ result = non_reducing_slice(subset)
+ tm.assert_frame_equal(df.loc[result], df.loc[expected])
+
+ def test_non_reducing_slice_on_multiindex(self):
+ # GH 19861
+ dic = {
+ ("a", "d"): [1, 4],
+ ("a", "c"): [2, 3],
+ ("b", "c"): [3, 2],
+ ("b", "d"): [4, 1],
+ }
+ df = DataFrame(dic, index=[0, 1])
+ idx = IndexSlice
+ slice_ = idx[:, idx["b", "d"]]
+ tslice_ = non_reducing_slice(slice_)
+
+ result = df.loc[tslice_]
+ expected = DataFrame({("b", "d"): [4, 1]})
+ tm.assert_frame_equal(result, expected)
+
+ @pytest.mark.parametrize(
+ "slice_",
+ [
+ IndexSlice[:, :],
+ # check cols
+ IndexSlice[:, IndexSlice[["a"]]], # inferred deeper need list
+ IndexSlice[:, IndexSlice[["a"], ["c"]]], # inferred deeper need list
+ IndexSlice[:, IndexSlice["a", "c", :]],
+ IndexSlice[:, IndexSlice["a", :, "e"]],
+ IndexSlice[:, IndexSlice[:, "c", "e"]],
+ IndexSlice[:, IndexSlice["a", ["c", "d"], :]], # check list
+ IndexSlice[:, IndexSlice["a", ["c", "d", "-"], :]], # don't allow missing
+ IndexSlice[:, IndexSlice["a", ["c", "d", "-"], "e"]], # no slice
+ # check rows
+ IndexSlice[IndexSlice[["U"]], :], # inferred deeper need list
+ IndexSlice[IndexSlice[["U"], ["W"]], :], # inferred deeper need list
+ IndexSlice[IndexSlice["U", "W", :], :],
+ IndexSlice[IndexSlice["U", :, "Y"], :],
+ IndexSlice[IndexSlice[:, "W", "Y"], :],
+ IndexSlice[IndexSlice[:, "W", ["Y", "Z"]], :], # check list
+ IndexSlice[IndexSlice[:, "W", ["Y", "Z", "-"]], :], # don't allow missing
+ IndexSlice[IndexSlice["U", "W", ["Y", "Z", "-"]], :], # no slice
+ # check simultaneous
+ IndexSlice[IndexSlice[:, "W", "Y"], IndexSlice["a", "c", :]],
+ ],
+ )
+ def test_non_reducing_multi_slice_on_multiindex(self, slice_):
+ # GH 33562
+ cols = MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]])
+ idxs = MultiIndex.from_product([["U", "V"], ["W", "X"], ["Y", "Z"]])
+ df = DataFrame(np.arange(64).reshape(8, 8), columns=cols, index=idxs)
+
+ for lvl in [0, 1]:
+ key = slice_[lvl]
+ if isinstance(key, tuple):
+ for subkey in key:
+ if isinstance(subkey, list) and "-" in subkey:
+ # not present in the index level, raises KeyError since 2.0
+ with pytest.raises(KeyError, match="-"):
+ df.loc[slice_]
+ return
+
+ expected = df.loc[slice_]
+ result = df.loc[non_reducing_slice(slice_)]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_hidden_index_names(mi_df):
+ mi_df.index.names = ["Lev0", "Lev1"]
+ mi_styler = mi_df.style
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 3 # 2 column index levels + 1 index names row
+
+ mi_styler.hide(axis="index", names=True)
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 2 # index names row is unparsed
+ for i in range(4):
+ assert ctx["body"][0][i]["is_visible"] # 2 index levels + 2 data values visible
+
+ mi_styler.hide(axis="index", level=1)
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 2 # index names row is still hidden
+ assert ctx["body"][0][0]["is_visible"] is True
+ assert ctx["body"][0][1]["is_visible"] is False
+
+
+def test_hidden_column_names(mi_df):
+ mi_df.columns.names = ["Lev0", "Lev1"]
+ mi_styler = mi_df.style
+ ctx = mi_styler._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == "Lev0"
+ assert ctx["head"][1][1]["display_value"] == "Lev1"
+
+ mi_styler.hide(names=True, axis="columns")
+ ctx = mi_styler._translate(True, True)
+ assert ctx["head"][0][1]["display_value"] == " "
+ assert ctx["head"][1][1]["display_value"] == " "
+
+ mi_styler.hide(level=0, axis="columns")
+ ctx = mi_styler._translate(True, True)
+ assert len(ctx["head"]) == 1 # no index names and only one visible column headers
+ assert ctx["head"][0][1]["display_value"] == " "
+
+
+@pytest.mark.parametrize("caption", [1, ("a", "b", "c"), (1, "s")])
+def test_caption_raises(mi_styler, caption):
+ msg = "`caption` must be either a string or 2-tuple of strings."
+ with pytest.raises(ValueError, match=msg):
+ mi_styler.set_caption(caption)
+
+
+def test_hiding_headers_over_index_no_sparsify():
+ # GH 43464
+ midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]])
+ df = DataFrame(9, index=midx, columns=[0])
+ ctx = df.style._translate(False, False)
+ assert len(ctx["body"]) == 6
+ ctx = df.style.hide((1, "a"), axis=0)._translate(False, False)
+ assert len(ctx["body"]) == 4
+ assert "row2" in ctx["body"][0][0]["class"]
+
+
+def test_hiding_headers_over_columns_no_sparsify():
+ # GH 43464
+ midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]])
+ df = DataFrame(9, columns=midx, index=[0])
+ ctx = df.style._translate(False, False)
+ for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]:
+ assert ctx["head"][ix[0]][ix[1]]["is_visible"] is True
+ ctx = df.style.hide((1, "a"), axis="columns")._translate(False, False)
+ for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]:
+ assert ctx["head"][ix[0]][ix[1]]["is_visible"] is False
+
+
+def test_get_level_lengths_mi_hidden():
+ # GH 43464
+ index = MultiIndex.from_arrays([[1, 1, 1, 2, 2, 2], ["a", "a", "b", "a", "a", "b"]])
+ expected = {
+ (0, 2): 1,
+ (0, 3): 1,
+ (0, 4): 1,
+ (0, 5): 1,
+ (1, 2): 1,
+ (1, 3): 1,
+ (1, 4): 1,
+ (1, 5): 1,
+ }
+ result = _get_level_lengths(
+ index,
+ sparsify=False,
+ max_index=100,
+ hidden_elements=[0, 1, 0, 1], # hidden elements can repeat if the index has duplicates
+ )
+ tm.assert_dict_equal(result, expected)
+
+
+def test_row_trimming_hide_index():
+ # gh 43703
+ df = DataFrame([[1], [2], [3], [4], [5]])
+ with option_context("styler.render.max_rows", 2):
+ ctx = df.style.hide([0, 1], axis="index")._translate(True, True)
+ assert len(ctx["body"]) == 3
+ for r, val in enumerate(["3", "4", "..."]):
+ assert ctx["body"][r][1]["display_value"] == val
+
+
+def test_row_trimming_hide_index_mi():
+ # gh 44247
+ df = DataFrame([[1], [2], [3], [4], [5]])
+ df.index = MultiIndex.from_product([[0], [0, 1, 2, 3, 4]])
+ with option_context("styler.render.max_rows", 2):
+ ctx = df.style.hide([(0, 0), (0, 1)], axis="index")._translate(True, True)
+ assert len(ctx["body"]) == 3
+
+ # level 0 index headers (sparsified)
+ assert {"value": 0, "attributes": 'rowspan="2"', "is_visible": True}.items() <= ctx[
+ "body"
+ ][0][0].items()
+ assert {"value": 0, "attributes": "", "is_visible": False}.items() <= ctx["body"][
+ 1
+ ][0].items()
+ assert {"value": "...", "is_visible": True}.items() <= ctx["body"][2][0].items()
+
+ for r, val in enumerate(["2", "3", "..."]):
+ assert ctx["body"][r][1]["display_value"] == val # level 1 index headers
+ for r, val in enumerate(["3", "4", "..."]):
+ assert ctx["body"][r][2]["display_value"] == val # data values
+
+
+def test_col_trimming_hide_columns():
+ # gh 44272
+ df = DataFrame([[1, 2, 3, 4, 5]])
+ with option_context("styler.render.max_columns", 2):
+ ctx = df.style.hide([0, 1], axis="columns")._translate(True, True)
+
+ assert len(ctx["head"][0]) == 6 # blank, [0, 1 (hidden)], [2 ,3 (visible)], + trim
+ for c, vals in enumerate([(1, False), (2, True), (3, True), ("...", True)]):
+ assert ctx["head"][0][c + 2]["value"] == vals[0]
+ assert ctx["head"][0][c + 2]["is_visible"] == vals[1]
+
+ assert len(ctx["body"][0]) == 6 # index + 2 hidden + 2 visible + trimming col
+
+
+def test_no_empty_apply(mi_styler):
+ # 45313
+ mi_styler.apply(lambda s: ["a:v;"] * 2, subset=[False, False])
+ mi_styler._compute()
+
+
+@pytest.mark.parametrize("format", ["html", "latex", "string"])
+def test_output_buffer(mi_styler, format):
+ # gh 47053
+ with tm.ensure_clean(f"delete_me.{format}") as f:
+ getattr(mi_styler, f"to_{format}")(f)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f1443c3ee66be040f668f546682924207cfd31e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py
@@ -0,0 +1,1090 @@
+from textwrap import dedent
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+ Series,
+ option_context,
+)
+
+pytest.importorskip("jinja2")
+from pandas.io.formats.style import Styler
+from pandas.io.formats.style_render import (
+ _parse_latex_cell_styles,
+ _parse_latex_css_conversion,
+ _parse_latex_header_span,
+ _parse_latex_table_styles,
+ _parse_latex_table_wrapping,
+)
+
+
+@pytest.fixture
+def df():
+ return DataFrame(
+ {"A": [0, 1], "B": [-0.61, -1.22], "C": Series(["ab", "cd"], dtype=object)}
+ )
+
+
+@pytest.fixture
+def df_ext():
+ return DataFrame(
+ {"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]}
+ )
+
+
+@pytest.fixture
+def styler(df):
+ return Styler(df, uuid_len=0, precision=2)
+
+
+def test_minimal_latex_tabular(styler):
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ & A & B & C \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex() == expected
+
+
+def test_tabular_hrules(styler):
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ \\toprule
+ & A & B & C \\\\
+ \\midrule
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\bottomrule
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex(hrules=True) == expected
+
+
+def test_tabular_custom_hrules(styler):
+ styler.set_table_styles(
+ [
+ {"selector": "toprule", "props": ":hline"},
+ {"selector": "bottomrule", "props": ":otherline"},
+ ]
+ ) # no midrule
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ \\hline
+ & A & B & C \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\otherline
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex() == expected
+
+
+def test_column_format(styler):
+ # default setting is already tested in `test_minimal_latex_tabular`
+ styler.set_table_styles([{"selector": "column_format", "props": ":cccc"}])
+
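+ # the ``column_format`` keyword takes precedence over the table style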
+ assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_format="rrrr")
+ styler.set_table_styles([{"selector": "column_format", "props": ":r|r|cc"}])
+ assert "\\begin{tabular}{r|r|cc}" in styler.to_latex()
+
+
+def test_siunitx_cols(styler):
+ expected = dedent(
+ """\
+ \\begin{tabular}{lSSl}
+ {} & {A} & {B} & {C} \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex(siunitx=True) == expected
+
+
+def test_position(styler):
+ assert "\\begin{table}[h!]" in styler.to_latex(position="h!")
+ assert "\\end{table}" in styler.to_latex(position="h!")
+ styler.set_table_styles([{"selector": "position", "props": ":b!"}])
+ assert "\\begin{table}[b!]" in styler.to_latex()
+ assert "\\end{table}" in styler.to_latex()
+
+
+@pytest.mark.parametrize("env", [None, "longtable"])
+def test_label(styler, env):
+ assert "\n\\label{text}" in styler.to_latex(label="text", environment=env)
+ styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}])
+ assert "\n\\label{more :text}" in styler.to_latex(environment=env)
+
+
+def test_position_float_raises(styler):
+ msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering',"
+ with pytest.raises(ValueError, match=msg):
+ styler.to_latex(position_float="bad_string")
+
+ msg = "`position_float` cannot be used in 'longtable' `environment`"
+ with pytest.raises(ValueError, match=msg):
+ styler.to_latex(position_float="centering", environment="longtable")
+
+
+@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")])
+@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")])
+@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")])
+@pytest.mark.parametrize("column_format", [(None, ""), ("rcrl", "{tabular}{rcrl}")])
+@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")])
+def test_kwargs_combinations(
+ styler, label, position, caption, column_format, position_float
+):
+ result = styler.to_latex(
+ label=label[0],
+ position=position[0],
+ caption=caption[0],
+ column_format=column_format[0],
+ position_float=position_float[0],
+ )
+ assert label[1] in result
+ assert position[1] in result
+ assert caption[1] in result
+ assert column_format[1] in result
+ assert position_float[1] in result
+
+
+def test_custom_table_styles(styler):
+ styler.set_table_styles(
+ [
+ {"selector": "mycommand", "props": ":{myoptions}"},
+ {"selector": "mycommand2", "props": ":{myoptions2}"},
+ ]
+ )
+ expected = dedent(
+ """\
+ \\begin{table}
+ \\mycommand{myoptions}
+ \\mycommand2{myoptions2}
+ """
+ )
+ assert expected in styler.to_latex()
+
+
+def test_cell_styling(styler):
+ styler.highlight_max(props="itshape:;Huge:--wrap;")
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ & A & B & C \\\\
+ 0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\
+ 1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\
+ \\end{tabular}
+ """
+ )
+ assert expected == styler.to_latex()
+
+
+def test_multiindex_columns(df):
+ cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df.columns = cidx
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ & \\multicolumn{2}{r}{A} & B \\\\
+ & a & b & c \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ s = df.style.format(precision=2)
+ assert expected == s.to_latex()
+
+ # non-sparse
+ expected = dedent(
+ """\
+ \\begin{tabular}{lrrl}
+ & A & A & B \\\\
+ & a & b & c \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ s = df.style.format(precision=2)
+ assert expected == s.to_latex(sparse_columns=False)
+
+
+def test_multiindex_row(df_ext):
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index = ridx
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & A & B & C \\\\
+ \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
+ & b & 1 & -1.22 & cd \\\\
+ B & c & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ styler = df_ext.style.format(precision=2)
+ result = styler.to_latex()
+ assert expected == result
+
+ # non-sparse
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & A & B & C \\\\
+ A & a & 0 & -0.61 & ab \\\\
+ A & b & 1 & -1.22 & cd \\\\
+ B & c & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ result = styler.to_latex(sparse_index=False)
+ assert expected == result
+
+
+def test_multirow_naive(df_ext):
+ ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")])
+ df_ext.index = ridx
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & A & B & C \\\\
+ X & x & 0 & -0.61 & ab \\\\
+ & y & 1 & -1.22 & cd \\\\
+ Y & z & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ styler = df_ext.style.format(precision=2)
+ result = styler.to_latex(multirow_align="naive")
+ assert expected == result
+
+
+def test_multiindex_row_and_col(df_ext):
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & \\multicolumn{2}{l}{Z} & Y \\\\
+ & & a & b & c \\\\
+ \\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
+ & b & 1 & -1.22 & cd \\\\
+ B & c & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ styler = df_ext.style.format(precision=2)
+ result = styler.to_latex(multirow_align="b", multicol_align="l")
+ assert result == expected
+
+ # non-sparse
+ expected = dedent(
+ """\
+ \\begin{tabular}{llrrl}
+ & & Z & Z & Y \\\\
+ & & a & b & c \\\\
+ A & a & 0 & -0.61 & ab \\\\
+ A & b & 1 & -1.22 & cd \\\\
+ B & c & 2 & -2.22 & de \\\\
+ \\end{tabular}
+ """
+ )
+ result = styler.to_latex(sparse_index=False, sparse_columns=False)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "multicol_align, siunitx, header",
+ [
+ ("naive-l", False, " & A & &"),
+ ("naive-r", False, " & & & A"),
+ ("naive-l", True, "{} & {A} & {} & {}"),
+ ("naive-r", True, "{} & {} & {} & {A}"),
+ ],
+)
+def test_multicol_naive(df, multicol_align, siunitx, header):
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")])
+ df.columns = ridx
+ level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}"
+ col_format = "lrrl" if not siunitx else "lSSl"
+ expected = dedent(
+ f"""\
+ \\begin{{tabular}}{{{col_format}}}
+ {header} \\\\
+ {level1} \\\\
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{{tabular}}
+ """
+ )
+ styler = df.style.format(precision=2)
+ result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx)
+ assert expected == result
+
+
+def test_multi_options(df_ext):
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ styler = df_ext.style.format(precision=2)
+
+ expected = dedent(
+ """\
+ & & \\multicolumn{2}{r}{Z} & Y \\\\
+ & & a & b & c \\\\
+ \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\
+ """
+ )
+ result = styler.to_latex()
+ assert expected in result
+
+ with option_context("styler.latex.multicol_align", "l"):
+ assert " & & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex()
+
+ with option_context("styler.latex.multirow_align", "b"):
+ assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex()
+
+
+def test_multiindex_columns_hidden():
+ df = DataFrame([[1, 2, 3, 4]])
+ df.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)])
+ s = df.style
+ assert "{tabular}{lrrrr}" in s.to_latex()
+ s.set_table_styles([]) # reset the position command
+ s.hide([("A", 2)], axis="columns")
+ assert "{tabular}{lrrr}" in s.to_latex()
+
+
+@pytest.mark.parametrize(
+ "option, value",
+ [
+ ("styler.sparse.index", True),
+ ("styler.sparse.index", False),
+ ("styler.sparse.columns", True),
+ ("styler.sparse.columns", False),
+ ],
+)
+def test_sparse_options(df_ext, option, value):
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ styler = df_ext.style
+
+ latex1 = styler.to_latex()
+ with option_context(option, value):
+ latex2 = styler.to_latex()
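+ # sparsification defaults to True, so the render only changes when value is False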
+ assert (latex1 == latex2) is value
+
+
+def test_hidden_index(styler):
+ styler.hide(axis="index")
+ expected = dedent(
+ """\
+ \\begin{tabular}{rrl}
+ A & B & C \\\\
+ 0 & -0.61 & ab \\\\
+ 1 & -1.22 & cd \\\\
+ \\end{tabular}
+ """
+ )
+ assert styler.to_latex() == expected
+
+
+@pytest.mark.parametrize("environment", ["table", "figure*", None])
+def test_comprehensive(df_ext, environment):
+ # test as many low level features simultaneously as possible
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ stlr = df_ext.style
+ stlr.set_caption("mycap")
+ stlr.set_table_styles(
+ [
+ {"selector": "label", "props": ":{fig§item}"},
+ {"selector": "position", "props": ":h!"},
+ {"selector": "position_float", "props": ":centering"},
+ {"selector": "column_format", "props": ":rlrlr"},
+ {"selector": "toprule", "props": ":toprule"},
+ {"selector": "midrule", "props": ":midrule"},
+ {"selector": "bottomrule", "props": ":bottomrule"},
+ {"selector": "rowcolors", "props": ":{3}{pink}{}"}, # custom command
+ ]
+ )
+ stlr.highlight_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap")
+ stlr.highlight_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")])
+
+ expected = (
+ """\
+\\begin{table}[h!]
+\\centering
+\\caption{mycap}
+\\label{fig:item}
+\\rowcolors{3}{pink}{}
+\\begin{tabular}{rlrlr}
+\\toprule
+ & & \\multicolumn{2}{r}{Z} & Y \\\\
+ & & a & b & c \\\\
+\\midrule
+\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\
+ & b & 1 & -1.22 & cd \\\\
+B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """
+ """\
+\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\
+\\bottomrule
+\\end{tabular}
+\\end{table}
+"""
+ ).replace("table", environment if environment else "table")
+ result = stlr.format(precision=2).to_latex(environment=environment)
+ assert result == expected
+
+
+def test_environment_option(styler):
+ with option_context("styler.latex.environment", "bar-env"):
+ assert "\\begin{bar-env}" in styler.to_latex()
+ assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env")
+
+
+def test_parse_latex_table_styles(styler):
+ styler.set_table_styles(
+ [
+ {"selector": "foo", "props": [("attr", "value")]},
+ {"selector": "bar", "props": [("attr", "overwritten")]},
+ {"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]},
+ {"selector": "label", "props": [("", "{fig§item}")]},
+ ]
+ )
+ assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz"
+
+ # test '§' replaced by ':' [for CSS compatibility]
+ assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}"
+
+
+def test_parse_latex_cell_styles_basic(): # test nesting
+ cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")]
+ expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}"
+ assert _parse_latex_cell_styles(cell_style, "text") == expected
+
+
+@pytest.mark.parametrize(
+ "wrap_arg, expected",
+ [ # test wrapping
+ ("", "\\ "),
+ ("--wrap", "{\\ }"),
+ ("--nowrap", "\\ "),
+ ("--lwrap", "{\\} "),
+ ("--dwrap", "{\\}{}"),
+ ("--rwrap", "\\{}"),
+ ],
+)
+def test_parse_latex_cell_styles_braces(wrap_arg, expected):
+ cell_style = [("", f"{wrap_arg}")]
+ assert _parse_latex_cell_styles(cell_style, "") == expected
+
+
+def test_parse_latex_header_span():
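+ # colspan maps to \multicolumn and rowspan to \multirow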
+ cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []}
+ expected = "\\multicolumn{3}{Y}{text}"
+ assert _parse_latex_header_span(cell, "X", "Y") == expected
+
+ cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []}
+ expected = "\\multirow[X]{5}{*}{text}"
+ assert _parse_latex_header_span(cell, "X", "Y") == expected
+
+ cell = {"display_value": "text", "cellstyle": []}
+ assert _parse_latex_header_span(cell, "X", "Y") == "text"
+
+ cell = {"display_value": "text", "cellstyle": [("bfseries", "--rwrap")]}
+ assert _parse_latex_header_span(cell, "X", "Y") == "\\bfseries{text}"
+
+
+def test_parse_latex_table_wrapping(styler):
+ styler.set_table_styles(
+ [
+ {"selector": "toprule", "props": ":value"},
+ {"selector": "bottomrule", "props": ":value"},
+ {"selector": "midrule", "props": ":value"},
+ {"selector": "column_format", "props": ":value"},
+ ]
+ )
+ assert _parse_latex_table_wrapping(styler.table_styles, styler.caption) is False
+ assert _parse_latex_table_wrapping(styler.table_styles, "some caption") is True
+ styler.set_table_styles(
+ [
+ {"selector": "not-ignored", "props": ":value"},
+ ],
+ overwrite=False,
+ )
+ assert _parse_latex_table_wrapping(styler.table_styles, None) is True
+
+
+def test_short_caption(styler):
+ result = styler.to_latex(caption=("full cap", "short cap"))
+ assert "\\caption[short cap]{full cap}" in result
+
+
+@pytest.mark.parametrize(
+ "css, expected",
+ [
+ ([("color", "red")], [("color", "{red}")]), # test color and input format types
+ (
+ [("color", "rgb(128, 128, 128 )")],
+ [("color", "[rgb]{0.502, 0.502, 0.502}")],
+ ),
+ (
+ [("color", "rgb(128, 50%, 25% )")],
+ [("color", "[rgb]{0.502, 0.500, 0.250}")],
+ ),
+ (
+ [("color", "rgba(128,128,128,1)")],
+ [("color", "[rgb]{0.502, 0.502, 0.502}")],
+ ),
+ ([("color", "#FF00FF")], [("color", "[HTML]{FF00FF}")]),
+ ([("color", "#F0F")], [("color", "[HTML]{FF00FF}")]),
+ ([("font-weight", "bold")], [("bfseries", "")]), # test font-weight and types
+ ([("font-weight", "bolder")], [("bfseries", "")]),
+ ([("font-weight", "normal")], []),
+ ([("background-color", "red")], [("cellcolor", "{red}--lwrap")]),
+ (
+ [("background-color", "#FF00FF")], # test background-color command and wrap
+ [("cellcolor", "[HTML]{FF00FF}--lwrap")],
+ ),
+ ([("font-style", "italic")], [("itshape", "")]), # test font-style and types
+ ([("font-style", "oblique")], [("slshape", "")]),
+ ([("font-style", "normal")], []),
+ ([("color", "red /*--dwrap*/")], [("color", "{red}--dwrap")]), # css comments
+ ([("background-color", "red /* --dwrap */")], [("cellcolor", "{red}--dwrap")]),
+ ],
+)
+def test_parse_latex_css_conversion(css, expected):
+ result = _parse_latex_css_conversion(css)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "env, inner_env",
+ [
+ (None, "tabular"),
+ ("table", "tabular"),
+ ("longtable", "longtable"),
+ ],
+)
+@pytest.mark.parametrize(
+ "convert, exp", [(True, "bfseries"), (False, "font-weightbold")]
+)
+def test_parse_latex_css_convert_minimal(styler, env, inner_env, convert, exp):
+ # parameters ensure longtable template is also tested
+ styler.highlight_max(props="font-weight:bold;")
+ result = styler.to_latex(convert_css=convert, environment=env)
+ expected = dedent(
+ f"""\
+ 0 & 0 & \\{exp} -0.61 & ab \\\\
+ 1 & \\{exp} 1 & -1.22 & \\{exp} cd \\\\
+ \\end{{{inner_env}}}
+ """
+ )
+ assert expected in result
+
+
+def test_parse_latex_css_conversion_option():
+ css = [("command", "option--latex--wrap")]
+ expected = [("command", "option--wrap")]
+ result = _parse_latex_css_conversion(css)
+ assert result == expected
+
+
+def test_styler_object_after_render(styler):
+ # GH 42320
+ pre_render = styler._copy(deepcopy=True)
+ styler.to_latex(
+ column_format="rllr",
+ position="h",
+ position_float="centering",
+ hrules=True,
+ label="my lab",
+ caption="my cap",
+ )
+
+ assert pre_render.table_styles == styler.table_styles
+ assert pre_render.caption == styler.caption
+
+
+def test_longtable_comprehensive(styler):
+ result = styler.to_latex(
+ environment="longtable", hrules=True, label="fig:A", caption=("full", "short")
+ )
+ expected = dedent(
+ """\
+ \\begin{longtable}{lrrl}
+ \\caption[short]{full} \\label{fig:A} \\\\
+ \\toprule
+ & A & B & C \\\\
+ \\midrule
+ \\endfirsthead
+ \\caption[]{full} \\\\
+ \\toprule
+ & A & B & C \\\\
+ \\midrule
+ \\endhead
+ \\midrule
+ \\multicolumn{4}{r}{Continued on next page} \\\\
+ \\midrule
+ \\endfoot
+ \\bottomrule
+ \\endlastfoot
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{longtable}
+ """
+ )
+ assert result == expected
+
+
+def test_longtable_minimal(styler):
+ result = styler.to_latex(environment="longtable")
+ expected = dedent(
+ """\
+ \\begin{longtable}{lrrl}
+ & A & B & C \\\\
+ \\endfirsthead
+ & A & B & C \\\\
+ \\endhead
+ \\multicolumn{4}{r}{Continued on next page} \\\\
+ \\endfoot
+ \\endlastfoot
+ 0 & 0 & -0.61 & ab \\\\
+ 1 & 1 & -1.22 & cd \\\\
+ \\end{longtable}
+ """
+ )
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "sparse, exp, siunitx",
+ [
+ (True, "{} & \\multicolumn{2}{r}{A} & {B}", True),
+ (False, "{} & {A} & {A} & {B}", True),
+ (True, " & \\multicolumn{2}{r}{A} & B", False),
+ (False, " & A & A & B", False),
+ ],
+)
+def test_longtable_multiindex_columns(df, sparse, exp, siunitx):
+ cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df.columns = cidx
+ with_si = "{} & {a} & {b} & {c} \\\\"
+ without_si = " & a & b & c \\\\"
+ expected = dedent(
+ f"""\
+ \\begin{{longtable}}{{l{"SS" if siunitx else "rr"}l}}
+ {exp} \\\\
+ {with_si if siunitx else without_si}
+ \\endfirsthead
+ {exp} \\\\
+ {with_si if siunitx else without_si}
+ \\endhead
+ """
+ )
+ result = df.style.to_latex(
+ environment="longtable", sparse_columns=sparse, siunitx=siunitx
+ )
+ assert expected in result
+
+
+@pytest.mark.parametrize(
+ "caption, cap_exp",
+ [
+ ("full", ("{full}", "")),
+ (("full", "short"), ("{full}", "[short]")),
+ ],
+)
+@pytest.mark.parametrize("label, lab_exp", [(None, ""), ("tab:A", " \\label{tab:A}")])
+def test_longtable_caption_label(styler, caption, cap_exp, label, lab_exp):
+ cap_exp1 = f"\\caption{cap_exp[1]}{cap_exp[0]}"
+ cap_exp2 = f"\\caption[]{cap_exp[0]}"
+
+ expected = dedent(
+ f"""\
+ {cap_exp1}{lab_exp} \\\\
+ & A & B & C \\\\
+ \\endfirsthead
+ {cap_exp2} \\\\
+ """
+ )
+ assert expected in styler.to_latex(
+ environment="longtable", caption=caption, label=label
+ )
+
+
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize(
+ "columns, siunitx",
+ [
+ (True, True),
+ (True, False),
+ (False, False),
+ ],
+)
+def test_apply_map_header_render_mi(df_ext, index, columns, siunitx):
+ cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")])
+ ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")])
+ df_ext.index, df_ext.columns = ridx, cidx
+ styler = df_ext.style
+
+ func = lambda v: "bfseries: --rwrap" if "A" in v or "Z" in v or "c" in v else None
+
+ if index:
+ styler.map_index(func, axis="index")
+ if columns:
+ styler.map_index(func, axis="columns")
+
+ result = styler.to_latex(siunitx=siunitx)
+
+ expected_index = dedent(
+ """\
+ \\multirow[c]{2}{*}{\\bfseries{A}} & a & 0 & -0.610000 & ab \\\\
+ \\bfseries{} & b & 1 & -1.220000 & cd \\\\
+ B & \\bfseries{c} & 2 & -2.220000 & de \\\\
+ """
+ )
+ assert (expected_index in result) is index
+
+ exp_cols_si = dedent(
+ """\
+ {} & {} & \\multicolumn{2}{r}{\\bfseries{Z}} & {Y} \\\\
+ {} & {} & {a} & {b} & {\\bfseries{c}} \\\\
+ """
+ )
+ exp_cols_no_si = """\
+ & & \\multicolumn{2}{r}{\\bfseries{Z}} & Y \\\\
+ & & a & b & \\bfseries{c} \\\\
+"""
+ assert ((exp_cols_si if siunitx else exp_cols_no_si) in result) is columns
+
+
+def test_repr_option(styler):
+ assert "<style" in styler._repr_html_()[:6]
+ assert styler._repr_latex_() is None
+ with option_context("styler.render.repr", "latex"):
+ assert "\\begin{tabular}" in styler._repr_latex_()[:15]
+ assert styler._repr_html_() is None
+
+
+def test_tooltip_css_class(styler):
+ # GH 21266
+ result = styler.set_tooltips(
+ DataFrame([["tooltip"]], index=["x"], columns=["A"]),
+ css_class="other-class",
+ props=[("color", "green")],
+ ).to_html()
+ assert "#T_ .other-class {\n color: green;\n" in result
+ assert '#T_ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' in result
+
+ # GH 39563
+ result = styler.set_tooltips( # set_tooltips overwrites previous
+ DataFrame([["tooltip"]], index=["x"], columns=["A"]),
+ css_class="another-class",
+ props="color:green;color:red;",
+ ).to_html()
+ assert "#T_ .another-class {\n color: green;\n color: red;\n}" in result
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py
new file mode 100644
index 0000000000000000000000000000000000000000..db436d8283b9972819f8eff099689cf492d45a83
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py
@@ -0,0 +1,289 @@
+import pytest
+
+from pandas.errors import CSSWarning
+
+import pandas._testing as tm
+
+from pandas.io.formats.css import CSSResolver
+
+
+def assert_resolves(css, props, inherited=None):
+ resolve = CSSResolver()
+ actual = resolve(css, inherited=inherited)
+ assert props == actual
+
+
+def assert_same_resolution(css1, css2, inherited=None):
+ resolve = CSSResolver()
+ resolved1 = resolve(css1, inherited=inherited)
+ resolved2 = resolve(css2, inherited=inherited)
+ assert resolved1 == resolved2
+
+
+@pytest.mark.parametrize(
+ "name,norm,abnorm",
+ [
+ (
+ "whitespace",
+ "hello: world; foo: bar",
+ " \t hello \t :\n world \n ; \n foo: \tbar\n\n",
+ ),
+ ("case", "hello: world; foo: bar", "Hello: WORLD; foO: bar"),
+ ("empty-decl", "hello: world; foo: bar", "; hello: world;; foo: bar;\n; ;"),
+ ("empty-list", "", ";"),
+ ],
+)
+def test_css_parse_normalisation(name, norm, abnorm):
+ assert_same_resolution(norm, abnorm)
+
+
+@pytest.mark.parametrize(
+ "invalid_css,remainder",
+ [
+ # No colon
+ ("hello-world", ""),
+ ("border-style: solid; hello-world", "border-style: solid"),
+ (
+ "border-style: solid; hello-world; font-weight: bold",
+ "border-style: solid; font-weight: bold",
+ ),
+ # Unclosed string fail
+ # Invalid size
+ ("font-size: blah", "font-size: 1em"),
+ ("font-size: 1a2b", "font-size: 1em"),
+ ("font-size: 1e5pt", "font-size: 1em"),
+ ("font-size: 1+6pt", "font-size: 1em"),
+ ("font-size: 1unknownunit", "font-size: 1em"),
+ ("font-size: 10", "font-size: 1em"),
+ ("font-size: 10 pt", "font-size: 1em"),
+ # Too many args
+ ("border-top: 1pt solid red green", "border-top: 1pt solid green"),
+ ],
+)
+def test_css_parse_invalid(invalid_css, remainder):
+ with tm.assert_produces_warning(CSSWarning):
+ assert_same_resolution(invalid_css, remainder)
+
+
+@pytest.mark.parametrize(
+ "shorthand,expansions",
+ [
+ ("margin", ["margin-top", "margin-right", "margin-bottom", "margin-left"]),
+ ("padding", ["padding-top", "padding-right", "padding-bottom", "padding-left"]),
+ (
+ "border-width",
+ [
+ "border-top-width",
+ "border-right-width",
+ "border-bottom-width",
+ "border-left-width",
+ ],
+ ),
+ (
+ "border-color",
+ [
+ "border-top-color",
+ "border-right-color",
+ "border-bottom-color",
+ "border-left-color",
+ ],
+ ),
+ (
+ "border-style",
+ [
+ "border-top-style",
+ "border-right-style",
+ "border-bottom-style",
+ "border-left-style",
+ ],
+ ),
+ ],
+)
+def test_css_side_shorthands(shorthand, expansions):
+ top, right, bottom, left = expansions
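+ # shorthand values expand clockwise: top, right, bottom, left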
+
+ assert_resolves(
+ f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"}
+ )
+
+ assert_resolves(
+ f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"}
+ )
+
+ assert_resolves(
+ f"{shorthand}: 1pt 4pt 2pt",
+ {top: "1pt", right: "4pt", bottom: "2pt", left: "4pt"},
+ )
+
+ assert_resolves(
+ f"{shorthand}: 1pt 4pt 2pt 0pt",
+ {top: "1pt", right: "4pt", bottom: "2pt", left: "0pt"},
+ )
+
+ with tm.assert_produces_warning(CSSWarning):
+ assert_resolves(f"{shorthand}: 1pt 1pt 1pt 1pt 1pt", {})
+
+
+@pytest.mark.parametrize(
+ "shorthand,sides",
+ [
+ ("border-top", ["top"]),
+ ("border-right", ["right"]),
+ ("border-bottom", ["bottom"]),
+ ("border-left", ["left"]),
+ ("border", ["top", "right", "bottom", "left"]),
+ ],
+)
+def test_css_border_shorthand_sides(shorthand, sides):
+ def create_border_dict(sides, color=None, style=None, width=None):
+ resolved = {}
+ for side in sides:
+ if color:
+ resolved[f"border-{side}-color"] = color
+ if style:
+ resolved[f"border-{side}-style"] = style
+ if width:
+ resolved[f"border-{side}-width"] = width
+ return resolved
+
+ assert_resolves(
+ f"{shorthand}: 1pt red solid", create_border_dict(sides, "red", "solid", "1pt")
+ )
+
+
+@pytest.mark.parametrize(
+ "prop, expected",
+ [
+ ("1pt red solid", ("red", "solid", "1pt")),
+ ("red 1pt solid", ("red", "solid", "1pt")),
+ ("red solid 1pt", ("red", "solid", "1pt")),
+ ("solid 1pt red", ("red", "solid", "1pt")),
+ ("red solid", ("red", "solid", "1.500000pt")),
+ # Note: color=black is not CSS conforming
+ # (See https://drafts.csswg.org/css-backgrounds/#border-shorthands)
+ ("1pt solid", ("black", "solid", "1pt")),
+ ("1pt red", ("red", "none", "1pt")),
+ ("red", ("red", "none", "1.500000pt")),
+ ("1pt", ("black", "none", "1pt")),
+ ("solid", ("black", "solid", "1.500000pt")),
+ # Sizes
+ ("1em", ("black", "none", "12pt")),
+ ],
+)
+def test_css_border_shorthands(prop, expected):
+ color, style, width = expected
+
+ assert_resolves(
+ f"border-left: {prop}",
+ {
+ "border-left-color": color,
+ "border-left-style": style,
+ "border-left-width": width,
+ },
+ )
+
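+# Defaults exercised above: a missing border-width resolves to 1.5pt (hence
+# the "1.500000pt" strings), a missing border-style to "none", and a missing
+# border-color to "black" (which, as noted, deviates from the CSS spec).
+# Token order within the shorthand is free, so for example:
+#
+#     resolve = CSSResolver()
+#     assert resolve("border-left: solid 1pt red") == resolve(
+#         "border-left: 1pt red solid"
+#     )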
+
+@pytest.mark.parametrize(
+ "style,inherited,equiv",
+ [
+ ("margin: 1px; margin: 2px", "", "margin: 2px"),
+ ("margin: 1px", "margin: 2px", "margin: 1px"),
+ ("margin: 1px; margin: inherit", "margin: 2px", "margin: 2px"),
+ (
+ "margin: 1px; margin-top: 2px",
+ "",
+ "margin-left: 1px; margin-right: 1px; "
+ "margin-bottom: 1px; margin-top: 2px",
+ ),
+ ("margin-top: 2px", "margin: 1px", "margin: 1px; margin-top: 2px"),
+ ("margin: 1px", "margin-top: 2px", "margin: 1px"),
+ (
+ "margin: 1px; margin-top: inherit",
+ "margin: 2px",
+ "margin: 1px; margin-top: 2px",
+ ),
+ ],
+)
+def test_css_precedence(style, inherited, equiv):
+ resolve = CSSResolver()
+ inherited_props = resolve(inherited)
+ style_props = resolve(style, inherited=inherited_props)
+ equiv_props = resolve(equiv)
+ assert style_props == equiv_props
+
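+# Precedence semantics checked above: within one declaration string the last
+# declaration wins; a side-specific property declared after a shorthand
+# overrides only that side; and "inherit" copies the value from the inherited
+# properties passed to the resolver, e.g.
+#
+#     resolve = CSSResolver()
+#     inherited = resolve("margin: 2px")
+#     result = resolve("margin: 1px; margin-top: inherit", inherited=inherited)
+#     assert result == resolve("margin: 1px; margin-top: 2px")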
+
+@pytest.mark.parametrize(
+ "style,equiv",
+ [
+ (
+ "margin: 1px; margin-top: inherit",
+ "margin-bottom: 1px; margin-right: 1px; margin-left: 1px",
+ ),
+ ("margin-top: inherit", ""),
+ ("margin-top: initial", ""),
+ ],
+)
+def test_css_none_absent(style, equiv):
+ assert_same_resolution(style, equiv)
+
+
+@pytest.mark.parametrize(
+ "size,resolved",
+ [
+ ("xx-small", "6pt"),
+ ("x-small", f"{7.5:f}pt"),
+ ("small", f"{9.6:f}pt"),
+ ("medium", "12pt"),
+ ("large", f"{13.5:f}pt"),
+ ("x-large", "18pt"),
+ ("xx-large", "24pt"),
+ ("8px", "6pt"),
+ ("1.25pc", "15pt"),
+ (".25in", "18pt"),
+ ("02.54cm", "72pt"),
+ ("25.4mm", "72pt"),
+ ("101.6q", "72pt"),
+ ("101.6q", "72pt"),
+ ],
+)
+@pytest.mark.parametrize("relative_to", [None, "16pt"]) # invariant to inherited size
+def test_css_absolute_font_size(size, relative_to, resolved):
+ if relative_to is None:
+ inherited = None
+ else:
+ inherited = {"font-size": relative_to}
+ assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
+
+
+@pytest.mark.parametrize(
+ "size,relative_to,resolved",
+ [
+ ("1em", None, "12pt"),
+ ("1.0em", None, "12pt"),
+ ("1.25em", None, "15pt"),
+ ("1em", "16pt", "16pt"),
+ ("1.0em", "16pt", "16pt"),
+ ("1.25em", "16pt", "20pt"),
+ ("1rem", "16pt", "12pt"),
+ ("1.0rem", "16pt", "12pt"),
+ ("1.25rem", "16pt", "15pt"),
+ ("100%", None, "12pt"),
+ ("125%", None, "15pt"),
+ ("100%", "16pt", "16pt"),
+ ("125%", "16pt", "20pt"),
+ ("2ex", None, "12pt"),
+ ("2.0ex", None, "12pt"),
+ ("2.50ex", None, "15pt"),
+ ("inherit", "16pt", "16pt"),
+ ("smaller", None, "10pt"),
+ ("smaller", "18pt", "15pt"),
+ ("larger", None, f"{14.4:f}pt"),
+ ("larger", "15pt", "18pt"),
+ ],
+)
+def test_css_relative_font_size(size, relative_to, resolved):
+ if relative_to is None:
+ inherited = None
+ else:
+ inherited = {"font-size": relative_to}
+ assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d581b5b92e0c8cbcfe21dbbdfb0f99ca05c1a4e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py
@@ -0,0 +1,254 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ reset_option,
+ set_eng_float_format,
+)
+
+from pandas.io.formats.format import EngFormatter
+
+
+@pytest.fixture(autouse=True)
+def reset_float_format():
+ yield
+ reset_option("display.float_format")
+
+
+class TestEngFormatter:
+ def test_eng_float_formatter2(self, float_frame):
+ df = float_frame
+ df.loc[5] = 0
+
+ set_eng_float_format()
+ repr(df)
+
+ set_eng_float_format(use_eng_prefix=True)
+ repr(df)
+
+ set_eng_float_format(accuracy=0)
+ repr(df)
+
+ def test_eng_float_formatter(self):
+ df = DataFrame({"A": [1.41, 141.0, 14100, 1410000.0]})
+
+ set_eng_float_format()
+ result = df.to_string()
+ expected = (
+ " A\n"
+ "0 1.410E+00\n"
+ "1 141.000E+00\n"
+ "2 14.100E+03\n"
+ "3 1.410E+06"
+ )
+ assert result == expected
+
+ set_eng_float_format(use_eng_prefix=True)
+ result = df.to_string()
+ expected = " A\n0 1.410\n1 141.000\n2 14.100k\n3 1.410M"
+ assert result == expected
+
+ set_eng_float_format(accuracy=0)
+ result = df.to_string()
+ expected = " A\n0 1E+00\n1 141E+00\n2 14E+03\n3 1E+06"
+ assert result == expected
+
+ def compare(self, formatter, input, output):
+ formatted_input = formatter(input)
+ assert formatted_input == output
+
+ def compare_all(self, formatter, in_out):
+ """
+ Parameters:
+ -----------
+ formatter: EngFormatter under test
+ in_out: list of tuples. Each tuple = (number, expected_formatting)
+
+ It is tested if 'formatter(number) == expected_formatting'.
+ *number* should be >= 0 because formatter(-number) == fmt is also
+ tested. *fmt* is derived from *expected_formatting*
+ """
+ for input, output in in_out:
+ self.compare(formatter, input, output)
+ self.compare(formatter, -input, "-" + output[1:])
+
+ def test_exponents_with_eng_prefix(self):
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
+ f = np.sqrt(2)
+ in_out = [
+ (f * 10**-24, " 1.414y"),
+ (f * 10**-23, " 14.142y"),
+ (f * 10**-22, " 141.421y"),
+ (f * 10**-21, " 1.414z"),
+ (f * 10**-20, " 14.142z"),
+ (f * 10**-19, " 141.421z"),
+ (f * 10**-18, " 1.414a"),
+ (f * 10**-17, " 14.142a"),
+ (f * 10**-16, " 141.421a"),
+ (f * 10**-15, " 1.414f"),
+ (f * 10**-14, " 14.142f"),
+ (f * 10**-13, " 141.421f"),
+ (f * 10**-12, " 1.414p"),
+ (f * 10**-11, " 14.142p"),
+ (f * 10**-10, " 141.421p"),
+ (f * 10**-9, " 1.414n"),
+ (f * 10**-8, " 14.142n"),
+ (f * 10**-7, " 141.421n"),
+ (f * 10**-6, " 1.414u"),
+ (f * 10**-5, " 14.142u"),
+ (f * 10**-4, " 141.421u"),
+ (f * 10**-3, " 1.414m"),
+ (f * 10**-2, " 14.142m"),
+ (f * 10**-1, " 141.421m"),
+ (f * 10**0, " 1.414"),
+ (f * 10**1, " 14.142"),
+ (f * 10**2, " 141.421"),
+ (f * 10**3, " 1.414k"),
+ (f * 10**4, " 14.142k"),
+ (f * 10**5, " 141.421k"),
+ (f * 10**6, " 1.414M"),
+ (f * 10**7, " 14.142M"),
+ (f * 10**8, " 141.421M"),
+ (f * 10**9, " 1.414G"),
+ (f * 10**10, " 14.142G"),
+ (f * 10**11, " 141.421G"),
+ (f * 10**12, " 1.414T"),
+ (f * 10**13, " 14.142T"),
+ (f * 10**14, " 141.421T"),
+ (f * 10**15, " 1.414P"),
+ (f * 10**16, " 14.142P"),
+ (f * 10**17, " 141.421P"),
+ (f * 10**18, " 1.414E"),
+ (f * 10**19, " 14.142E"),
+ (f * 10**20, " 141.421E"),
+ (f * 10**21, " 1.414Z"),
+ (f * 10**22, " 14.142Z"),
+ (f * 10**23, " 141.421Z"),
+ (f * 10**24, " 1.414Y"),
+ (f * 10**25, " 14.142Y"),
+ (f * 10**26, " 141.421Y"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ def test_exponents_without_eng_prefix(self):
+ formatter = EngFormatter(accuracy=4, use_eng_prefix=False)
+ f = np.pi
+ in_out = [
+ (f * 10**-24, " 3.1416E-24"),
+ (f * 10**-23, " 31.4159E-24"),
+ (f * 10**-22, " 314.1593E-24"),
+ (f * 10**-21, " 3.1416E-21"),
+ (f * 10**-20, " 31.4159E-21"),
+ (f * 10**-19, " 314.1593E-21"),
+ (f * 10**-18, " 3.1416E-18"),
+ (f * 10**-17, " 31.4159E-18"),
+ (f * 10**-16, " 314.1593E-18"),
+ (f * 10**-15, " 3.1416E-15"),
+ (f * 10**-14, " 31.4159E-15"),
+ (f * 10**-13, " 314.1593E-15"),
+ (f * 10**-12, " 3.1416E-12"),
+ (f * 10**-11, " 31.4159E-12"),
+ (f * 10**-10, " 314.1593E-12"),
+ (f * 10**-9, " 3.1416E-09"),
+ (f * 10**-8, " 31.4159E-09"),
+ (f * 10**-7, " 314.1593E-09"),
+ (f * 10**-6, " 3.1416E-06"),
+ (f * 10**-5, " 31.4159E-06"),
+ (f * 10**-4, " 314.1593E-06"),
+ (f * 10**-3, " 3.1416E-03"),
+ (f * 10**-2, " 31.4159E-03"),
+ (f * 10**-1, " 314.1593E-03"),
+ (f * 10**0, " 3.1416E+00"),
+ (f * 10**1, " 31.4159E+00"),
+ (f * 10**2, " 314.1593E+00"),
+ (f * 10**3, " 3.1416E+03"),
+ (f * 10**4, " 31.4159E+03"),
+ (f * 10**5, " 314.1593E+03"),
+ (f * 10**6, " 3.1416E+06"),
+ (f * 10**7, " 31.4159E+06"),
+ (f * 10**8, " 314.1593E+06"),
+ (f * 10**9, " 3.1416E+09"),
+ (f * 10**10, " 31.4159E+09"),
+ (f * 10**11, " 314.1593E+09"),
+ (f * 10**12, " 3.1416E+12"),
+ (f * 10**13, " 31.4159E+12"),
+ (f * 10**14, " 314.1593E+12"),
+ (f * 10**15, " 3.1416E+15"),
+ (f * 10**16, " 31.4159E+15"),
+ (f * 10**17, " 314.1593E+15"),
+ (f * 10**18, " 3.1416E+18"),
+ (f * 10**19, " 31.4159E+18"),
+ (f * 10**20, " 314.1593E+18"),
+ (f * 10**21, " 3.1416E+21"),
+ (f * 10**22, " 31.4159E+21"),
+ (f * 10**23, " 314.1593E+21"),
+ (f * 10**24, " 3.1416E+24"),
+ (f * 10**25, " 31.4159E+24"),
+ (f * 10**26, " 314.1593E+24"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ def test_rounding(self):
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
+ in_out = [
+ (5.55555, " 5.556"),
+ (55.5555, " 55.556"),
+ (555.555, " 555.555"),
+ (5555.55, " 5.556k"),
+ (55555.5, " 55.556k"),
+ (555555, " 555.555k"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
+ in_out = [
+ (5.55555, " 5.6"),
+ (55.5555, " 55.6"),
+ (555.555, " 555.6"),
+ (5555.55, " 5.6k"),
+ (55555.5, " 55.6k"),
+ (555555, " 555.6k"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ formatter = EngFormatter(accuracy=0, use_eng_prefix=True)
+ in_out = [
+ (5.55555, " 6"),
+ (55.5555, " 56"),
+ (555.555, " 556"),
+ (5555.55, " 6k"),
+ (55555.5, " 56k"),
+ (555555, " 556k"),
+ ]
+ self.compare_all(formatter, in_out)
+
+ formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
+ result = formatter(0)
+ assert result == " 0.000"
+
+ def test_nan(self):
+ # Issue #11981
+
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
+ result = formatter(np.nan)
+ assert result == "NaN"
+
+ df = DataFrame(
+ {
+ "a": [1.5, 10.3, 20.5],
+ "b": [50.3, 60.67, 70.12],
+ "c": [100.2, 101.33, 120.33],
+ }
+ )
+ pt = df.pivot_table(values="a", index="b", columns="c")
+ set_eng_float_format(accuracy=1)
+ result = pt.to_string()
+ assert "NaN" in result
+
+ def test_inf(self):
+ # Issue #11981
+
+ formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
+ result = formatter(np.inf)
+ assert result == "inf"
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ca29c219b55b0931885f2fbf92cbf1fd809c5b8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py
@@ -0,0 +1,2293 @@
+"""
+Tests for the file pandas.io.formats.format, *not* tests for general formatting
+of pandas objects.
+"""
+from datetime import datetime
+from io import StringIO
+from pathlib import Path
+import re
+from shutil import get_terminal_size
+
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ NaT,
+ Series,
+ Timestamp,
+ date_range,
+ get_option,
+ option_context,
+ read_csv,
+ reset_option,
+)
+
+from pandas.io.formats import printing
+import pandas.io.formats.format as fmt
+
+
+@pytest.fixture(params=["string", "pathlike", "buffer"])
+def filepath_or_buffer_id(request):
+ """
+ A fixture yielding test ids for filepath_or_buffer testing.
+ """
+ return request.param
+
+
+@pytest.fixture
+def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
+ """
+ A fixture yielding a string representing a filepath, a path-like object
+ and a StringIO buffer. Also checks that buffer is not closed.
+ """
+ if filepath_or_buffer_id == "buffer":
+ buf = StringIO()
+ yield buf
+ assert not buf.closed
+ else:
+ assert isinstance(tmp_path, Path)
+ if filepath_or_buffer_id == "pathlike":
+ yield tmp_path / "foo"
+ else:
+ yield str(tmp_path / "foo")
+
+
+@pytest.fixture
+def assert_filepath_or_buffer_equals(
+ filepath_or_buffer, filepath_or_buffer_id, encoding
+):
+ """
+ Assertion helper for checking filepath_or_buffer.
+ """
+ if encoding is None:
+ encoding = "utf-8"
+
+ def _assert_filepath_or_buffer_equals(expected):
+ if filepath_or_buffer_id == "string":
+ with open(filepath_or_buffer, encoding=encoding) as f:
+ result = f.read()
+ elif filepath_or_buffer_id == "pathlike":
+ result = filepath_or_buffer.read_text(encoding=encoding)
+ elif filepath_or_buffer_id == "buffer":
+ result = filepath_or_buffer.getvalue()
+ assert result == expected
+
+ return _assert_filepath_or_buffer_equals
+
+
+def has_info_repr(df):
+ r = repr(df)
+    c1 = r.split("\n")[0].startswith("<class")
+    c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
+    return c1 or c2
+
+
+def has_non_verbose_info_repr(df):
+    has_info = has_info_repr(df)
+    r = repr(df)
+
+    # 1. <class>
+    # 2. Index
+    # 3. Columns
+    # 4. dtype
+    # 5. memory usage
+    # 6. trailing newline
+    nv = len(r.split("\n")) == 6
+    return has_info and nv
+
+
+def has_horizontally_truncated_repr(df):
+ try: # Check header row
+ fst_line = np.array(repr(df).splitlines()[0].split())
+ cand_col = np.where(fst_line == "...")[0][0]
+ except IndexError:
+ return False
+ # Make sure each row has this ... in the same place
+ r = repr(df)
+ for ix, _ in enumerate(r.splitlines()):
+ if not r.split()[cand_col] == "...":
+ return False
+ return True
+
+
+def has_vertically_truncated_repr(df):
+ r = repr(df)
+ only_dot_row = False
+ for row in r.splitlines():
+ if re.match(r"^[\.\ ]+$", row):
+ only_dot_row = True
+ return only_dot_row
+
+
+def has_truncated_repr(df):
+ return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
+
+
+def has_doubly_truncated_repr(df):
+ return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
+
+
+def has_expanded_repr(df):
+ r = repr(df)
+ for line in r.split("\n"):
+ if line.endswith("\\"):
+ return True
+ return False
+
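+# (pandas marks a repr that has been wrapped across multiple column blocks by
+# ending each continuation line with a backslash; that trailing "\" is what
+# has_expanded_repr keys on)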
+
+class TestDataFrameFormatting:
+ def test_repr_truncation(self):
+ max_len = 20
+ with option_context("display.max_colwidth", max_len):
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).standard_normal(10),
+ "B": [
+ "a"
+ * np.random.default_rng(2).integers(max_len - 1, max_len + 1)
+ for _ in range(10)
+ ],
+ }
+ )
+ r = repr(df)
+ r = r[r.find("\n") + 1 :]
+
+ adj = printing.get_adjustment()
+
+ for line, value in zip(r.split("\n"), df["B"]):
+ if adj.len(value) + 1 > max_len:
+ assert "..." in line
+ else:
+ assert "..." not in line
+
+ with option_context("display.max_colwidth", 999999):
+ assert "..." not in repr(df)
+
+ with option_context("display.max_colwidth", max_len + 2):
+ assert "..." not in repr(df)
+
+ def test_repr_truncation_preserves_na(self):
+ # https://github.com/pandas-dev/pandas/issues/55630
+ df = DataFrame({"a": [pd.NA for _ in range(10)]})
+ with option_context("display.max_rows", 2, "display.show_dimensions", False):
+            assert repr(df) == "      a\n0  <NA>\n..  ...\n9  <NA>"
+
+ def test_max_colwidth_negative_int_raises(self):
+ # Deprecation enforced from:
+ # https://github.com/pandas-dev/pandas/issues/31532
+ with pytest.raises(
+ ValueError, match="Value must be a nonnegative integer or None"
+ ):
+ with option_context("display.max_colwidth", -1):
+ pass
+
+ def test_repr_chop_threshold(self):
+ df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
+ reset_option("display.chop_threshold") # default None
+ assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
+
+ with option_context("display.chop_threshold", 0.2):
+ assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
+
+ with option_context("display.chop_threshold", 0.6):
+ assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
+
+ with option_context("display.chop_threshold", None):
+ assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
+
+ def test_repr_chop_threshold_column_below(self):
+ # GH 6839: validation case
+
+ df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
+
+ with option_context("display.chop_threshold", 0):
+ assert repr(df) == (
+ " 0 1\n"
+ "0 10.0 8.000000e-10\n"
+ "1 20.0 -1.000000e-11\n"
+ "2 30.0 2.000000e-09\n"
+ "3 40.0 -2.000000e-11"
+ )
+
+ with option_context("display.chop_threshold", 1e-8):
+ assert repr(df) == (
+ " 0 1\n"
+ "0 10.0 0.000000e+00\n"
+ "1 20.0 0.000000e+00\n"
+ "2 30.0 0.000000e+00\n"
+ "3 40.0 0.000000e+00"
+ )
+
+ with option_context("display.chop_threshold", 5e-11):
+ assert repr(df) == (
+ " 0 1\n"
+ "0 10.0 8.000000e-10\n"
+ "1 20.0 0.000000e+00\n"
+ "2 30.0 2.000000e-09\n"
+ "3 40.0 0.000000e+00"
+ )
+
+ def test_repr_no_backslash(self):
+ with option_context("mode.sim_interactive", True):
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
+ assert "\\" not in repr(df)
+
+ def test_expand_frame_repr(self):
+ df_small = DataFrame("hello", index=[0], columns=[0])
+ df_wide = DataFrame("hello", index=[0], columns=range(10))
+ df_tall = DataFrame("hello", index=range(30), columns=range(5))
+
+ with option_context("mode.sim_interactive", True):
+ with option_context(
+ "display.max_columns",
+ 10,
+ "display.width",
+ 20,
+ "display.max_rows",
+ 20,
+ "display.show_dimensions",
+ True,
+ ):
+ with option_context("display.expand_frame_repr", True):
+ assert not has_truncated_repr(df_small)
+ assert not has_expanded_repr(df_small)
+ assert not has_truncated_repr(df_wide)
+ assert has_expanded_repr(df_wide)
+ assert has_vertically_truncated_repr(df_tall)
+ assert has_expanded_repr(df_tall)
+
+ with option_context("display.expand_frame_repr", False):
+ assert not has_truncated_repr(df_small)
+ assert not has_expanded_repr(df_small)
+ assert not has_horizontally_truncated_repr(df_wide)
+ assert not has_expanded_repr(df_wide)
+ assert has_vertically_truncated_repr(df_tall)
+ assert not has_expanded_repr(df_tall)
+
+ def test_repr_non_interactive(self):
+        # in non-interactive mode, there can be no dependency on the
+        # result of terminal auto-size detection
+ df = DataFrame("hello", index=range(1000), columns=range(5))
+
+ with option_context(
+ "mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
+ ):
+ assert not has_truncated_repr(df)
+ assert not has_expanded_repr(df)
+
+ def test_repr_truncates_terminal_size(self, monkeypatch):
+ # see gh-21180
+
+ terminal_size = (118, 96)
+ monkeypatch.setattr(
+ "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
+ )
+
+ index = range(5)
+ columns = MultiIndex.from_tuples(
+ [
+ ("This is a long title with > 37 chars.", "cat"),
+ ("This is a loooooonger title with > 43 chars.", "dog"),
+ ]
+ )
+ df = DataFrame(1, index=index, columns=columns)
+
+ result = repr(df)
+
+ h1, h2 = result.split("\n")[:2]
+ assert "long" in h1
+ assert "loooooonger" in h1
+ assert "cat" in h2
+ assert "dog" in h2
+
+ # regular columns
+ df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
+ result = repr(df2)
+
+ assert df2.columns[0] in result.split("\n")[0]
+
+ def test_repr_truncates_terminal_size_full(self, monkeypatch):
+ # GH 22984 ensure entire window is filled
+ terminal_size = (80, 24)
+ df = DataFrame(np.random.default_rng(2).random((1, 7)))
+
+ monkeypatch.setattr(
+ "pandas.io.formats.format.get_terminal_size", lambda: terminal_size
+ )
+ assert "..." not in str(df)
+
+ def test_repr_truncation_column_size(self):
+        # DataFrame with a very wide last column -> check that it is not used
+        # to determine the size of the truncation ("...") column
+ df = DataFrame(
+ {
+ "a": [108480, 30830],
+ "b": [12345, 12345],
+ "c": [12345, 12345],
+ "d": [12345, 12345],
+ "e": ["a" * 50] * 2,
+ }
+ )
+ assert "..." in str(df)
+ assert " ... " not in str(df)
+
+ def test_repr_max_columns_max_rows(self):
+ term_width, term_height = get_terminal_size()
+ if term_width < 10 or term_height < 10:
+ pytest.skip(f"terminal size too small, {term_width} x {term_height}")
+
+ def mkframe(n):
+ index = [f"{i:05d}" for i in range(n)]
+ return DataFrame(0, index, index)
+
+ df6 = mkframe(6)
+ df10 = mkframe(10)
+ with option_context("mode.sim_interactive", True):
+ with option_context("display.width", term_width * 2):
+ with option_context("display.max_rows", 5, "display.max_columns", 5):
+ assert not has_expanded_repr(mkframe(4))
+ assert not has_expanded_repr(mkframe(5))
+ assert not has_expanded_repr(df6)
+ assert has_doubly_truncated_repr(df6)
+
+ with option_context("display.max_rows", 20, "display.max_columns", 10):
+                    # Past the max_columns boundary, but no expanding
+                    # since the width is not exceeded
+ assert not has_expanded_repr(df6)
+ assert not has_truncated_repr(df6)
+
+ with option_context("display.max_rows", 9, "display.max_columns", 10):
+                    # exceeding vertical bounds cannot result in an expanded repr
+ assert not has_expanded_repr(df10)
+ assert has_vertically_truncated_repr(df10)
+
+ # width=None in terminal, auto detection
+ with option_context(
+ "display.max_columns",
+ 100,
+ "display.max_rows",
+ term_width * 20,
+ "display.width",
+ None,
+ ):
+ df = mkframe((term_width // 7) - 2)
+ assert not has_expanded_repr(df)
+ df = mkframe((term_width // 7) + 2)
+ printing.pprint_thing(df._repr_fits_horizontal_())
+ assert has_expanded_repr(df)
+
+ def test_repr_min_rows(self):
+ df = DataFrame({"a": range(20)})
+
+        # default setting: no truncation even if above min_rows
+ assert ".." not in repr(df)
+ assert ".." not in df._repr_html_()
+
+ df = DataFrame({"a": range(61)})
+
+ # default of max_rows 60 triggers truncation if above
+ assert ".." in repr(df)
+ assert ".." in df._repr_html_()
+
+ with option_context("display.max_rows", 10, "display.min_rows", 4):
+ # truncated after first two rows
+ assert ".." in repr(df)
+ assert "2 " not in repr(df)
+ assert "..." in df._repr_html_()
+ assert "2 " not in df._repr_html_()
+
+ with option_context("display.max_rows", 12, "display.min_rows", None):
+ # when set to None, follow value of max_rows
+ assert "5 5" in repr(df)
+ assert "5 " in df._repr_html_()
+
+ with option_context("display.max_rows", 10, "display.min_rows", 12):
+            # when min_rows is set higher than max_rows, use the minimum
+ assert "5 5" not in repr(df)
+ assert "5 " not in df._repr_html_()
+
+ with option_context("display.max_rows", None, "display.min_rows", 12):
+ # max_rows of None -> never truncate
+ assert ".." not in repr(df)
+ assert ".." not in df._repr_html_()
+
+ def test_str_max_colwidth(self):
+ # GH 7856
+ df = DataFrame(
+ [
+ {
+ "a": "foo",
+ "b": "bar",
+ "c": "uncomfortably long line with lots of stuff",
+ "d": 1,
+ },
+ {"a": "foo", "b": "bar", "c": "stuff", "d": 1},
+ ]
+ )
+ df.set_index(["a", "b", "c"])
+ assert str(df) == (
+ " a b c d\n"
+ "0 foo bar uncomfortably long line with lots of stuff 1\n"
+ "1 foo bar stuff 1"
+ )
+ with option_context("max_colwidth", 20):
+ assert str(df) == (
+ " a b c d\n"
+ "0 foo bar uncomfortably lo... 1\n"
+ "1 foo bar stuff 1"
+ )
+
+ def test_auto_detect(self):
+ term_width, term_height = get_terminal_size()
+ fac = 1.05 # Arbitrary large factor to exceed term width
+ cols = range(int(term_width * fac))
+ index = range(10)
+ df = DataFrame(index=index, columns=cols)
+ with option_context("mode.sim_interactive", True):
+ with option_context("display.max_rows", None):
+ with option_context("display.max_columns", None):
+ # Wrap around with None
+ assert has_expanded_repr(df)
+ with option_context("display.max_rows", 0):
+ with option_context("display.max_columns", 0):
+ # Truncate with auto detection.
+ assert has_horizontally_truncated_repr(df)
+
+ index = range(int(term_height * fac))
+ df = DataFrame(index=index, columns=cols)
+ with option_context("display.max_rows", 0):
+ with option_context("display.max_columns", None):
+ # Wrap around with None
+ assert has_expanded_repr(df)
+ # Truncate vertically
+ assert has_vertically_truncated_repr(df)
+
+ with option_context("display.max_rows", None):
+ with option_context("display.max_columns", 0):
+ assert has_horizontally_truncated_repr(df)
+
+ def test_to_string_repr_unicode2(self):
+ idx = Index(["abc", "\u03c3a", "aegdvg"])
+ ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
+ rs = repr(ser).split("\n")
+ line_len = len(rs[0])
+ for line in rs[1:]:
+ try:
+ line = line.decode(get_option("display.encoding"))
+ except AttributeError:
+ pass
+ if not line.startswith("dtype:"):
+ assert len(line) == line_len
+
+ def test_east_asian_unicode_false(self):
+ # not aligned properly because of east asian width
+
+ # mid col
+ df = DataFrame(
+ {"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na あ 1\n"
+ "bb いいい 222\nc う 33333\n"
+ "ddd ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ # last col
+ df = DataFrame(
+ {"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na 1 あ\n"
+ "bb 222 いいい\nc 33333 う\n"
+ "ddd 4 ええええええ"
+ )
+ assert repr(df) == expected
+
+ # all col
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na あああああ あ\n"
+ "bb い いいい\nc う う\n"
+ "ddd えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # column name
+ df = DataFrame(
+ {
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ "あああああ": [1, 222, 33333, 4],
+ },
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " b あああああ\na あ 1\n"
+ "bb いいい 222\nc う 33333\n"
+ "ddd ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ # index
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=["あああ", "いいいいいい", "うう", "え"],
+ )
+ expected = (
+ " a b\nあああ あああああ あ\n"
+ "いいいいいい い いいい\nうう う う\n"
+ "え えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # index name
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=Index(["あ", "い", "うう", "え"], name="おおおお"),
+ )
+ expected = (
+ " a b\n"
+ "おおおお \n"
+ "あ あああああ あ\n"
+ "い い いいい\n"
+ "うう う う\n"
+ "え えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # all
+ df = DataFrame(
+ {
+ "あああ": ["あああ", "い", "う", "えええええ"],
+ "いいいいい": ["あ", "いいい", "う", "ええ"],
+ },
+ index=Index(["あ", "いいい", "うう", "え"], name="お"),
+ )
+ expected = (
+ " あああ いいいいい\n"
+ "お \n"
+ "あ あああ あ\n"
+ "いいい い いいい\n"
+ "うう う う\n"
+ "え えええええ ええ"
+ )
+ assert repr(df) == expected
+
+ # MultiIndex
+ idx = MultiIndex.from_tuples(
+ [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
+ )
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=idx,
+ )
+ expected = (
+ " a b\n"
+ "あ いい あああああ あ\n"
+ "う え い いいい\n"
+ "おおお かかかか う う\n"
+ "き くく えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # truncate
+ with option_context("display.max_rows", 3, "display.max_columns", 3):
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ "c": ["お", "か", "ききき", "くくくくくく"],
+ "ああああ": ["さ", "し", "す", "せ"],
+ },
+ columns=["a", "b", "c", "ああああ"],
+ )
+
+ expected = (
+ " a ... ああああ\n0 あああああ ... さ\n"
+ ".. ... ... ...\n3 えええ ... せ\n"
+ "\n[4 rows x 4 columns]"
+ )
+ assert repr(df) == expected
+
+ df.index = ["あああ", "いいいい", "う", "aaa"]
+ expected = (
+ " a ... ああああ\nあああ あああああ ... さ\n"
+ ".. ... ... ...\naaa えええ ... せ\n"
+ "\n[4 rows x 4 columns]"
+ )
+ assert repr(df) == expected
+
+ def test_east_asian_unicode_true(self):
+ # Enable Unicode option -----------------------------------------
+ with option_context("display.unicode.east_asian_width", True):
+ # mid col
+ df = DataFrame(
+ {"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na あ 1\n"
+ "bb いいい 222\nc う 33333\n"
+ "ddd ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ # last col
+ df = DataFrame(
+ {"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\na 1 あ\n"
+ "bb 222 いいい\nc 33333 う\n"
+ "ddd 4 ええええええ"
+ )
+ assert repr(df) == expected
+
+ # all col
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " a b\n"
+ "a あああああ あ\n"
+ "bb い いいい\n"
+ "c う う\n"
+ "ddd えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # column name
+ df = DataFrame(
+ {
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ "あああああ": [1, 222, 33333, 4],
+ },
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ " b あああああ\n"
+ "a あ 1\n"
+ "bb いいい 222\n"
+ "c う 33333\n"
+ "ddd ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ # index
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=["あああ", "いいいいいい", "うう", "え"],
+ )
+ expected = (
+ " a b\n"
+ "あああ あああああ あ\n"
+ "いいいいいい い いいい\n"
+ "うう う う\n"
+ "え えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # index name
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=Index(["あ", "い", "うう", "え"], name="おおおお"),
+ )
+ expected = (
+ " a b\n"
+ "おおおお \n"
+ "あ あああああ あ\n"
+ "い い いいい\n"
+ "うう う う\n"
+ "え えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # all
+ df = DataFrame(
+ {
+ "あああ": ["あああ", "い", "う", "えええええ"],
+ "いいいいい": ["あ", "いいい", "う", "ええ"],
+ },
+ index=Index(["あ", "いいい", "うう", "え"], name="お"),
+ )
+ expected = (
+ " あああ いいいいい\n"
+ "お \n"
+ "あ あああ あ\n"
+ "いいい い いいい\n"
+ "うう う う\n"
+ "え えええええ ええ"
+ )
+ assert repr(df) == expected
+
+ # MultiIndex
+ idx = MultiIndex.from_tuples(
+ [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
+ )
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ },
+ index=idx,
+ )
+ expected = (
+ " a b\n"
+ "あ いい あああああ あ\n"
+ "う え い いいい\n"
+ "おおお かかかか う う\n"
+ "き くく えええ ええええええ"
+ )
+ assert repr(df) == expected
+
+ # truncate
+ with option_context("display.max_rows", 3, "display.max_columns", 3):
+ df = DataFrame(
+ {
+ "a": ["あああああ", "い", "う", "えええ"],
+ "b": ["あ", "いいい", "う", "ええええええ"],
+ "c": ["お", "か", "ききき", "くくくくくく"],
+ "ああああ": ["さ", "し", "す", "せ"],
+ },
+ columns=["a", "b", "c", "ああああ"],
+ )
+
+ expected = (
+ " a ... ああああ\n"
+ "0 あああああ ... さ\n"
+ ".. ... ... ...\n"
+ "3 えええ ... せ\n"
+ "\n[4 rows x 4 columns]"
+ )
+ assert repr(df) == expected
+
+ df.index = ["あああ", "いいいい", "う", "aaa"]
+ expected = (
+ " a ... ああああ\n"
+ "あああ あああああ ... さ\n"
+ "... ... ... ...\n"
+ "aaa えええ ... せ\n"
+ "\n[4 rows x 4 columns]"
+ )
+ assert repr(df) == expected
+
+ # ambiguous unicode
+ df = DataFrame(
+ {
+ "b": ["あ", "いいい", "¡¡", "ええええええ"],
+ "あああああ": [1, 222, 33333, 4],
+ },
+ index=["a", "bb", "c", "¡¡¡"],
+ )
+ expected = (
+ " b あああああ\n"
+ "a あ 1\n"
+ "bb いいい 222\n"
+ "c ¡¡ 33333\n"
+ "¡¡¡ ええええええ 4"
+ )
+ assert repr(df) == expected
+
+ def test_to_string_buffer_all_unicode(self):
+ buf = StringIO()
+
+ empty = DataFrame({"c/\u03c3": Series(dtype=object)})
+ nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
+
+ print(empty, file=buf)
+ print(nonempty, file=buf)
+
+ # this should work
+ buf.getvalue()
+
+ @pytest.mark.parametrize(
+ "index_scalar",
+ [
+ "a" * 10,
+ 1,
+ Timestamp(2020, 1, 1),
+ pd.Period("2020-01-01"),
+ ],
+ )
+ @pytest.mark.parametrize("h", [10, 20])
+ @pytest.mark.parametrize("w", [10, 20])
+ def test_to_string_truncate_indices(self, index_scalar, h, w):
+ with option_context("display.expand_frame_repr", False):
+ df = DataFrame(
+ index=[index_scalar] * h, columns=[str(i) * 10 for i in range(w)]
+ )
+ with option_context("display.max_rows", 15):
+ if h == 20:
+ assert has_vertically_truncated_repr(df)
+ else:
+ assert not has_vertically_truncated_repr(df)
+ with option_context("display.max_columns", 15):
+ if w == 20:
+ assert has_horizontally_truncated_repr(df)
+ else:
+ assert not has_horizontally_truncated_repr(df)
+ with option_context("display.max_rows", 15, "display.max_columns", 15):
+ if h == 20 and w == 20:
+ assert has_doubly_truncated_repr(df)
+ else:
+ assert not has_doubly_truncated_repr(df)
+
+ def test_to_string_truncate_multilevel(self):
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ df = DataFrame(index=arrays, columns=arrays)
+ with option_context("display.max_rows", 7, "display.max_columns", 7):
+ assert has_doubly_truncated_repr(df)
+
+ @pytest.mark.parametrize("dtype", ["object", "datetime64[us]"])
+ def test_truncate_with_different_dtypes(self, dtype):
+ # 11594, 12045
+ # when truncated the dtypes of the splits can differ
+
+ # 11594
+ ser = Series(
+ [datetime(2012, 1, 1)] * 10
+ + [datetime(1012, 1, 2)]
+ + [datetime(2012, 1, 3)] * 10,
+ dtype=dtype,
+ )
+
+ with option_context("display.max_rows", 8):
+ result = str(ser)
+ assert dtype in result
+
+ def test_truncate_with_different_dtypes2(self):
+ # 12045
+ df = DataFrame({"text": ["some words"] + [None] * 9}, dtype=object)
+
+ with option_context("display.max_rows", 8, "display.max_columns", 3):
+ result = str(df)
+ assert "None" in result
+ assert "NaN" not in result
+
+ def test_truncate_with_different_dtypes_multiindex(self):
+ # GH#13000
+ df = DataFrame({"Vals": range(100)})
+ frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
+ result = repr(frame)
+
+ result2 = repr(frame.iloc[:5])
+ assert result.startswith(result2)
+
+ def test_datetimelike_frame(self):
+ # GH 12211
+ df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
+
+ with option_context("display.max_rows", 5):
+ result = str(df)
+ assert "2013-01-01 00:00:00+00:00" in result
+ assert "NaT" in result
+ assert "..." in result
+ assert "[6 rows x 1 columns]" in result
+
+ dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
+ df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+ with option_context("display.max_rows", 5):
+ expected = (
+ " dt x\n"
+ "0 2011-01-01 00:00:00-05:00 1\n"
+ "1 2011-01-01 00:00:00-05:00 2\n"
+ ".. ... ..\n"
+ "8 NaT 9\n"
+ "9 NaT 10\n\n"
+ "[10 rows x 2 columns]"
+ )
+ assert repr(df) == expected
+
+ dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
+ df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+ with option_context("display.max_rows", 5):
+ expected = (
+ " dt x\n"
+ "0 NaT 1\n"
+ "1 NaT 2\n"
+ ".. ... ..\n"
+ "8 2011-01-01 00:00:00-05:00 9\n"
+ "9 2011-01-01 00:00:00-05:00 10\n\n"
+ "[10 rows x 2 columns]"
+ )
+ assert repr(df) == expected
+
+ dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
+ Timestamp("2011-01-01", tz="US/Eastern")
+ ] * 5
+ df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
+ with option_context("display.max_rows", 5):
+ expected = (
+ " dt x\n"
+ "0 2011-01-01 00:00:00+09:00 1\n"
+ "1 2011-01-01 00:00:00+09:00 2\n"
+ ".. ... ..\n"
+ "8 2011-01-01 00:00:00-05:00 9\n"
+ "9 2011-01-01 00:00:00-05:00 10\n\n"
+ "[10 rows x 2 columns]"
+ )
+ assert repr(df) == expected
+
+ @pytest.mark.parametrize(
+ "start_date",
+ [
+ "2017-01-01 23:59:59.999999999",
+ "2017-01-01 23:59:59.99999999",
+ "2017-01-01 23:59:59.9999999",
+ "2017-01-01 23:59:59.999999",
+ "2017-01-01 23:59:59.99999",
+ "2017-01-01 23:59:59.9999",
+ ],
+ )
+ def test_datetimeindex_highprecision(self, start_date):
+ # GH19030
+ # Check that high-precision time values for the end of day are
+ # included in repr for DatetimeIndex
+ df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
+ result = str(df)
+ assert start_date in result
+
+ dti = date_range(start=start_date, freq="D", periods=5)
+ df = DataFrame({"A": range(5)}, index=dti)
+ result = str(df.index)
+ assert start_date in result
+
+ def test_string_repr_encoding(self, datapath):
+ filepath = datapath("io", "parser", "data", "unicode_series.csv")
+ df = read_csv(filepath, header=None, encoding="latin1")
+ repr(df)
+ repr(df[1])
+
+ def test_repr_corner(self):
+ # representing infs poses no problems
+ df = DataFrame({"foo": [-np.inf, np.inf]})
+ repr(df)
+
+ def test_frame_info_encoding(self):
+ index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
+ with option_context("display.max_rows", 1):
+ df = DataFrame(columns=["a", "b", "c"], index=index)
+ repr(df)
+ repr(df.T)
+
+ def test_wide_repr(self):
+ with option_context(
+ "mode.sim_interactive",
+ True,
+ "display.show_dimensions",
+ True,
+ "display.max_columns",
+ 20,
+ ):
+ max_cols = get_option("display.max_columns")
+ df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+
+ assert f"10 rows x {max_cols - 1} columns" in rep_str
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 120):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ def test_wide_repr_wide_columns(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 3)),
+ columns=["a" * 90, "b" * 90, "c" * 90],
+ )
+ rep_str = repr(df)
+
+ assert len(rep_str.splitlines()) == 20
+
+ def test_wide_repr_named(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ max_cols = get_option("display.max_columns")
+ df = DataFrame([["a" * 25] * (max_cols - 1)] * 10)
+ df.index.name = "DataFrame Index"
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ for line in wide_repr.splitlines()[1::13]:
+ assert "DataFrame Index" in line
+
+ def test_wide_repr_multiindex(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2)
+ max_cols = get_option("display.max_columns")
+ df = DataFrame([["a" * 25] * (max_cols - 1)] * 10, index=midx)
+ df.index.names = ["Level 0", "Level 1"]
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ for line in wide_repr.splitlines()[1::13]:
+ assert "Level 0 Level 1" in line
+
+ def test_wide_repr_multiindex_cols(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ max_cols = get_option("display.max_columns")
+ midx = MultiIndex.from_arrays([["a" * 5] * 10] * 2)
+ mcols = MultiIndex.from_arrays([["b" * 3] * (max_cols - 1)] * 2)
+ df = DataFrame(
+ [["c" * 25] * (max_cols - 1)] * 10, index=midx, columns=mcols
+ )
+ df.index.names = ["Level 0", "Level 1"]
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150, "display.max_columns", 20):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ def test_wide_repr_unicode(self):
+ with option_context("mode.sim_interactive", True, "display.max_columns", 20):
+ max_cols = 20
+ df = DataFrame([["a" * 25] * 10] * (max_cols - 1))
+ with option_context("display.expand_frame_repr", False):
+ rep_str = repr(df)
+ with option_context("display.expand_frame_repr", True):
+ wide_repr = repr(df)
+ assert rep_str != wide_repr
+
+ with option_context("display.width", 150):
+ wider_repr = repr(df)
+ assert len(wider_repr) < len(wide_repr)
+
+ def test_wide_repr_wide_long_columns(self):
+ with option_context("mode.sim_interactive", True):
+ df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
+
+ result = repr(df)
+ assert "ccccc" in result
+ assert "ddddd" in result
+
+ def test_long_series(self):
+ n = 1000
+ s = Series(
+ np.random.default_rng(2).integers(-50, 50, n),
+ index=[f"s{x:04d}" for x in range(n)],
+ dtype="int64",
+ )
+
+ str_rep = str(s)
+ nmatches = len(re.findall("dtype", str_rep))
+ assert nmatches == 1
+
+ def test_to_string_ascii_error(self):
+ data = [
+ (
+ "0 ",
+ " .gitignore ",
+ " 5 ",
+ " \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
+ )
+ ]
+ df = DataFrame(data)
+
+ # it works!
+ repr(df)
+
+ def test_show_dimensions(self):
+ df = DataFrame(123, index=range(10, 15), columns=range(30))
+
+ with option_context(
+ "display.max_rows",
+ 10,
+ "display.max_columns",
+ 40,
+ "display.width",
+ 500,
+ "display.expand_frame_repr",
+ "info",
+ "display.show_dimensions",
+ True,
+ ):
+ assert "5 rows" in str(df)
+ assert "5 rows" in df._repr_html_()
+ with option_context(
+ "display.max_rows",
+ 10,
+ "display.max_columns",
+ 40,
+ "display.width",
+ 500,
+ "display.expand_frame_repr",
+ "info",
+ "display.show_dimensions",
+ False,
+ ):
+ assert "5 rows" not in str(df)
+ assert "5 rows" not in df._repr_html_()
+ with option_context(
+ "display.max_rows",
+ 2,
+ "display.max_columns",
+ 2,
+ "display.width",
+ 500,
+ "display.expand_frame_repr",
+ "info",
+ "display.show_dimensions",
+ "truncate",
+ ):
+ assert "5 rows" in str(df)
+ assert "5 rows" in df._repr_html_()
+ with option_context(
+ "display.max_rows",
+ 10,
+ "display.max_columns",
+ 40,
+ "display.width",
+ 500,
+ "display.expand_frame_repr",
+ "info",
+ "display.show_dimensions",
+ "truncate",
+ ):
+ assert "5 rows" not in str(df)
+ assert "5 rows" not in df._repr_html_()
+
+ def test_info_repr(self):
+ # GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
+ # the terminal size to ensure that we try to print something "too big"
+ term_width, term_height = get_terminal_size()
+
+ max_rows = 60
+ max_cols = 20 + (max(term_width, 80) - 80) // 4
+ # Long
+ h, w = max_rows + 1, max_cols - 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert has_vertically_truncated_repr(df)
+ with option_context("display.large_repr", "info"):
+ assert has_info_repr(df)
+
+ # Wide
+ h, w = max_rows - 1, max_cols + 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert has_horizontally_truncated_repr(df)
+ with option_context(
+ "display.large_repr", "info", "display.max_columns", max_cols
+ ):
+ assert has_info_repr(df)
+
+ def test_info_repr_max_cols(self):
+ # GH #6939
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 5)))
+ with option_context(
+ "display.large_repr",
+ "info",
+ "display.max_columns",
+ 1,
+ "display.max_info_columns",
+ 4,
+ ):
+ assert has_non_verbose_info_repr(df)
+
+ with option_context(
+ "display.large_repr",
+ "info",
+ "display.max_columns",
+ 1,
+ "display.max_info_columns",
+ 5,
+ ):
+ assert not has_non_verbose_info_repr(df)
+
+ # FIXME: don't leave commented-out
+ # test verbose overrides
+ # set_option('display.max_info_columns', 4) # exceeded
+
+ def test_pprint_pathological_object(self):
+ """
+ If the test fails, it at least won't hang.
+ """
+
+ class A:
+ def __getitem__(self, key):
+ return 3 # obviously simplified
+
+ df = DataFrame([A()])
+ repr(df) # just don't die
+
+ def test_float_trim_zeros(self):
+ vals = [
+ 2.08430917305e10,
+ 3.52205017305e10,
+ 2.30674817305e10,
+ 2.03954217305e10,
+ 5.59897817305e10,
+ ]
+ skip = True
+ for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
+ if line.startswith("dtype:"):
+ continue
+ if _three_digit_exp():
+ assert ("+010" in line) or skip
+ else:
+ assert ("+10" in line) or skip
+ skip = False
+
+ @pytest.mark.parametrize(
+ "data, expected",
+ [
+ (["3.50"], "0 3.50\ndtype: object"),
+ ([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
+ ([np.nan], "0 NaN\ndtype: float64"),
+ ([None], "0 None\ndtype: object"),
+ (["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
+ ([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
+ ([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
+ ([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
+ ],
+ )
+ def test_repr_str_float_truncation(self, data, expected, using_infer_string):
+ # GH#38708
+ series = Series(data, dtype=object if "3.50" in data else None)
+ result = repr(series)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "float_format,expected",
+ [
+ ("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
+ ("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
+ ],
+ )
+ def test_repr_float_format_in_object_col(self, float_format, expected):
+ # GH#40024
+ df = Series([1000.0, "test"])
+ with option_context("display.float_format", float_format):
+ result = repr(df)
+
+ assert result == expected
+
+ def test_period(self):
+ # GH 12615
+ df = DataFrame(
+ {
+ "A": pd.period_range("2013-01", periods=4, freq="M"),
+ "B": [
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2011-02-01", freq="D"),
+ pd.Period("2011-03-01 09:00", freq="h"),
+ pd.Period("2011-04", freq="M"),
+ ],
+ "C": list("abcd"),
+ }
+ )
+ exp = (
+ " A B C\n"
+ "0 2013-01 2011-01 a\n"
+ "1 2013-02 2011-02-01 b\n"
+ "2 2013-03 2011-03-01 09:00 c\n"
+ "3 2013-04 2011-04 d"
+ )
+ assert str(df) == exp
+
+ @pytest.mark.parametrize(
+ "length, max_rows, min_rows, expected",
+ [
+ (10, 10, 10, 10),
+ (10, 10, None, 10),
+ (10, 8, None, 8),
+ (20, 30, 10, 30), # max_rows > len(frame), hence max_rows
+ (50, 30, 10, 10), # max_rows < len(frame), hence min_rows
+ (100, 60, 10, 10), # same
+ (60, 60, 10, 60), # edge case
+ (61, 60, 10, 10), # edge case
+ ],
+ )
+ def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
+ """Check that display logic is correct.
+
+ GH #37359
+
+ See description here:
+ https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
+ """
+ formatter = fmt.DataFrameFormatter(
+ DataFrame(np.random.default_rng(2).random((length, 3))),
+ max_rows=max_rows,
+ min_rows=min_rows,
+ )
+ result = formatter.max_rows_fitted
+ assert result == expected
+
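+# The rule verified above: max_rows_fitted is the row budget for display.
+# While the frame is no longer than max_rows, the budget is max_rows itself;
+# once the frame exceeds max_rows, the budget collapses to min_rows (or to
+# max_rows when min_rows is None).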
+
+def gen_series_formatting():
+ s1 = Series(["a"] * 100)
+ s2 = Series(["ab"] * 100)
+ s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
+ s4 = s3[::-1]
+ test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
+ return test_sers
+
+
+class TestSeriesFormatting:
+ def test_freq_name_separation(self):
+ s = Series(
+ np.random.default_rng(2).standard_normal(10),
+ index=date_range("1/1/2000", periods=10),
+ name=0,
+ )
+
+ result = repr(s)
+ assert "Freq: D, Name: 0" in result
+
+ def test_unicode_name_in_footer(self):
+ s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
+ sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
+ sf._get_footer() # should not raise exception
+
+ @pytest.mark.xfail(
+ using_pyarrow_string_dtype(), reason="Fixup when arrow is default"
+ )
+ def test_east_asian_unicode_series(self):
+ # not aligned properly because of east asian width
+
+ # unicode index
+ s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
+ expected = "".join(
+ [
+ "あ a\n",
+ "いい bb\n",
+ "ううう CCC\n",
+ "ええええ D\ndtype: object",
+ ]
+ )
+ assert repr(s) == expected
+
+ # unicode values
+ s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
+ expected = "".join(
+ [
+ "a あ\n",
+ "bb いい\n",
+ "c ううう\n",
+ "ddd ええええ\n",
+ "dtype: object",
+ ]
+ )
+
+ assert repr(s) == expected
+
+ # both
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["ああ", "いいいい", "う", "えええ"],
+ )
+ expected = "".join(
+ [
+ "ああ あ\n",
+ "いいいい いい\n",
+ "う ううう\n",
+ "えええ ええええ\n",
+ "dtype: object",
+ ]
+ )
+
+ assert repr(s) == expected
+
+ # unicode footer
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["ああ", "いいいい", "う", "えええ"],
+ name="おおおおおおお",
+ )
+ expected = (
+ "ああ あ\nいいいい いい\nう ううう\n"
+ "えええ ええええ\nName: おおおおおおお, dtype: object"
+ )
+ assert repr(s) == expected
+
+ # MultiIndex
+ idx = MultiIndex.from_tuples(
+ [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
+ )
+ s = Series([1, 22, 3333, 44444], index=idx)
+ expected = (
+ "あ いい 1\n"
+ "う え 22\n"
+ "おおお かかかか 3333\n"
+ "き くく 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # object dtype, shorter than unicode repr
+ s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
+ expected = (
+ "1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # object dtype, longer than unicode repr
+ s = Series(
+ [1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
+ )
+ expected = (
+ "1 1\n"
+ "AB 22\n"
+ "2011-01-01 00:00:00 3333\n"
+ "あああ 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # truncate
+ with option_context("display.max_rows", 3):
+ s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
+
+ expected = (
+ "0 あ\n ... \n"
+ "3 ええええ\n"
+ "Name: おおおおおおお, Length: 4, dtype: object"
+ )
+ assert repr(s) == expected
+
+ s.index = ["ああ", "いいいい", "う", "えええ"]
+ expected = (
+ "ああ あ\n ... \n"
+ "えええ ええええ\n"
+ "Name: おおおおおおお, Length: 4, dtype: object"
+ )
+ assert repr(s) == expected
+
+ # Enable Unicode option -----------------------------------------
+ with option_context("display.unicode.east_asian_width", True):
+ # unicode index
+ s = Series(
+ ["a", "bb", "CCC", "D"],
+ index=["あ", "いい", "ううう", "ええええ"],
+ )
+ expected = (
+ "あ a\nいい bb\nううう CCC\n"
+ "ええええ D\ndtype: object"
+ )
+ assert repr(s) == expected
+
+ # unicode values
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["a", "bb", "c", "ddd"],
+ )
+ expected = (
+ "a あ\nbb いい\nc ううう\n"
+ "ddd ええええ\ndtype: object"
+ )
+ assert repr(s) == expected
+ # both
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["ああ", "いいいい", "う", "えええ"],
+ )
+ expected = (
+ "ああ あ\n"
+ "いいいい いい\n"
+ "う ううう\n"
+ "えええ ええええ\ndtype: object"
+ )
+ assert repr(s) == expected
+
+ # unicode footer
+ s = Series(
+ ["あ", "いい", "ううう", "ええええ"],
+ index=["ああ", "いいいい", "う", "えええ"],
+ name="おおおおおおお",
+ )
+ expected = (
+ "ああ あ\n"
+ "いいいい いい\n"
+ "う ううう\n"
+ "えええ ええええ\n"
+ "Name: おおおおおおお, dtype: object"
+ )
+ assert repr(s) == expected
+
+ # MultiIndex
+ idx = MultiIndex.from_tuples(
+ [("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
+ )
+ s = Series([1, 22, 3333, 44444], index=idx)
+ expected = (
+ "あ いい 1\n"
+ "う え 22\n"
+ "おおお かかかか 3333\n"
+ "き くく 44444\n"
+ "dtype: int64"
+ )
+ assert repr(s) == expected
+
+ # object dtype, shorter than unicode repr
+ s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
+ expected = (
+ "1 1\nAB 22\nNaN 3333\n"
+ "あああ 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # object dtype, longer than unicode repr
+ s = Series(
+ [1, 22, 3333, 44444],
+ index=[1, "AB", Timestamp("2011-01-01"), "あああ"],
+ )
+ expected = (
+ "1 1\n"
+ "AB 22\n"
+ "2011-01-01 00:00:00 3333\n"
+ "あああ 44444\ndtype: int64"
+ )
+ assert repr(s) == expected
+
+ # truncate
+ with option_context("display.max_rows", 3):
+ s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
+ expected = (
+ "0 あ\n ... \n"
+ "3 ええええ\n"
+ "Name: おおおおおおお, Length: 4, dtype: object"
+ )
+ assert repr(s) == expected
+
+ s.index = ["ああ", "いいいい", "う", "えええ"]
+ expected = (
+ "ああ あ\n"
+ " ... \n"
+ "えええ ええええ\n"
+ "Name: おおおおおおお, Length: 4, dtype: object"
+ )
+ assert repr(s) == expected
+
+ # ambiguous unicode
+ s = Series(
+ ["¡¡", "い¡¡", "ううう", "ええええ"],
+ index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"],
+ )
+ expected = (
+ "ああ ¡¡\n"
+ "¡¡¡¡いい い¡¡\n"
+ "¡¡ ううう\n"
+ "えええ ええええ\ndtype: object"
+ )
+ assert repr(s) == expected
+
+ def test_float_trim_zeros(self):
+ vals = [
+ 2.08430917305e10,
+ 3.52205017305e10,
+ 2.30674817305e10,
+ 2.03954217305e10,
+ 5.59897817305e10,
+ ]
+ for line in repr(Series(vals)).split("\n"):
+ if line.startswith("dtype:"):
+ continue
+ if _three_digit_exp():
+ assert "+010" in line
+ else:
+ assert "+10" in line
+
+ @pytest.mark.parametrize(
+ "start_date",
+ [
+ "2017-01-01 23:59:59.999999999",
+ "2017-01-01 23:59:59.99999999",
+ "2017-01-01 23:59:59.9999999",
+ "2017-01-01 23:59:59.999999",
+ "2017-01-01 23:59:59.99999",
+ "2017-01-01 23:59:59.9999",
+ ],
+ )
+ def test_datetimeindex_highprecision(self, start_date):
+ # GH19030
+ # Check that high-precision time values for the end of day are
+ # included in repr for DatetimeIndex
+ s1 = Series(date_range(start=start_date, freq="D", periods=5))
+ result = str(s1)
+ assert start_date in result
+
+ dti = date_range(start=start_date, freq="D", periods=5)
+ s2 = Series(3, index=dti)
+ result = str(s2.index)
+ assert start_date in result
+
+ def test_mixed_datetime64(self):
+ df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
+ df["B"] = pd.to_datetime(df.B)
+
+ result = repr(df.loc[0])
+ assert "2012-01-01" in result
+
+ def test_period(self):
+ # GH 12615
+ index = pd.period_range("2013-01", periods=6, freq="M")
+ s = Series(np.arange(6, dtype="int64"), index=index)
+ exp = (
+ "2013-01 0\n"
+ "2013-02 1\n"
+ "2013-03 2\n"
+ "2013-04 3\n"
+ "2013-05 4\n"
+ "2013-06 5\n"
+ "Freq: M, dtype: int64"
+ )
+ assert str(s) == exp
+
+ s = Series(index)
+ exp = (
+ "0 2013-01\n"
+ "1 2013-02\n"
+ "2 2013-03\n"
+ "3 2013-04\n"
+ "4 2013-05\n"
+ "5 2013-06\n"
+ "dtype: period[M]"
+ )
+ assert str(s) == exp
+
+ # periods with mixed freq
+ s = Series(
+ [
+ pd.Period("2011-01", freq="M"),
+ pd.Period("2011-02-01", freq="D"),
+ pd.Period("2011-03-01 09:00", freq="h"),
+ ]
+ )
+ exp = (
+ "0 2011-01\n1 2011-02-01\n"
+ "2 2011-03-01 09:00\ndtype: object"
+ )
+ assert str(s) == exp
+
+ def test_max_multi_index_display(self):
+ # GH 7101
+
+ # doc example (indexing.rst)
+
+ # multi-index
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ tuples = list(zip(*arrays))
+ index = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ s = Series(np.random.default_rng(2).standard_normal(8), index=index)
+
+ with option_context("display.max_rows", 10):
+ assert len(str(s).split("\n")) == 10
+ with option_context("display.max_rows", 3):
+ assert len(str(s).split("\n")) == 5
+ with option_context("display.max_rows", 2):
+ assert len(str(s).split("\n")) == 5
+ with option_context("display.max_rows", 1):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 0):
+ assert len(str(s).split("\n")) == 10
+
+ # index
+ s = Series(np.random.default_rng(2).standard_normal(8), None)
+
+ with option_context("display.max_rows", 10):
+ assert len(str(s).split("\n")) == 9
+ with option_context("display.max_rows", 3):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 2):
+ assert len(str(s).split("\n")) == 4
+ with option_context("display.max_rows", 1):
+ assert len(str(s).split("\n")) == 3
+ with option_context("display.max_rows", 0):
+ assert len(str(s).split("\n")) == 9
+
+ # Make sure #8532 is fixed
+ def test_consistent_format(self):
+ s = Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
+ with option_context("display.max_rows", 10, "display.show_dimensions", False):
+ res = repr(s)
+ exp = (
+ "0 1.0000\n1 1.0000\n2 1.0000\n3 "
+ "1.0000\n4 1.0000\n ... \n125 "
+ "1.0000\n126 1.0000\n127 0.9999\n128 "
+ "1.0000\n129 1.0000\ndtype: float64"
+ )
+ assert res == exp
+
+    def check_ncols(self, s):
+ lines = [
+ line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
+ ][:-1]
+ ncolsizes = len({len(line.strip()) for line in lines})
+ assert ncolsizes == 1
+
+ @pytest.mark.xfail(
+ using_pyarrow_string_dtype(), reason="change when arrow is default"
+ )
+ def test_format_explicit(self):
+ test_sers = gen_series_formatting()
+ with option_context("display.max_rows", 4, "display.show_dimensions", False):
+ res = repr(test_sers["onel"])
+ exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
+ assert exp == res
+ res = repr(test_sers["twol"])
+ exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
+ assert exp == res
+ res = repr(test_sers["asc"])
+ exp = (
+ "0 a\n1 ab\n ... \n4 abcde\n5 "
+ "abcdef\ndtype: object"
+ )
+ assert exp == res
+ res = repr(test_sers["desc"])
+ exp = (
+ "5 abcdef\n4 abcde\n ... \n1 ab\n0 "
+ "a\ndtype: object"
+ )
+ assert exp == res
+
+ def test_ncols(self):
+ test_sers = gen_series_formatting()
+ for s in test_sers.values():
+            self.check_ncols(s)
+
+ def test_max_rows_eq_one(self):
+ s = Series(range(10), dtype="int64")
+ with option_context("display.max_rows", 1):
+ strrepr = repr(s).split("\n")
+ exp1 = ["0", "0"]
+ res1 = strrepr[0].split()
+ assert exp1 == res1
+ exp2 = [".."]
+ res2 = strrepr[1].split()
+ assert exp2 == res2
+
+ def test_truncate_ndots(self):
+ def getndots(s):
+ return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
+
+ s = Series([0, 2, 3, 6])
+ with option_context("display.max_rows", 2):
+ strrepr = repr(s).replace("\n", "")
+ assert getndots(strrepr) == 2
+
+ s = Series([0, 100, 200, 400])
+ with option_context("display.max_rows", 2):
+ strrepr = repr(s).replace("\n", "")
+ assert getndots(strrepr) == 3
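+
+        # the number of dots in the marker tracks the value column's width:
+        # single-digit values give "..", three-digit values give "..."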
+
+ def test_show_dimensions(self):
+ # gh-7117
+ s = Series(range(5))
+
+ assert "Length" not in repr(s)
+
+ with option_context("display.max_rows", 4):
+ assert "Length" in repr(s)
+
+ with option_context("display.show_dimensions", True):
+ assert "Length" in repr(s)
+
+ with option_context("display.max_rows", 4, "display.show_dimensions", False):
+ assert "Length" not in repr(s)
+
+ def test_repr_min_rows(self):
+ s = Series(range(20))
+
+        # default setting: no truncation even if above min_rows
+ assert ".." not in repr(s)
+
+ s = Series(range(61))
+
+ # default of max_rows 60 triggers truncation if above
+ assert ".." in repr(s)
+
+ with option_context("display.max_rows", 10, "display.min_rows", 4):
+ # truncated after first two rows
+ assert ".." in repr(s)
+ assert "2 " not in repr(s)
+
+ with option_context("display.max_rows", 12, "display.min_rows", None):
+ # when set to None, follow value of max_rows
+ assert "5 5" in repr(s)
+
+ with option_context("display.max_rows", 10, "display.min_rows", 12):
+ # when min_rows is set higher than max_rows, the smaller max_rows wins
+ assert "5 5" not in repr(s)
+
+ with option_context("display.max_rows", None, "display.min_rows", 12):
+ # max_rows of None -> never truncate
+ assert ".." not in repr(s)
+
+
+class TestGenericArrayFormatter:
+ def test_1d_array(self):
+ # _GenericArrayFormatter is used on types for which there isn't a dedicated
+ # formatter. np.bool_ is one of those types.
+ obj = fmt._GenericArrayFormatter(np.array([True, False]))
+ res = obj.get_result()
+ assert len(res) == 2
+ # Results should be right-justified.
+ assert res[0] == " True"
+ assert res[1] == " False"
+
+ def test_2d_array(self):
+ obj = fmt._GenericArrayFormatter(np.array([[True, False], [False, True]]))
+ res = obj.get_result()
+ assert len(res) == 2
+ assert res[0] == " [True, False]"
+ assert res[1] == " [False, True]"
+
+ def test_3d_array(self):
+ obj = fmt._GenericArrayFormatter(
+ np.array([[[True, True], [False, False]], [[False, True], [True, False]]])
+ )
+ res = obj.get_result()
+ assert len(res) == 2
+ assert res[0] == " [[True, True], [False, False]]"
+ assert res[1] == " [[False, True], [True, False]]"
+
+ def test_2d_extension_type(self):
+ # GH 33770
+
+ # Define a stub extension type with just enough code to run Series.__repr__()
+ class DtypeStub(pd.api.extensions.ExtensionDtype):
+ @property
+ def type(self):
+ return np.ndarray
+
+ @property
+ def name(self):
+ return "DtypeStub"
+
+ class ExtTypeStub(pd.api.extensions.ExtensionArray):
+ def __len__(self) -> int:
+ return 2
+
+ def __getitem__(self, ix):
+ return [ix == 1, ix == 0]
+
+ @property
+ def dtype(self):
+ return DtypeStub()
+
+ series = Series(ExtTypeStub(), copy=False)
+ res = repr(series) # This line crashed before #33770 was fixed.
+ expected = "\n".join(
+ ["0 [False True]", "1 [True False]", "dtype: DtypeStub"]
+ )
+ assert res == expected
+
+
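+# Some C runtimes (notably older MSVC on Windows) format float exponents with
+# three digits, e.g. "1.7e+008" instead of "1.7e+08"; this probe lets tests
+# branch on that behavior.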
+def _three_digit_exp():
+ return f"{1.7e8:.4g}" == "1.7e+008"
+
+
+class TestFloatArrayFormatter:
+ def test_misc(self):
+ obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
+ result = obj.get_result()
+ assert len(result) == 0
+
+ def test_format(self):
+ obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
+ result = obj.get_result()
+ assert result[0] == " 12.0"
+ assert result[1] == " 0.0"
+
+ def test_output_display_precision_trailing_zeroes(self):
+ # Issue #20359: trimming zeros while there is no decimal point
+
+ # Happens when display precision is set to zero
+ with option_context("display.precision", 0):
+ s = Series([840.0, 4200.0])
+ expected_output = "0 840\n1 4200\ndtype: float64"
+ assert str(s) == expected_output
+
+ @pytest.mark.parametrize(
+ "value,expected",
+ [
+ ([9.4444], " 0\n0 9"),
+ ([0.49], " 0\n0 5e-01"),
+ ([10.9999], " 0\n0 11"),
+ ([9.5444, 9.6], " 0\n0 10\n1 10"),
+ ([0.46, 0.78, -9.9999], " 0\n0 5e-01\n1 8e-01\n2 -1e+01"),
+ ],
+ )
+ def test_set_option_precision(self, value, expected):
+ # Issue #30122
+ # Precision was incorrectly shown
+
+ with option_context("display.precision", 0):
+ df_value = DataFrame(value)
+ assert str(df_value) == expected
+
+ def test_output_significant_digits(self):
+ # Issue #9764
+
+ # In case default display precision changes:
+ with option_context("display.precision", 6):
+ # DataFrame example from issue #9764
+ d = DataFrame(
+ {
+ "col1": [
+ 9.999e-8,
+ 1e-7,
+ 1.0001e-7,
+ 2e-7,
+ 4.999e-7,
+ 5e-7,
+ 5.0001e-7,
+ 6e-7,
+ 9.999e-7,
+ 1e-6,
+ 1.0001e-6,
+ 2e-6,
+ 4.999e-6,
+ 5e-6,
+ 5.0001e-6,
+ 6e-6,
+ ]
+ }
+ )
+
+ expected_output = {
+ (0, 6): " col1\n"
+ "0 9.999000e-08\n"
+ "1 1.000000e-07\n"
+ "2 1.000100e-07\n"
+ "3 2.000000e-07\n"
+ "4 4.999000e-07\n"
+ "5 5.000000e-07",
+ (1, 6): " col1\n"
+ "1 1.000000e-07\n"
+ "2 1.000100e-07\n"
+ "3 2.000000e-07\n"
+ "4 4.999000e-07\n"
+ "5 5.000000e-07",
+ (1, 8): " col1\n"
+ "1 1.000000e-07\n"
+ "2 1.000100e-07\n"
+ "3 2.000000e-07\n"
+ "4 4.999000e-07\n"
+ "5 5.000000e-07\n"
+ "6 5.000100e-07\n"
+ "7 6.000000e-07",
+ (8, 16): " col1\n"
+ "8 9.999000e-07\n"
+ "9 1.000000e-06\n"
+ "10 1.000100e-06\n"
+ "11 2.000000e-06\n"
+ "12 4.999000e-06\n"
+ "13 5.000000e-06\n"
+ "14 5.000100e-06\n"
+ "15 6.000000e-06",
+ (9, 16): " col1\n"
+ "9 0.000001\n"
+ "10 0.000001\n"
+ "11 0.000002\n"
+ "12 0.000005\n"
+ "13 0.000005\n"
+ "14 0.000005\n"
+ "15 0.000006",
+ }
+
+ for (start, stop), v in expected_output.items():
+ assert str(d[start:stop]) == v
+
+ def test_too_long(self):
+ # GH 10451
+ with option_context("display.precision", 4):
+ # need both a number > 1e6 and something that normally formats to
+ # having length > display.precision + 6
+ df = DataFrame({"x": [12345.6789]})
+ assert str(df) == " x\n0 12345.6789"
+ df = DataFrame({"x": [2e6]})
+ assert str(df) == " x\n0 2000000.0"
+ df = DataFrame({"x": [12345.6789, 2e6]})
+ assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
+
+
+class TestTimedelta64Formatter:
+ def test_days(self):
+ x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+ assert result[1].strip() == "1 days"
+
+ result = fmt._Timedelta64Formatter(x[1:2]).get_result()
+ assert result[0].strip() == "1 days"
+
+ def test_days_neg(self):
+ x = pd.to_timedelta(list(range(5)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(-x).get_result()
+ assert result[0].strip() == "0 days"
+ assert result[1].strip() == "-1 days"
+
+ def test_subdays(self):
+ y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")._values
+ result = fmt._Timedelta64Formatter(y).get_result()
+ assert result[0].strip() == "0 days 00:00:00"
+ assert result[1].strip() == "0 days 00:00:01"
+
+ def test_subdays_neg(self):
+ y = pd.to_timedelta(list(range(5)) + [NaT], unit="s")._values
+ result = fmt._Timedelta64Formatter(-y).get_result()
+ assert result[0].strip() == "0 days 00:00:00"
+ assert result[1].strip() == "-1 days +23:59:59"
+
+ def test_zero(self):
+ x = pd.to_timedelta(list(range(1)) + [NaT], unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+
+ x = pd.to_timedelta(list(range(1)), unit="D")._values
+ result = fmt._Timedelta64Formatter(x).get_result()
+ assert result[0].strip() == "0 days"
+
+
+class TestDatetime64Formatter:
+ def test_mixed(self):
+ x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), NaT])._values
+ result = fmt._Datetime64Formatter(x).get_result()
+ assert result[0].strip() == "2013-01-01 00:00:00"
+ assert result[1].strip() == "2013-01-01 12:00:00"
+
+ def test_dates(self):
+ x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), NaT])._values
+ result = fmt._Datetime64Formatter(x).get_result()
+ assert result[0].strip() == "2013-01-01"
+ assert result[1].strip() == "2013-01-02"
+
+ def test_date_nanos(self):
+ x = Series([Timestamp(200)])._values
+ result = fmt._Datetime64Formatter(x).get_result()
+ assert result[0].strip() == "1970-01-01 00:00:00.000000200"
+
+ def test_dates_display(self):
+ # 10170
+ # make sure that we display date formatting consistently
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="D"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-05 09:00:00"
+
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="s"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-01 09:00:04"
+
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="ms"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00.000"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-01 09:00:00.004"
+
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="us"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00.000000"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-01 09:00:00.000004"
+
+ x = Series(date_range("20130101 09:00:00", periods=5, freq="ns"))
+ x.iloc[1] = np.nan
+ result = fmt._Datetime64Formatter(x._values).get_result()
+ assert result[0].strip() == "2013-01-01 09:00:00.000000000"
+ assert result[1].strip() == "NaT"
+ assert result[4].strip() == "2013-01-01 09:00:00.000000004"
+
+ def test_datetime64formatter_yearmonth(self):
+ x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])._values
+
+ def format_func(x):
+ return x.strftime("%Y-%m")
+
+ formatter = fmt._Datetime64Formatter(x, formatter=format_func)
+ result = formatter.get_result()
+ assert result == ["2016-01", "2016-02"]
+
+ def test_datetime64formatter_hoursecond(self):
+ x = Series(
+ pd.to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")
+ )._values
+
+ def format_func(x):
+ return x.strftime("%H:%M")
+
+ formatter = fmt._Datetime64Formatter(x, formatter=format_func)
+ result = formatter.get_result()
+ assert result == ["10:10", "12:12"]
+
+ def test_datetime64formatter_tz_ms(self):
+ x = (
+ Series(
+ np.array(["2999-01-01", "2999-01-02", "NaT"], dtype="datetime64[ms]")
+ )
+ .dt.tz_localize("US/Pacific")
+ ._values
+ )
+ result = fmt._Datetime64TZFormatter(x).get_result()
+ assert result[0].strip() == "2999-01-01 00:00:00-08:00"
+ assert result[1].strip() == "2999-01-02 00:00:00-08:00"
+
+
+class TestFormatPercentiles:
+ @pytest.mark.parametrize(
+ "percentiles, expected",
+ [
+ (
+ [0.01999, 0.02001, 0.5, 0.666666, 0.9999],
+ ["1.999%", "2.001%", "50%", "66.667%", "99.99%"],
+ ),
+ (
+ [0, 0.5, 0.02001, 0.5, 0.666666, 0.9999],
+ ["0%", "50%", "2.0%", "50%", "66.67%", "99.99%"],
+ ),
+ ([0.281, 0.29, 0.57, 0.58], ["28.1%", "29%", "57%", "58%"]),
+ ([0.28, 0.29, 0.57, 0.58], ["28%", "29%", "57%", "58%"]),
+ (
+ [0.9, 0.99, 0.999, 0.9999, 0.99999],
+ ["90%", "99%", "99.9%", "99.99%", "99.999%"],
+ ),
+ ],
+ )
+ def test_format_percentiles(self, percentiles, expected):
+ result = fmt.format_percentiles(percentiles)
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "percentiles",
+ [
+ ([0.1, np.nan, 0.5]),
+ ([-0.001, 0.1, 0.5]),
+ ([2, 0.1, 0.5]),
+ ([0.1, 0.5, "a"]),
+ ],
+ )
+ def test_error_format_percentiles(self, percentiles):
+ msg = r"percentiles should all be in the interval \[0,1\]"
+ with pytest.raises(ValueError, match=msg):
+ fmt.format_percentiles(percentiles)
+
+ def test_format_percentiles_integer_idx(self):
+ # Issue #26660
+ result = fmt.format_percentiles(np.linspace(0, 1, 10 + 1))
+ expected = [
+ "0%",
+ "10%",
+ "20%",
+ "30%",
+ "40%",
+ "50%",
+ "60%",
+ "70%",
+ "80%",
+ "90%",
+ "100%",
+ ]
+ assert result == expected
+
+
+@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+@pytest.mark.parametrize(
+ "encoding, data",
+ [(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")],
+)
+def test_filepath_or_buffer_arg(
+ method,
+ filepath_or_buffer,
+ assert_filepath_or_buffer_equals,
+ encoding,
+ data,
+ filepath_or_buffer_id,
+):
+ df = DataFrame([data])
+ if method in ["to_latex"]: # uses styler implementation
+ pytest.importorskip("jinja2")
+
+ if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
+ with pytest.raises(
+ ValueError, match="buf is not a file name and encoding is specified."
+ ):
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ elif encoding == "foo":
+ with pytest.raises(LookupError, match="unknown encoding"):
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ else:
+ expected = getattr(df, method)()
+ getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
+ assert_filepath_or_buffer_equals(expected)
+
+
+@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
+def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
+ if method in ["to_latex"]: # uses styler implementation
+ pytest.importorskip("jinja2")
+ msg = "buf is not a file name and it has no write method"
+ with pytest.raises(TypeError, match=msg):
+ getattr(float_frame, method)(buf=object())
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..8512f41396906de1f59bbb23d4b535f82c546132
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_ipython_compat.py
@@ -0,0 +1,90 @@
+import numpy as np
+
+import pandas._config.config as cf
+
+from pandas import (
+ DataFrame,
+ MultiIndex,
+)
+
+
+class TestTableSchemaRepr:
+ def test_publishes(self, ip):
+ ipython = ip.instance(config=ip.config)
+ df = DataFrame({"A": [1, 2]})
+ objects = [df["A"], df] # dataframe / series
+ expected_keys = [
+ {"text/plain", "application/vnd.dataresource+json"},
+ {"text/plain", "text/html", "application/vnd.dataresource+json"},
+ ]
+
+ opt = cf.option_context("display.html.table_schema", True)
+ last_obj = None
+ for obj, expected in zip(objects, expected_keys):
+ last_obj = obj
+ with opt:
+ formatted = ipython.display_formatter.format(obj)
+ assert set(formatted[0].keys()) == expected
+
+ with_latex = cf.option_context("styler.render.repr", "latex")
+
+ with opt, with_latex:
+ formatted = ipython.display_formatter.format(last_obj)
+
+ expected = {
+ "text/plain",
+ "text/html",
+ "text/latex",
+ "application/vnd.dataresource+json",
+ }
+ assert set(formatted[0].keys()) == expected
+
+ def test_publishes_not_implemented(self, ip):
+ # column MultiIndex
+ # GH#15996
+ midx = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, len(midx))), columns=midx
+ )
+
+ opt = cf.option_context("display.html.table_schema", True)
+
+ with opt:
+ formatted = ip.instance(config=ip.config).display_formatter.format(df)
+
+ expected = {"text/plain", "text/html"}
+ assert set(formatted[0].keys()) == expected
+
+ def test_config_on(self):
+ df = DataFrame({"A": [1, 2]})
+ with cf.option_context("display.html.table_schema", True):
+ result = df._repr_data_resource_()
+
+ assert result is not None
+
+ def test_config_default_off(self):
+ df = DataFrame({"A": [1, 2]})
+ with cf.option_context("display.html.table_schema", False):
+ result = df._repr_data_resource_()
+
+ assert result is None
+
+ def test_enable_data_resource_formatter(self, ip):
+ # GH#10491
+ formatters = ip.instance(config=ip.config).display_formatter.formatters
+ mimetype = "application/vnd.dataresource+json"
+
+ with cf.option_context("display.html.table_schema", True):
+ assert "application/vnd.dataresource+json" in formatters
+ assert formatters[mimetype].enabled
+
+ # still there, just disabled
+ assert "application/vnd.dataresource+json" in formatters
+ assert not formatters[mimetype].enabled
+
+ # able to re-set
+ with cf.option_context("display.html.table_schema", True):
+ assert "application/vnd.dataresource+json" in formatters
+ assert formatters[mimetype].enabled
+ # smoke test that it works
+ ip.instance(config=ip.config).display_formatter.format(cf)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..0db49a73621eab7fa59a76827a50b862fad41dca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py
@@ -0,0 +1,758 @@
+import io
+import os
+import sys
+from zipfile import ZipFile
+
+from _csv import Error
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ compat,
+)
+import pandas._testing as tm
+
+
+class TestToCSV:
+ def test_to_csv_with_single_column(self):
+ # see gh-18676, https://bugs.python.org/issue32255
+ #
+ # Python's CSV library adds an extraneous '""'
+ # before the newline when the NaN-value is in
+ # the first row. Otherwise, only the newline
+ # character is added. This behavior is inconsistent
+ # and was patched in https://bugs.python.org/pull_request4672.
+ df1 = DataFrame([None, 1])
+ expected1 = """\
+""
+1.0
+"""
+ with tm.ensure_clean("test.csv") as path:
+ df1.to_csv(path, header=None, index=None)
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected1
+
+ df2 = DataFrame([1, None])
+ expected2 = """\
+1.0
+""
+"""
+ with tm.ensure_clean("test.csv") as path:
+ df2.to_csv(path, header=None, index=None)
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected2
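+
+ def test_csv_single_empty_field_sketch(self):
+ # A minimal stdlib sketch (added for illustration, not part of the
+ # original suite) of the behavior described above: the csv module
+ # writes a row holding one empty field as '""' so that the row is
+ # not mistaken for a blank line.
+ import csv
+
+ buf = io.StringIO()
+ csv.writer(buf).writerow([""])
+ assert buf.getvalue() == '""\r\n'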
+
+ def test_to_csv_default_encoding(self):
+ # GH17097
+ df = DataFrame({"col": ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
+
+ with tm.ensure_clean("test.csv") as path:
+ # the default to_csv encoding is utf-8.
+ df.to_csv(path)
+ tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
+
+ def test_to_csv_quotechar(self):
+ df = DataFrame({"col": [1, 2]})
+ expected = """\
+"","col"
+"0","1"
+"1","2"
+"""
+
+ with tm.ensure_clean("test.csv") as path:
+ df.to_csv(path, quoting=1) # 1=QUOTE_ALL
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ expected = """\
+$$,$col$
+$0$,$1$
+$1$,$2$
+"""
+
+ with tm.ensure_clean("test.csv") as path:
+ df.to_csv(path, quoting=1, quotechar="$")
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ with tm.ensure_clean("test.csv") as path:
+ with pytest.raises(TypeError, match="quotechar"):
+ df.to_csv(path, quoting=1, quotechar=None)
+
+ def test_to_csv_doublequote(self):
+ df = DataFrame({"col": ['a"a', '"bb"']})
+ expected = '''\
+"","col"
+"0","a""a"
+"1","""bb"""
+'''
+
+ with tm.ensure_clean("test.csv") as path:
+ df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ with tm.ensure_clean("test.csv") as path:
+ with pytest.raises(Error, match="escapechar"):
+ df.to_csv(path, doublequote=False) # no escapechar set
+
+ def test_to_csv_escapechar(self):
+ df = DataFrame({"col": ['a"a', '"bb"']})
+ expected = """\
+"","col"
+"0","a\\"a"
+"1","\\"bb\\""
+"""
+
+ with tm.ensure_clean("test.csv") as path: # QUOTE_ALL
+ df.to_csv(path, quoting=1, doublequote=False, escapechar="\\")
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ df = DataFrame({"col": ["a,a", ",bb,"]})
+ expected = """\
+,col
+0,a\\,a
+1,\\,bb\\,
+"""
+
+ with tm.ensure_clean("test.csv") as path:
+ df.to_csv(path, quoting=3, escapechar="\\") # QUOTE_NONE
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ def test_csv_to_string(self):
+ df = DataFrame({"col": [1, 2]})
+ expected_rows = [",col", "0,1", "1,2"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv() == expected
+
+ def test_to_csv_decimal(self):
+ # see gh-781
+ df = DataFrame({"col1": [1], "col2": ["a"], "col3": [10.1]})
+
+ expected_rows = [",col1,col2,col3", "0,1,a,10.1"]
+ expected_default = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv() == expected_default
+
+ expected_rows = [";col1;col2;col3", "0;1;a;10,1"]
+ expected_european_excel = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv(decimal=",", sep=";") == expected_european_excel
+
+ expected_rows = [",col1,col2,col3", "0,1,a,10.10"]
+ expected_float_format_default = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv(float_format="%.2f") == expected_float_format_default
+
+ expected_rows = [";col1;col2;col3", "0;1;a;10,10"]
+ expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert (
+ df.to_csv(decimal=",", sep=";", float_format="%.2f")
+ == expected_float_format
+ )
+
+ # see gh-11553: testing if decimal is taken into account for '0.0'
+ df = DataFrame({"a": [0, 1.1], "b": [2.2, 3.3], "c": 1})
+
+ expected_rows = ["a,b,c", "0^0,2^2,1", "1^1,3^3,1"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv(index=False, decimal="^") == expected
+
+ # same but for an index
+ assert df.set_index("a").to_csv(decimal="^") == expected
+
+ # same for a multi-index
+ assert df.set_index(["a", "b"]).to_csv(decimal="^") == expected
+
+ def test_to_csv_float_format(self):
+ # testing if float_format is taken into account for the index
+ # GH 11553
+ df = DataFrame({"a": [0, 1], "b": [2.2, 3.3], "c": 1})
+
+ expected_rows = ["a,b,c", "0,2.20,1", "1,3.30,1"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.set_index("a").to_csv(float_format="%.2f") == expected
+
+ # same for a multi-index
+ assert df.set_index(["a", "b"]).to_csv(float_format="%.2f") == expected
+
+ def test_to_csv_na_rep(self):
+ # see gh-11553
+ #
+ # Testing if NaN values are correctly represented in the index.
+ df = DataFrame({"a": [0, np.nan], "b": [0, 1], "c": [2, 3]})
+ expected_rows = ["a,b,c", "0.0,0,2", "_,1,3"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ assert df.set_index("a").to_csv(na_rep="_") == expected
+ assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
+
+ # now with an index containing only NaNs
+ df = DataFrame({"a": np.nan, "b": [0, 1], "c": [2, 3]})
+ expected_rows = ["a,b,c", "_,0,2", "_,1,3"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ assert df.set_index("a").to_csv(na_rep="_") == expected
+ assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
+
+ # check that the na_rep parameter does not break anything when there are no NaNs
+ df = DataFrame({"a": 0, "b": [0, 1], "c": [2, 3]})
+ expected_rows = ["a,b,c", "0,0,2", "0,1,3"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ assert df.set_index("a").to_csv(na_rep="_") == expected
+ assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
+
+ csv = pd.Series(["a", pd.NA, "c"]).to_csv(na_rep="ZZZZZ")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,a", "1,ZZZZZ", "2,c"])
+ assert expected == csv
+
+ def test_to_csv_na_rep_nullable_string(self, nullable_string_dtype):
+ # GH 29975
+ # Make sure full na_rep shows up when a dtype is provided
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,a", "1,ZZZZZ", "2,c"])
+ csv = pd.Series(["a", pd.NA, "c"], dtype=nullable_string_dtype).to_csv(
+ na_rep="ZZZZZ"
+ )
+ assert expected == csv
+
+ def test_to_csv_date_format(self):
+ # GH 10209
+ df_sec = DataFrame({"A": pd.date_range("20130101", periods=5, freq="s")})
+ df_day = DataFrame({"A": pd.date_range("20130101", periods=5, freq="d")})
+
+ expected_rows = [
+ ",A",
+ "0,2013-01-01 00:00:00",
+ "1,2013-01-01 00:00:01",
+ "2,2013-01-01 00:00:02",
+ "3,2013-01-01 00:00:03",
+ "4,2013-01-01 00:00:04",
+ ]
+ expected_default_sec = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df_sec.to_csv() == expected_default_sec
+
+ expected_rows = [
+ ",A",
+ "0,2013-01-01 00:00:00",
+ "1,2013-01-02 00:00:00",
+ "2,2013-01-03 00:00:00",
+ "3,2013-01-04 00:00:00",
+ "4,2013-01-05 00:00:00",
+ ]
+ expected_ymdhms_day = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df_day.to_csv(date_format="%Y-%m-%d %H:%M:%S") == expected_ymdhms_day
+
+ expected_rows = [
+ ",A",
+ "0,2013-01-01",
+ "1,2013-01-01",
+ "2,2013-01-01",
+ "3,2013-01-01",
+ "4,2013-01-01",
+ ]
+ expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df_sec.to_csv(date_format="%Y-%m-%d") == expected_ymd_sec
+
+ expected_rows = [
+ ",A",
+ "0,2013-01-01",
+ "1,2013-01-02",
+ "2,2013-01-03",
+ "3,2013-01-04",
+ "4,2013-01-05",
+ ]
+ expected_default_day = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df_day.to_csv() == expected_default_day
+ assert df_day.to_csv(date_format="%Y-%m-%d") == expected_default_day
+
+ # see gh-7791
+ #
+ # Testing if date_format parameter is taken into account
+ # for multi-indexed DataFrames.
+ df_sec["B"] = 0
+ df_sec["C"] = 1
+
+ expected_rows = ["A,B,C", "2013-01-01,0,1.0"]
+ expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ df_sec_grouped = df_sec.groupby([pd.Grouper(key="A", freq="1h"), "B"])
+ assert df_sec_grouped.mean().to_csv(date_format="%Y-%m-%d") == expected_ymd_sec
+
+ def test_to_csv_different_datetime_formats(self):
+ # GH#21734
+ df = DataFrame(
+ {
+ "date": pd.to_datetime("1970-01-01"),
+ "datetime": pd.date_range("1970-01-01", periods=2, freq="h"),
+ }
+ )
+ expected_rows = [
+ "date,datetime",
+ "1970-01-01,1970-01-01 00:00:00",
+ "1970-01-01,1970-01-01 01:00:00",
+ ]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert df.to_csv(index=False) == expected
+
+ def test_to_csv_date_format_in_categorical(self):
+ # GH#40754
+ ser = pd.Series(pd.to_datetime(["2021-03-27", pd.NaT], format="%Y-%m-%d"))
+ ser = ser.astype("category")
+ expected = tm.convert_rows_list_to_csv_str(["0", "2021-03-27", '""'])
+ assert ser.to_csv(index=False) == expected
+
+ ser = pd.Series(
+ pd.date_range(
+ start="2021-03-27", freq="D", periods=1, tz="Europe/Berlin"
+ ).append(pd.DatetimeIndex([pd.NaT]))
+ )
+ ser = ser.astype("category")
+ assert ser.to_csv(index=False, date_format="%Y-%m-%d") == expected
+
+ def test_to_csv_float_ea_float_format(self):
+ # GH#45991
+ df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"})
+ df["a"] = df["a"].astype("Float64")
+ result = df.to_csv(index=False, float_format="%.5f")
+ expected = tm.convert_rows_list_to_csv_str(
+ ["a,b", "1.10000,c", "2.02000,c", ",c", "6.00001,c"]
+ )
+ assert result == expected
+
+ def test_to_csv_float_ea_no_float_format(self):
+ # GH#45991
+ df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"})
+ df["a"] = df["a"].astype("Float64")
+ result = df.to_csv(index=False)
+ expected = tm.convert_rows_list_to_csv_str(
+ ["a,b", "1.1,c", "2.02,c", ",c", "6.000006,c"]
+ )
+ assert result == expected
+
+ def test_to_csv_multi_index(self):
+ # see gh-6618
+ df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
+
+ exp_rows = [",1", ",2", "0,1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv() == exp
+
+ exp_rows = ["1", "2", "1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv(index=False) == exp
+
+ df = DataFrame(
+ [1],
+ columns=pd.MultiIndex.from_arrays([[1], [2]]),
+ index=pd.MultiIndex.from_arrays([[1], [2]]),
+ )
+
+ exp_rows = [",,1", ",,2", "1,2,1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv() == exp
+
+ exp_rows = ["1", "2", "1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv(index=False) == exp
+
+ df = DataFrame([1], columns=pd.MultiIndex.from_arrays([["foo"], ["bar"]]))
+
+ exp_rows = [",foo", ",bar", "0,1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv() == exp
+
+ exp_rows = ["foo", "bar", "1"]
+ exp = tm.convert_rows_list_to_csv_str(exp_rows)
+ assert df.to_csv(index=False) == exp
+
+ @pytest.mark.parametrize(
+ "ind,expected",
+ [
+ (
+ pd.MultiIndex(levels=[[1.0]], codes=[[0]], names=["x"]),
+ "x,data\n1.0,1\n",
+ ),
+ (
+ pd.MultiIndex(
+ levels=[[1.0], [2.0]], codes=[[0], [0]], names=["x", "y"]
+ ),
+ "x,y,data\n1.0,2.0,1\n",
+ ),
+ ],
+ )
+ def test_to_csv_single_level_multi_index(self, ind, expected, frame_or_series):
+ # see gh-19589
+ obj = frame_or_series(pd.Series([1], ind, name="data"))
+
+ result = obj.to_csv(lineterminator="\n", header=True)
+ assert result == expected
+
+ def test_to_csv_string_array_ascii(self):
+ # GH 10813
+ str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
+ df = DataFrame(str_array)
+ expected_ascii = """\
+,names
+0,"['foo', 'bar']"
+1,"['baz', 'qux']"
+"""
+ with tm.ensure_clean("str_test.csv") as path:
+ df.to_csv(path, encoding="ascii")
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected_ascii
+
+ def test_to_csv_string_array_utf8(self):
+ # GH 10813
+ str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
+ df = DataFrame(str_array)
+ expected_utf8 = """\
+,names
+0,"['foo', 'bar']"
+1,"['baz', 'qux']"
+"""
+ with tm.ensure_clean("unicode_test.csv") as path:
+ df.to_csv(path, encoding="utf-8")
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected_utf8
+
+ def test_to_csv_string_with_lf(self):
+ # GH 20353
+ data = {"int": [1, 2, 3], "str_lf": ["abc", "d\nef", "g\nh\n\ni"]}
+ df = DataFrame(data)
+ with tm.ensure_clean("lf_test.csv") as path:
+ # case 1: the default line terminator (os.linesep; see PR 21406)
+ os_linesep = os.linesep.encode("utf-8")
+ expected_noarg = (
+ b"int,str_lf"
+ + os_linesep
+ + b"1,abc"
+ + os_linesep
+ + b'2,"d\nef"'
+ + os_linesep
+ + b'3,"g\nh\n\ni"'
+ + os_linesep
+ )
+ df.to_csv(path, index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_noarg
+ with tm.ensure_clean("lf_test.csv") as path:
+ # case 2: LF as line terminator
+ expected_lf = b'int,str_lf\n1,abc\n2,"d\nef"\n3,"g\nh\n\ni"\n'
+ df.to_csv(path, lineterminator="\n", index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_lf
+ with tm.ensure_clean("lf_test.csv") as path:
+ # case 3: CRLF as line terminator
+ # 'lineterminator' should not change inner element
+ expected_crlf = b'int,str_lf\r\n1,abc\r\n2,"d\nef"\r\n3,"g\nh\n\ni"\r\n'
+ df.to_csv(path, lineterminator="\r\n", index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_crlf
+
+ def test_to_csv_string_with_crlf(self):
+ # GH 20353
+ data = {"int": [1, 2, 3], "str_crlf": ["abc", "d\r\nef", "g\r\nh\r\n\r\ni"]}
+ df = DataFrame(data)
+ with tm.ensure_clean("crlf_test.csv") as path:
+ # case 1: the default line terminator (os.linesep; see PR 21406)
+ os_linesep = os.linesep.encode("utf-8")
+ expected_noarg = (
+ b"int,str_crlf"
+ + os_linesep
+ + b"1,abc"
+ + os_linesep
+ + b'2,"d\r\nef"'
+ + os_linesep
+ + b'3,"g\r\nh\r\n\r\ni"'
+ + os_linesep
+ )
+ df.to_csv(path, index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_noarg
+ with tm.ensure_clean("crlf_test.csv") as path:
+ # case 2: LF as line terminator
+ expected_lf = b'int,str_crlf\n1,abc\n2,"d\r\nef"\n3,"g\r\nh\r\n\r\ni"\n'
+ df.to_csv(path, lineterminator="\n", index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_lf
+ with tm.ensure_clean("crlf_test.csv") as path:
+ # case 3: CRLF as line terminator
+ # 'lineterminator' should not change inner element
+ expected_crlf = (
+ b"int,str_crlf\r\n"
+ b"1,abc\r\n"
+ b'2,"d\r\nef"\r\n'
+ b'3,"g\r\nh\r\n\r\ni"\r\n'
+ )
+ df.to_csv(path, lineterminator="\r\n", index=False)
+ with open(path, "rb") as f:
+ assert f.read() == expected_crlf
+
+ def test_to_csv_stdout_file(self, capsys):
+ # GH 21561
+ df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["name_1", "name_2"])
+ expected_rows = [",name_1,name_2", "0,foo,bar", "1,baz,qux"]
+ expected_ascii = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ df.to_csv(sys.stdout, encoding="ascii")
+ captured = capsys.readouterr()
+
+ assert captured.out == expected_ascii
+ assert not sys.stdout.closed
+
+ @pytest.mark.xfail(
+ compat.is_platform_windows(),
+ reason=(
+ "Especially in Windows, file stream should not be passed"
+ "to csv writer without newline='' option."
+ "(https://docs.python.org/3/library/csv.html#csv.writer)"
+ ),
+ )
+ def test_to_csv_write_to_open_file(self):
+ # GH 21696
+ df = DataFrame({"a": ["x", "y", "z"]})
+ expected = """\
+manual header
+x
+y
+z
+"""
+ with tm.ensure_clean("test.txt") as path:
+ with open(path, "w", encoding="utf-8") as f:
+ f.write("manual header\n")
+ df.to_csv(f, header=None, index=None)
+ with open(path, encoding="utf-8") as f:
+ assert f.read() == expected
+
+ def test_to_csv_write_to_open_file_with_newline_py3(self):
+ # see gh-21696
+ # see gh-20353
+ df = DataFrame({"a": ["x", "y", "z"]})
+ expected_rows = ["x", "y", "z"]
+ expected = "manual header\n" + tm.convert_rows_list_to_csv_str(expected_rows)
+ with tm.ensure_clean("test.txt") as path:
+ with open(path, "w", newline="", encoding="utf-8") as f:
+ f.write("manual header\n")
+ df.to_csv(f, header=None, index=None)
+
+ with open(path, "rb") as f:
+ assert f.read() == bytes(expected, "utf-8")
+
+ @pytest.mark.parametrize("to_infer", [True, False])
+ @pytest.mark.parametrize("read_infer", [True, False])
+ def test_to_csv_compression(
+ self, compression_only, read_infer, to_infer, compression_to_extension
+ ):
+ # see gh-15008
+ compression = compression_only
+
+ # The matching file extension is appended below.
+ filename = "test."
+ filename += compression_to_extension[compression]
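+ # the compression_to_extension fixture maps method -> canonical suffix
+ # (e.g. "gzip" -> "gz", "zstd" -> "zst")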
+
+ df = DataFrame({"A": [1]})
+
+ to_compression = "infer" if to_infer else compression
+ read_compression = "infer" if read_infer else compression
+
+ with tm.ensure_clean(filename) as path:
+ df.to_csv(path, compression=to_compression)
+ result = pd.read_csv(path, index_col=0, compression=read_compression)
+ tm.assert_frame_equal(result, df)
+
+ def test_to_csv_compression_dict(self, compression_only):
+ # GH 26023
+ method = compression_only
+ df = DataFrame({"ABC": [1]})
+ filename = "to_csv_compress_as_dict."
+ extension = {
+ "gzip": "gz",
+ "zstd": "zst",
+ }.get(method, method)
+ filename += extension
+ with tm.ensure_clean(filename) as path:
+ df.to_csv(path, compression={"method": method})
+ read_df = pd.read_csv(path, index_col=0)
+ tm.assert_frame_equal(read_df, df)
+
+ def test_to_csv_compression_dict_no_method_raises(self):
+ # GH 26023
+ df = DataFrame({"ABC": [1]})
+ compression = {"some_option": True}
+ msg = "must have key 'method'"
+
+ with tm.ensure_clean("out.zip") as path:
+ with pytest.raises(ValueError, match=msg):
+ df.to_csv(path, compression=compression)
+
+ @pytest.mark.parametrize("compression", ["zip", "infer"])
+ @pytest.mark.parametrize("archive_name", ["test_to_csv.csv", "test_to_csv.zip"])
+ def test_to_csv_zip_arguments(self, compression, archive_name):
+ # GH 26023
+ df = DataFrame({"ABC": [1]})
+ with tm.ensure_clean("to_csv_archive_name.zip") as path:
+ df.to_csv(
+ path, compression={"method": compression, "archive_name": archive_name}
+ )
+ with ZipFile(path) as zp:
+ assert len(zp.filelist) == 1
+ archived_file = zp.filelist[0].filename
+ assert archived_file == archive_name
+
+ @pytest.mark.parametrize(
+ "filename,expected_arcname",
+ [
+ ("archive.csv", "archive.csv"),
+ ("archive.tsv", "archive.tsv"),
+ ("archive.csv.zip", "archive.csv"),
+ ("archive.tsv.zip", "archive.tsv"),
+ ("archive.zip", "archive"),
+ ],
+ )
+ def test_to_csv_zip_infer_name(self, tmp_path, filename, expected_arcname):
+ # GH 39465
+ df = DataFrame({"ABC": [1]})
+ path = tmp_path / filename
+ df.to_csv(path, compression="zip")
+ with ZipFile(path) as zp:
+ assert len(zp.filelist) == 1
+ archived_file = zp.filelist[0].filename
+ assert archived_file == expected_arcname
+
+ @pytest.mark.parametrize("df_new_type", ["Int64"])
+ def test_to_csv_na_rep_long_string(self, df_new_type):
+ # see gh-25099
+ df = DataFrame({"c": [float("nan")] * 3})
+ df = df.astype(df_new_type)
+ expected_rows = ["c", "mynull", "mynull", "mynull"]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+
+ result = df.to_csv(index=False, na_rep="mynull", encoding="ascii")
+
+ assert expected == result
+
+ def test_to_csv_timedelta_precision(self):
+ # GH 6783
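+ # astype("timedelta64[ns]") reinterprets the integers as nanosecond ticks,
+ # hence the "0 days 00:00:00.000000001" expected below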
+ s = pd.Series([1, 1]).astype("timedelta64[ns]")
+ buf = io.StringIO()
+ s.to_csv(buf)
+ result = buf.getvalue()
+ expected_rows = [
+ ",0",
+ "0,0 days 00:00:00.000000001",
+ "1,0 days 00:00:00.000000001",
+ ]
+ expected = tm.convert_rows_list_to_csv_str(expected_rows)
+ assert result == expected
+
+ def test_na_rep_truncated(self):
+ # https://github.com/pandas-dev/pandas/issues/31447
+ result = pd.Series(range(8, 12)).to_csv(na_rep="-")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,8", "1,9", "2,10", "3,11"])
+ assert result == expected
+
+ result = pd.Series([True, False]).to_csv(na_rep="nan")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,True", "1,False"])
+ assert result == expected
+
+ result = pd.Series([1.1, 2.2]).to_csv(na_rep=".")
+ expected = tm.convert_rows_list_to_csv_str([",0", "0,1.1", "1,2.2"])
+ assert result == expected
+
+ @pytest.mark.parametrize("errors", ["surrogatepass", "ignore", "replace"])
+ def test_to_csv_errors(self, errors):
+ # GH 22610
+ data = ["\ud800foo"]
+ ser = pd.Series(data, index=Index(data, dtype=object), dtype=object)
+ with tm.ensure_clean("test.csv") as path:
+ ser.to_csv(path, errors=errors)
+ # No use in reading back the data as it is not the same anymore
+ # due to the error handling
+
+ @pytest.mark.parametrize("mode", ["wb", "w"])
+ def test_to_csv_binary_handle(self, mode):
+ """
+ Binary file objects should work (if 'mode' contains a 'b') or even without
+ it in most cases.
+
+ GH 35058 and GH 19827
+ """
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD")),
+ index=Index([f"i-{i}" for i in range(30)]),
+ )
+ with tm.ensure_clean() as path:
+ with open(path, mode="w+b") as handle:
+ df.to_csv(handle, mode=mode)
+ tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
+
+ @pytest.mark.parametrize("mode", ["wb", "w"])
+ def test_to_csv_encoding_binary_handle(self, mode):
+ """
+ Binary file objects should honor a specified encoding.
+
+ GH 23854 and GH 13068 with binary handles
+ """
+ # example from GH 23854
+ content = "a, b, 🐟".encode("utf-8-sig")
+ buffer = io.BytesIO(content)
+ df = pd.read_csv(buffer, encoding="utf-8-sig")
+
+ buffer = io.BytesIO()
+ df.to_csv(buffer, mode=mode, encoding="utf-8-sig", index=False)
+ buffer.seek(0) # tests whether file handle wasn't closed
+ assert buffer.getvalue().startswith(content)
+
+ # example from GH 13068
+ with tm.ensure_clean() as path:
+ with open(path, "w+b") as handle:
+ DataFrame().to_csv(handle, mode=mode, encoding="utf-8-sig")
+
+ handle.seek(0)
+ assert handle.read().startswith(b'\xef\xbb\xbf""')
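+
+ def test_utf8_sig_bom_sketch(self):
+ # Illustrative companion check (added here, not part of the original
+ # suite): the "utf-8-sig" codec is what prepends the BOM bytes
+ # asserted on above.
+ assert "".encode("utf-8-sig") == b"\xef\xbb\xbf"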
+
+
+def test_to_csv_iterative_compression_name(compression):
+ # GH 38714
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD")),
+ index=Index([f"i-{i}" for i in range(30)]),
+ )
+ with tm.ensure_clean() as path:
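+ # chunksize=1 forces a separate write call per row, exercising repeated
+ # writes into one compressed handle (the GH 38714 regression)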
+ df.to_csv(path, compression=compression, chunksize=1)
+ tm.assert_frame_equal(
+ pd.read_csv(path, compression=compression, index_col=0), df
+ )
+
+
+def test_to_csv_iterative_compression_buffer(compression):
+ # GH 38714
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD")),
+ index=Index([f"i-{i}" for i in range(30)]),
+ )
+ with io.BytesIO() as buffer:
+ df.to_csv(buffer, compression=compression, chunksize=1)
+ buffer.seek(0)
+ tm.assert_frame_equal(
+ pd.read_csv(buffer, compression=compression, index_col=0), df
+ )
+ assert not buffer.closed
+
+
+def test_to_csv_pos_args_deprecation():
+ # GH-54229
+ df = DataFrame({"a": [1, 2, 3]})
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_csv except for the "
+ r"argument 'path_or_buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ buffer = io.BytesIO()
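+ # ";" is passed positionally as `sep`, which triggers the warning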
+ df.to_csv(buffer, ";")
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py
new file mode 100644
index 0000000000000000000000000000000000000000..790ba92f70c40095af3f40396135be2842b33229
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py
@@ -0,0 +1,1177 @@
+from datetime import datetime
+from io import StringIO
+import itertools
+import re
+import textwrap
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ get_option,
+ option_context,
+)
+import pandas._testing as tm
+
+import pandas.io.formats.format as fmt
+
+lorem_ipsum = (
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod "
+ "tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim "
+ "veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex "
+ "ea commodo consequat. Duis aute irure dolor in reprehenderit in "
+ "voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur "
+ "sint occaecat cupidatat non proident, sunt in culpa qui officia "
+ "deserunt mollit anim id est laborum."
+)
+
+
+def expected_html(datapath, name):
+ """
+ Read HTML file from formats data directory.
+
+ Parameters
+ ----------
+ datapath : pytest fixture
+ The datapath fixture injected into a test by pytest.
+ name : str
+ The name of the HTML file without the suffix.
+
+ Returns
+ -------
+ str : contents of HTML file.
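+
+ Examples
+ --------
+ For instance, the fixture used by ``test_to_html_truncate`` below is
+ loaded as ``expected_html(datapath, "truncate")``.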
+ """
+ filename = ".".join([name, "html"])
+ filepath = datapath("io", "formats", "data", "html", filename)
+ with open(filepath, encoding="utf-8") as f:
+ html = f.read()
+ return html.rstrip()
+
+
+@pytest.fixture(params=["mixed", "empty"])
+def biggie_df_fixture(request):
+ """Fixture for a big mixed Dataframe and an empty Dataframe"""
+ if request.param == "mixed":
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).standard_normal(200),
+ "B": Index([f"{i}?!" for i in range(200)]),
+ },
+ index=np.arange(200),
+ )
+ df.loc[:20, "A"] = np.nan
+ df.loc[:20, "B"] = np.nan
+ return df
+ elif request.param == "empty":
+ df = DataFrame(index=np.arange(200))
+ return df
+
+
+@pytest.fixture(params=fmt.VALID_JUSTIFY_PARAMETERS)
+def justify(request):
+ return request.param
+
+
+@pytest.mark.parametrize("col_space", [30, 50])
+def test_to_html_with_col_space(col_space):
+ df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
+ # check that col_space affects HTML generation
+ # and check it in a deliberately brittle way.
+ result = df.to_html(col_space=col_space)
+ hdrs = [x for x in result.split(r"\n") if re.search(r"\s]", x)]
+ assert len(hdrs) > 0
+ for h in hdrs:
+ assert "min-width" in h
+ assert str(col_space) in h
+
+
+def test_to_html_with_column_specific_col_space_raises():
+ df = DataFrame(
+ np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
+ )
+
+ msg = (
+ "Col_space length\\(\\d+\\) should match "
+ "DataFrame number of columns\\(\\d+\\)"
+ )
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(col_space=[30, 40])
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(col_space=[30, 40, 50, 60])
+
+ msg = "unknown column"
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(col_space={"a": "foo", "b": 23, "d": 34})
+
+
+def test_to_html_with_column_specific_col_space():
+ df = DataFrame(
+ np.random.default_rng(2).random(size=(3, 3)), columns=["a", "b", "c"]
+ )
+
+ result = df.to_html(col_space={"a": "2em", "b": 23})
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
+ assert 'min-width: 2em;">a</th>' in hdrs[1]
+ assert 'min-width: 23px;">b</th>' in hdrs[2]
+ assert "<th>c</th>" in hdrs[3]
+
+ result = df.to_html(col_space=["1em", 2, 3])
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
+ assert 'min-width: 1em;">a</th>' in hdrs[1]
+ assert 'min-width: 2px;">b</th>' in hdrs[2]
+ assert 'min-width: 3px;">c</th>' in hdrs[3]
+
+
+def test_to_html_with_empty_string_label():
+ # GH 3547, to_html regards empty string labels as repeated labels
+ data = {"c1": ["a", "b"], "c2": ["a", ""], "data": [1, 2]}
+ df = DataFrame(data).set_index(["c1", "c2"])
+ result = df.to_html()
+ assert "rowspan" not in result
+
+
+@pytest.mark.parametrize(
+ "df,expected",
+ [
+ (DataFrame({"\u03c3": np.arange(10.0)}), "unicode_1"),
+ (DataFrame({"A": ["\u03c3"]}), "unicode_2"),
+ ],
+)
+def test_to_html_unicode(df, expected, datapath):
+ expected = expected_html(datapath, expected)
+ result = df.to_html()
+ assert result == expected
+
+
+def test_to_html_encoding(float_frame, tmp_path):
+ # GH 28663
+ path = tmp_path / "test.html"
+ float_frame.to_html(path, encoding="gbk")
+ with open(str(path), encoding="gbk") as f:
+ assert float_frame.to_html() == f.read()
+
+
+def test_to_html_decimal(datapath):
+ # GH 12031
+ df = DataFrame({"A": [6.0, 3.1, 2.2]})
+ result = df.to_html(decimal=",")
+ expected = expected_html(datapath, "gh12031_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "kwargs,string,expected",
+ [
+ ({}, "", "escaped"),
+ ({"escape": False}, "bold ", "escape_disabled"),
+ ],
+)
+def test_to_html_escaped(kwargs, string, expected, datapath):
+ a = "strl2": {a: string, b: string}}
+ result = DataFrame(test_dict).to_html(**kwargs)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize("index_is_named", [True, False])
+def test_to_html_multiindex_index_false(index_is_named, datapath):
+ # GH 8452
+ df = DataFrame(
+ {"a": range(2), "b": range(3, 5), "c": range(5, 7), "d": range(3, 5)}
+ )
+ df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ if index_is_named:
+ df.index = Index(df.index.values, name="idx")
+ result = df.to_html(index=False)
+ expected = expected_html(datapath, "gh8452_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "multi_sparse,expected",
+ [
+ (False, "multiindex_sparsify_false_multi_sparse_1"),
+ (False, "multiindex_sparsify_false_multi_sparse_2"),
+ (True, "multiindex_sparsify_1"),
+ (True, "multiindex_sparsify_2"),
+ ],
+)
+def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
+ index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], names=["foo", None])
+ df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
+ if expected.endswith("2"):
+ df.columns = index[::2]
+ with option_context("display.multi_sparse", multi_sparse):
+ result = df.to_html()
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "max_rows,expected",
+ [
+ (60, "gh14882_expected_output_1"),
+ # Test that ... appears in a middle level
+ (56, "gh14882_expected_output_2"),
+ ],
+)
+def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
+ # GH 14882 - Issue on truncation with odd length DataFrame
+ index = MultiIndex.from_product(
+ [[100, 200, 300], [10, 20, 30], [1, 2, 3, 4, 5, 6, 7]], names=["a", "b", "c"]
+ )
+ df = DataFrame({"n": range(len(index))}, index=index)
+ result = df.to_html(max_rows=max_rows)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "df,formatters,expected",
+ [
+ (
+ DataFrame(
+ [[0, 1], [2, 3], [4, 5], [6, 7]],
+ columns=Index(["foo", None], dtype=object),
+ index=np.arange(4),
+ ),
+ {"__index__": lambda x: "abcd"[x]},
+ "index_formatter",
+ ),
+ (
+ DataFrame({"months": [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
+ {"months": lambda x: x.strftime("%Y-%m")},
+ "datetime64_monthformatter",
+ ),
+ (
+ DataFrame(
+ {
+ "hod": pd.to_datetime(
+ ["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f"
+ )
+ }
+ ),
+ {"hod": lambda x: x.strftime("%H:%M")},
+ "datetime64_hourformatter",
+ ),
+ (
+ DataFrame(
+ {
+ "i": pd.Series([1, 2], dtype="int64"),
+ "f": pd.Series([1, 2], dtype="float64"),
+ "I": pd.Series([1, 2], dtype="Int64"),
+ "s": pd.Series([1, 2], dtype="string"),
+ "b": pd.Series([True, False], dtype="boolean"),
+ "c": pd.Series(["a", "b"], dtype=pd.CategoricalDtype(["a", "b"])),
+ "o": pd.Series([1, "2"], dtype=object),
+ }
+ ),
+ [lambda x: "formatted"] * 7,
+ "various_dtypes_formatted",
+ ),
+ ],
+)
+def test_to_html_formatters(df, formatters, expected, datapath):
+ expected = expected_html(datapath, expected)
+ result = df.to_html(formatters=formatters)
+ assert result == expected
+
+
+def test_to_html_regression_GH6098():
+ df = DataFrame(
+ {
+ "clé1": ["a", "a", "b", "b", "a"],
+ "clé2": ["1er", "2ème", "1er", "2ème", "1er"],
+ "données1": np.random.default_rng(2).standard_normal(5),
+ "données2": np.random.default_rng(2).standard_normal(5),
+ }
+ )
+
+ # it works
+ df.pivot_table(index=["clé1"], columns=["clé2"])._repr_html_()
+
+
+def test_to_html_truncate(datapath):
+ index = pd.date_range(start="20010101", freq="D", periods=20)
+ df = DataFrame(index=index, columns=range(20))
+ result = df.to_html(max_rows=8, max_cols=4)
+ expected = expected_html(datapath, "truncate")
+ assert result == expected
+
+
+@pytest.mark.parametrize("size", [1, 5])
+def test_html_invalid_formatters_arg_raises(size):
+ # issue-28469
+ df = DataFrame(columns=["a", "b", "c"])
+ msg = "Formatters length({}) should match DataFrame number of columns(3)"
+ with pytest.raises(ValueError, match=re.escape(msg.format(size))):
+ df.to_html(formatters=["{}".format] * size)
+
+
+def test_to_html_truncate_formatter(datapath):
+ # issue-25955
+ data = [
+ {"A": 1, "B": 2, "C": 3, "D": 4},
+ {"A": 5, "B": 6, "C": 7, "D": 8},
+ {"A": 9, "B": 10, "C": 11, "D": 12},
+ {"A": 13, "B": 14, "C": 15, "D": 16},
+ ]
+
+ df = DataFrame(data)
+ fmt = lambda x: str(x) + "_mod"
+ formatters = [fmt, fmt, None, None]
+ result = df.to_html(formatters=formatters, max_cols=3)
+ expected = expected_html(datapath, "truncate_formatter")
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "sparsify,expected",
+ [(True, "truncate_multi_index"), (False, "truncate_multi_index_sparse_off")],
+)
+def test_to_html_truncate_multi_index(sparsify, expected, datapath):
+ arrays = [
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
+ ]
+ df = DataFrame(index=arrays, columns=arrays)
+ result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "option,result,expected",
+ [
+ (None, lambda df: df.to_html(), "1"),
+ (None, lambda df: df.to_html(border=2), "2"),
+ (2, lambda df: df.to_html(), "2"),
+ (2, lambda df: df._repr_html_(), "2"),
+ ],
+)
+def test_to_html_border(option, result, expected):
+ df = DataFrame({"A": [1, 2]})
+ if option is None:
+ result = result(df)
+ else:
+ with option_context("display.html.border", option):
+ result = result(df)
+ expected = f'border="{expected}"'
+ assert expected in result
+
+
+@pytest.mark.parametrize("biggie_df_fixture", ["mixed"], indirect=True)
+def test_to_html(biggie_df_fixture):
+ # TODO: split this test
+ df = biggie_df_fixture
+ s = df.to_html()
+
+ buf = StringIO()
+ retval = df.to_html(buf=buf)
+ assert retval is None
+ assert buf.getvalue() == s
+
+ assert isinstance(s, str)
+
+ df.to_html(columns=["B", "A"], col_space=17)
+ df.to_html(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
+
+ df.to_html(columns=["B", "A"], float_format=str)
+ df.to_html(columns=["B", "A"], col_space=12, float_format=str)
+
+
+@pytest.mark.parametrize("biggie_df_fixture", ["empty"], indirect=True)
+def test_to_html_empty_dataframe(biggie_df_fixture):
+ df = biggie_df_fixture
+ df.to_html()
+
+
+def test_to_html_filename(biggie_df_fixture, tmpdir):
+ df = biggie_df_fixture
+ expected = df.to_html()
+ path = tmpdir.join("test.html")
+ df.to_html(path)
+ result = path.read()
+ assert result == expected
+
+
+def test_to_html_with_no_bold():
+ df = DataFrame({"x": np.random.default_rng(2).standard_normal(5)})
+ html = df.to_html(bold_rows=False)
+ result = html[html.find("</thead>") :]
+ assert "<strong" not in result
+
+
+@pytest.mark.parametrize(
+ "columns,justify,expected",
+ [
+ (
+ MultiIndex.from_arrays(
+ [np.arange(2).repeat(2), np.mod(range(4), 2)],
+ names=["CL0", "CL1"],
+ ),
+ "left",
+ "multiindex_1",
+ ),
+ (
+ MultiIndex.from_arrays([np.arange(4), np.mod(range(4), 2)]),
+ "right",
+ "multiindex_2",
+ ),
+ ],
+)
+def test_to_html_multiindex(columns, justify, expected, datapath):
+ df = DataFrame([list("abcd"), list("efgh")], columns=columns)
+ result = df.to_html(justify=justify)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+def test_to_html_justify(justify, datapath):
+ df = DataFrame(
+ {"A": [6, 30000, 2], "B": [1, 2, 70000], "C": [223442, 0, 1]},
+ columns=["A", "B", "C"],
+ )
+ result = df.to_html(justify=justify)
+ expected = expected_html(datapath, "justify").format(justify=justify)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "justify", ["super-right", "small-left", "noinherit", "tiny", "pandas"]
+)
+def test_to_html_invalid_justify(justify):
+ # GH 17527
+ df = DataFrame()
+ msg = "Invalid value for justify parameter"
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_html(justify=justify)
+
+
+class TestHTMLIndex:
+ @pytest.fixture
+ def df(self):
+ index = ["foo", "bar", "baz"]
+ df = DataFrame(
+ {"A": [1, 2, 3], "B": [1.2, 3.4, 5.6], "C": ["one", "two", np.nan]},
+ columns=["A", "B", "C"],
+ index=index,
+ )
+ return df
+
+ @pytest.fixture
+ def expected_without_index(self, datapath):
+ return expected_html(datapath, "index_2")
+
+ def test_to_html_flat_index_without_name(
+ self, datapath, df, expected_without_index
+ ):
+ expected_with_index = expected_html(datapath, "index_1")
+ assert df.to_html() == expected_with_index
+
+ result = df.to_html(index=False)
+ for i in df.index:
+ assert i not in result
+ assert result == expected_without_index
+
+ def test_to_html_flat_index_with_name(self, datapath, df, expected_without_index):
+ df.index = Index(["foo", "bar", "baz"], name="idx")
+ expected_with_index = expected_html(datapath, "index_3")
+ assert df.to_html() == expected_with_index
+ assert df.to_html(index=False) == expected_without_index
+
+ def test_to_html_multiindex_without_names(
+ self, datapath, df, expected_without_index
+ ):
+ tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
+ df.index = MultiIndex.from_tuples(tuples)
+
+ expected_with_index = expected_html(datapath, "index_4")
+ assert df.to_html() == expected_with_index
+
+ result = df.to_html(index=False)
+ for i in ["foo", "bar", "car", "bike"]:
+ assert i not in result
+ # must be the same result as normal index
+ assert result == expected_without_index
+
+ def test_to_html_multiindex_with_names(self, datapath, df, expected_without_index):
+ tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
+ df.index = MultiIndex.from_tuples(tuples, names=["idx1", "idx2"])
+ expected_with_index = expected_html(datapath, "index_5")
+ assert df.to_html() == expected_with_index
+ assert df.to_html(index=False) == expected_without_index
+
+
+@pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
+def test_to_html_with_classes(classes, datapath):
+ df = DataFrame()
+ expected = expected_html(datapath, "with_classes")
+ result = df.to_html(classes=classes)
+ assert result == expected
+
+
+def test_to_html_no_index_max_rows(datapath):
+ # GH 14998
+ df = DataFrame({"A": [1, 2, 3, 4]})
+ result = df.to_html(index=False, max_rows=1)
+ expected = expected_html(datapath, "gh14998_expected_output")
+ assert result == expected
+
+
+def test_to_html_multiindex_max_cols(datapath):
+ # GH 6131
+ index = MultiIndex(
+ levels=[["ba", "bb", "bc"], ["ca", "cb", "cc"]],
+ codes=[[0, 1, 2], [0, 1, 2]],
+ names=["b", "c"],
+ )
+ columns = MultiIndex(
+ levels=[["d"], ["aa", "ab", "ac"]],
+ codes=[[0, 0, 0], [0, 1, 2]],
+ names=[None, "a"],
+ )
+ data = np.array(
+ [[1.0, np.nan, np.nan], [np.nan, 2.0, np.nan], [np.nan, np.nan, 3.0]]
+ )
+ df = DataFrame(data, index, columns)
+ result = df.to_html(max_cols=2)
+ expected = expected_html(datapath, "gh6131_expected_output")
+ assert result == expected
+
+
+def test_to_html_multi_indexes_index_false(datapath):
+ # GH 22579
+ df = DataFrame(
+ {"a": range(10), "b": range(10, 20), "c": range(10, 20), "d": range(10, 20)}
+ )
+ df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+ df.index = MultiIndex.from_product([["a", "b"], ["c", "d", "e", "f", "g"]])
+ result = df.to_html(index=False)
+ expected = expected_html(datapath, "gh22579_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize("index_names", [True, False])
+@pytest.mark.parametrize("header", [True, False])
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize(
+ "column_index, column_type",
+ [
+ (Index([0, 1]), "unnamed_standard"),
+ (Index([0, 1], name="columns.name"), "named_standard"),
+ (MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
+ (
+ MultiIndex.from_product(
+ [["a"], ["b", "c"]], names=["columns.name.0", "columns.name.1"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "row_index, row_type",
+ [
+ (Index([0, 1]), "unnamed_standard"),
+ (Index([0, 1], name="index.name"), "named_standard"),
+ (MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
+ (
+ MultiIndex.from_product(
+ [["a"], ["b", "c"]], names=["index.name.0", "index.name.1"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+def test_to_html_basic_alignment(
+ datapath, row_index, row_type, column_index, column_type, index, header, index_names
+):
+ # GH 22747, GH 22579
+ df = DataFrame(np.zeros((2, 2), dtype=int), index=row_index, columns=column_index)
+ result = df.to_html(index=index, header=header, index_names=index_names)
+
+ if not index:
+ row_type = "none"
+ elif not index_names and row_type.startswith("named"):
+ row_type = "un" + row_type
+
+ if not header:
+ column_type = "none"
+ elif not index_names and column_type.startswith("named"):
+ column_type = "un" + column_type
+
+ filename = "index_" + row_type + "_columns_" + column_type
+ expected = expected_html(datapath, filename)
+ assert result == expected
+
+
+@pytest.mark.parametrize("index_names", [True, False])
+@pytest.mark.parametrize("header", [True, False])
+@pytest.mark.parametrize("index", [True, False])
+@pytest.mark.parametrize(
+ "column_index, column_type",
+ [
+ (Index(np.arange(8)), "unnamed_standard"),
+ (Index(np.arange(8), name="columns.name"), "named_standard"),
+ (
+ MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
+ "unnamed_multi",
+ ),
+ (
+ MultiIndex.from_product(
+ [["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+@pytest.mark.parametrize(
+ "row_index, row_type",
+ [
+ (Index(np.arange(8)), "unnamed_standard"),
+ (Index(np.arange(8), name="index.name"), "named_standard"),
+ (
+ MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
+ "unnamed_multi",
+ ),
+ (
+ MultiIndex.from_product(
+ [["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
+ ),
+ "named_multi",
+ ),
+ ],
+)
+def test_to_html_alignment_with_truncation(
+ datapath, row_index, row_type, column_index, column_type, index, header, index_names
+):
+ # GH 22747, GH 22579
+ df = DataFrame(np.arange(64).reshape(8, 8), index=row_index, columns=column_index)
+ result = df.to_html(
+ max_rows=4, max_cols=4, index=index, header=header, index_names=index_names
+ )
+
+ if not index:
+ row_type = "none"
+ elif not index_names and row_type.startswith("named"):
+ row_type = "un" + row_type
+
+ if not header:
+ column_type = "none"
+ elif not index_names and column_type.startswith("named"):
+ column_type = "un" + column_type
+
+ filename = "trunc_df_index_" + row_type + "_columns_" + column_type
+ expected = expected_html(datapath, filename)
+ assert result == expected
+
+
+@pytest.mark.parametrize("index", [False, 0])
+def test_to_html_truncation_index_false_max_rows(datapath, index):
+ # GH 15019
+ data = [
+ [1.764052, 0.400157],
+ [0.978738, 2.240893],
+ [1.867558, -0.977278],
+ [0.950088, -0.151357],
+ [-0.103219, 0.410599],
+ ]
+ df = DataFrame(data)
+ result = df.to_html(max_rows=4, index=index)
+ expected = expected_html(datapath, "gh15019_expected_output")
+ assert result == expected
+
+
+@pytest.mark.parametrize("index", [False, 0])
+@pytest.mark.parametrize(
+ "col_index_named, expected_output",
+ [(False, "gh22783_expected_output"), (True, "gh22783_named_columns_index")],
+)
+def test_to_html_truncation_index_false_max_cols(
+ datapath, index, col_index_named, expected_output
+):
+ # GH 22783
+ data = [
+ [1.764052, 0.400157, 0.978738, 2.240893, 1.867558],
+ [-0.977278, 0.950088, -0.151357, -0.103219, 0.410599],
+ ]
+ df = DataFrame(data)
+ if col_index_named:
+ df.columns.rename("columns.name", inplace=True)
+ result = df.to_html(max_cols=4, index=index)
+ expected = expected_html(datapath, expected_output)
+ assert result == expected
+
+
+@pytest.mark.parametrize("notebook", [True, False])
+def test_to_html_notebook_has_style(notebook):
+ df = DataFrame({"A": [1, 2, 3]})
+ result = df.to_html(notebook=notebook)
+
+ if notebook:
+ assert "tbody tr th:only-of-type" in result
+ assert "vertical-align: middle;" in result
+ assert "thead th" in result
+ else:
+ assert "tbody tr th:only-of-type" not in result
+ assert "vertical-align: middle;" not in result
+ assert "thead th" not in result
+
+
+def test_to_html_with_index_names_false():
+ # GH 16493
+ df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
+ result = df.to_html(index_names=False)
+ assert "myindexname" not in result
+
+
+def test_to_html_with_id():
+ # GH 8496
+ df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
+ result = df.to_html(index_names=False, table_id="TEST_ID")
+ assert ' id="TEST_ID"' in result
+
+
+@pytest.mark.parametrize(
+ "value,float_format,expected",
+ [
+ (0.19999, "%.3f", "gh21625_expected_output"),
+ (100.0, "%.0f", "gh22270_expected_output"),
+ ],
+)
+def test_to_html_float_format_no_fixed_width(value, float_format, expected, datapath):
+ # GH 21625, GH 22270
+ df = DataFrame({"x": [value]})
+ expected = expected_html(datapath, expected)
+ result = df.to_html(float_format=float_format)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "render_links,expected",
+ [(True, "render_links_true"), (False, "render_links_false")],
+)
+def test_to_html_render_links(render_links, expected, datapath):
+ # GH 2679
+ data = [
+ [0, "https://pandas.pydata.org/?q1=a&q2=b", "pydata.org"],
+ [0, "www.pydata.org", "pydata.org"],
+ ]
+ df = DataFrame(data, columns=Index(["foo", "bar", None], dtype=object))
+
+ result = df.to_html(render_links=render_links)
+ expected = expected_html(datapath, expected)
+ assert result == expected
+
+
+@pytest.mark.parametrize(
+ "method,expected",
+ [
+ ("to_html", lambda x: lorem_ipsum),
+ ("_repr_html_", lambda x: lorem_ipsum[: x - 4] + "..."), # regression case
+ ],
+)
+@pytest.mark.parametrize("max_colwidth", [10, 20, 50, 100])
+def test_ignore_display_max_colwidth(method, expected, max_colwidth):
+ # see gh-17004
+ df = DataFrame([lorem_ipsum])
+ with option_context("display.max_colwidth", max_colwidth):
+ result = getattr(df, method)()
+ expected = expected(max_colwidth)
+ assert expected in result
+
+
+@pytest.mark.parametrize("classes", [True, 0])
+def test_to_html_invalid_classes_type(classes):
+ # GH 25608
+ df = DataFrame()
+ msg = "classes must be a string, list, or tuple"
+
+ with pytest.raises(TypeError, match=msg):
+ df.to_html(classes=classes)
+
+
+def test_to_html_round_column_headers():
+ # GH 17280
+ df = DataFrame([1], columns=[0.55555])
+ with option_context("display.precision", 3):
+ html = df.to_html(notebook=False)
+ notebook = df.to_html(notebook=True)
+ assert "0.55555" in html
+ assert "0.556" in notebook
+
+
+@pytest.mark.parametrize("unit", ["100px", "10%", "5em", 150])
+def test_to_html_with_col_space_units(unit):
+ # GH 25941
+ df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
+ result = df.to_html(col_space=unit)
+ result = result.split("tbody")[0]
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
+ if isinstance(unit, int):
+ unit = str(unit) + "px"
+ for h in hdrs:
+ expected = f'<th style="min-width: {unit};">'
+ assert expected in h
+
+
+class TestReprHTML:
+ def test_html_repr_min_rows_default(self, datapath):
+ # gh-27991
+
+ # with default settings, no truncation even when above min_rows
+ df = DataFrame({"a": range(20)})
+ result = df._repr_html_()
+ expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation")
+ assert result == expected
+
+ # default of max_rows 60 triggers truncation if above
+ df = DataFrame({"a": range(61)})
+ result = df._repr_html_()
+ expected = expected_html(datapath, "html_repr_min_rows_default_truncated")
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "max_rows,min_rows,expected",
+ [
+ # truncated after first two rows
+ (10, 4, "html_repr_max_rows_10_min_rows_4"),
+ # when min_rows is None, it follows the value of max_rows
+ (12, None, "html_repr_max_rows_12_min_rows_None"),
+ # when min_rows is set higher than max_rows, the smaller value is used
+ (10, 12, "html_repr_max_rows_10_min_rows_12"),
+ # max_rows of None -> never truncate
+ (None, 12, "html_repr_max_rows_None_min_rows_12"),
+ ],
+ )
+ def test_html_repr_min_rows(self, datapath, max_rows, min_rows, expected):
+ # gh-27991
+
+ df = DataFrame({"a": range(61)})
+ expected = expected_html(datapath, expected)
+ with option_context("display.max_rows", max_rows, "display.min_rows", min_rows):
+ result = df._repr_html_()
+ assert result == expected
+
+ def test_repr_html_ipython_config(self, ip):
+ code = textwrap.dedent(
+ """\
+ from pandas import DataFrame
+ df = DataFrame({"A": [1, 2]})
+ df._repr_html_()
+
+ cfg = get_ipython().config
+ cfg['IPKernelApp']['parent_appname']
+ df._repr_html_()
+ """
+ )
+ result = ip.run_cell(code, silent=True)
+ assert not result.error_in_exec
+
+ def test_info_repr_html(self):
+ max_rows = 60
+ max_cols = 20
+ # Long
+ h, w = max_rows + 1, max_cols - 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert r"<class" not in df._repr_html_()
+ with option_context("display.large_repr", "info"):
+ assert r"<class" in df._repr_html_()
+
+ # Wide
+ h, w = max_rows - 1, max_cols + 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ assert "{40 + h}" in reg_repr
+
+ h = max_rows + 1
+ df = DataFrame(
+ {
+ "idx": np.linspace(-10, 10, h),
+ "A": np.arange(1, 1 + h),
+ "B": np.arange(41, 41 + h),
+ }
+ ).set_index("idx")
+ long_repr = df._repr_html_()
+ assert ".." in long_repr
+ assert "31 " not in long_repr
+ assert f"{h} rows " in long_repr
+ assert "2 columns" in long_repr
+
+ def test_repr_html_long_multiindex(self):
+ max_rows = 60
+ max_L1 = max_rows // 2
+
+ tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
+ idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((max_L1 * 2, 2)),
+ index=idx,
+ columns=["A", "B"],
+ )
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ reg_repr = df._repr_html_()
+ assert "..." not in reg_repr
+
+ tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
+ idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal(((max_L1 + 1) * 2, 2)),
+ index=idx,
+ columns=["A", "B"],
+ )
+ long_repr = df._repr_html_()
+ assert "..." in long_repr
+
+ def test_repr_html_long_and_wide(self):
+ max_cols = 20
+ max_rows = 60
+
+ h, w = max_rows - 1, max_cols - 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ assert "..." not in df._repr_html_()
+
+ h, w = max_rows + 1, max_cols + 1
+ df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
+ with option_context("display.max_rows", 60, "display.max_columns", 20):
+ assert "..." in df._repr_html_()
+
+
+def test_to_html_multilevel(multiindex_year_month_day_dataframe_random_data):
+ ymd = multiindex_year_month_day_dataframe_random_data
+
+ ymd.columns.name = "foo"
+ ymd.to_html()
+ ymd.T.to_html()
+
+
+@pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+def test_to_html_na_rep_and_float_format(na_rep, datapath):
+ # https://github.com/pandas-dev/pandas/issues/13828
+ df = DataFrame(
+ [
+ ["A", 1.2225],
+ ["A", None],
+ ],
+ columns=["Group", "Data"],
+ )
+ result = df.to_html(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = expected_html(datapath, "gh13828_expected_output")
+ expected = expected.format(na_rep=na_rep)
+ assert result == expected
+
+
+def test_to_html_na_rep_non_scalar_data(datapath):
+ # GH47103
+ df = DataFrame([{"a": 1, "b": [1, 2, 3]}])
+ result = df.to_html(na_rep="-")
+ expected = expected_html(datapath, "gh47103_expected_output")
+ assert result == expected
+
+
+def test_to_html_float_format_object_col(datapath):
+ # GH#40024
+ df = DataFrame(data={"x": [1000.0, "test"]})
+ result = df.to_html(float_format=lambda x: f"{x:,.0f}")
+ expected = expected_html(datapath, "gh40024_expected_output")
+ assert result == expected
+
+
+def test_to_html_multiindex_col_with_colspace():
+ # GH#53885
+ df = DataFrame([[1, 2]])
+ df.columns = MultiIndex.from_tuples([(1, 1), (2, 1)])
+ result = df.to_html(col_space=100)
+ expected = (
+ '<table border="1" class="dataframe">\n'
+ "  <thead>\n"
+ "    <tr>\n"
+ '      <th style="min-width: 100px;"></th>\n'
+ '      <th style="min-width: 100px;">1</th>\n'
+ '      <th style="min-width: 100px;">2</th>\n'
+ "    </tr>\n"
+ "    <tr>\n"
+ '      <th style="min-width: 100px;"></th>\n'
+ '      <th style="min-width: 100px;">1</th>\n'
+ '      <th style="min-width: 100px;">1</th>\n'
+ "    </tr>\n"
+ "  </thead>\n"
+ "  <tbody>\n"
+ "    <tr>\n"
+ "      <th>0</th>\n"
+ "      <td>1</td>\n"
+ "      <td>2</td>\n"
+ "    </tr>\n"
+ "  </tbody>\n"
+ "</table>"
+ )
+ assert result == expected
+
+
+def test_to_html_tuple_col_with_colspace():
+ # GH#53885
+ df = DataFrame({("a", "b"): [1], "b": [2]})
+ result = df.to_html(col_space=100)
+ expected = (
+ '<table border="1" class="dataframe">\n'
+ "  <thead>\n"
+ '    <tr style="text-align: right;">\n'
+ '      <th style="min-width: 100px;"></th>\n'
+ '      <th style="min-width: 100px;">(a, b)</th>\n'
+ '      <th style="min-width: 100px;">b</th>\n'
+ "    </tr>\n"
+ "  </thead>\n"
+ "  <tbody>\n"
+ "    <tr>\n"
+ "      <th>0</th>\n"
+ "      <td>1</td>\n"
+ "      <td>2</td>\n"
+ "    </tr>\n"
+ "  </tbody>\n"
+ "</table>"
+ )
+ assert result == expected
+
+
+def test_to_html_empty_complex_array():
+ # GH#54167
+ df = DataFrame({"x": np.array([], dtype="complex")})
+ result = df.to_html(col_space=100)
+ expected = (
+ '<table border="1" class="dataframe">\n'
+ "  <thead>\n"
+ '    <tr style="text-align: right;">\n'
+ '      <th style="min-width: 100px;"></th>\n'
+ '      <th style="min-width: 100px;">x</th>\n'
+ "    </tr>\n"
+ "  </thead>\n"
+ "  <tbody>\n"
+ "  </tbody>\n"
+ "</table>"
+ )
+ assert result == expected
+
+
+def test_to_html_pos_args_deprecation():
+ # GH-54229
+ df = DataFrame({"a": [1, 2, 3]})
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_html except for the "
+ r"argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_html(None, None)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fd96dff27d06dc3056b56c5f7e8eb054e98bd8f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_latex.py
@@ -0,0 +1,1425 @@
+import codecs
+from datetime import datetime
+from textwrap import dedent
+
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Series,
+)
+import pandas._testing as tm
+
+pytest.importorskip("jinja2")
+
+
+def _dedent(string):
+ """Dedent without new line in the beginning.
+
+ Built-in textwrap.dedent would keep new line character in the beginning
+ of multi-line string starting from the new line.
+ This version drops the leading new line character.
+ """
+ return dedent(string).lstrip()
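+ # e.g. dedent("\n    x\n") -> "\nx\n" (leading newline kept), whereas
+ # _dedent("\n    x\n") -> "x\n"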
+
+
+@pytest.fixture
+def df_short():
+ """Short dataframe for testing table/tabular/longtable LaTeX env."""
+ return DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+
+
+class TestToLatex:
+ def test_to_latex_to_file(self, float_frame):
+ with tm.ensure_clean("test.tex") as path:
+ float_frame.to_latex(path)
+ with open(path, encoding="utf-8") as f:
+ assert float_frame.to_latex() == f.read()
+
+ def test_to_latex_to_file_utf8_with_encoding(self):
+ # test with utf-8 and encoding option (GH 7061)
+ df = DataFrame([["au\xdfgangen"]])
+ with tm.ensure_clean("test.tex") as path:
+ df.to_latex(path, encoding="utf-8")
+ with codecs.open(path, "r", encoding="utf-8") as f:
+ assert df.to_latex() == f.read()
+
+ def test_to_latex_to_file_utf8_without_encoding(self):
+ # test with utf-8 without encoding option
+ df = DataFrame([["au\xdfgangen"]])
+ with tm.ensure_clean("test.tex") as path:
+ df.to_latex(path)
+ with codecs.open(path, "r", encoding="utf-8") as f:
+ assert df.to_latex() == f.read()
+
+ def test_to_latex_tabular_with_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_tabular_without_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ a & b \\
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "bad_column_format",
+ [5, 1.2, ["l", "r"], ("r", "c"), {"r", "c", "l"}, {"a": "r", "b": "l"}],
+ )
+ def test_to_latex_bad_column_format(self, bad_column_format):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ msg = r"`column_format` must be str or unicode"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(column_format=bad_column_format)
+
+ def test_to_latex_column_format_just_works(self, float_frame):
+ # GH Bug #9402
+ float_frame.to_latex(column_format="lcr")
+
+ def test_to_latex_column_format(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(column_format="lcr")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lcr}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_object_col(self):
+ # GH#40024
+ ser = Series([1000.0, "test"])
+ result = ser.to_latex(float_format="{:,.0f}".format)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & 1,000 \\
+ 1 & test \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_empty_tabular(self):
+ df = DataFrame()
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{l}
+ \toprule
+ \midrule
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_series(self):
+ s = Series(["a", "b", "c"])
+ result = s.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & a \\
+ 1 & b \\
+ 2 & c \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_midrule_location(self):
+ # GH 18326
+ df = DataFrame({"a": [1, 2]})
+ df.index.name = "foo"
+ result = df.to_latex(index_names=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & a \\
+ \midrule
+ 0 & 1 \\
+ 1 & 2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_pos_args_deprecation(self):
+ # GH-54229
+ df = DataFrame(
+ {
+ "name": ["Raphael", "Donatello"],
+ "age": [26, 45],
+ "height": [181.23, 177.65],
+ }
+ )
+ msg = (
+ r"Starting with pandas version 3.0 all arguments of to_latex except for "
+ r"the argument 'buf' will be keyword-only."
+ )
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_latex(None, None)
+
+
+class TestToLatexLongtable:
+ def test_to_latex_empty_longtable(self):
+ df = DataFrame()
+ result = df.to_latex(longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{l}
+ \toprule
+ \midrule
+ \endfirsthead
+ \toprule
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{0}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_with_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_without_index(self):
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False, longtable=True)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{rl}
+ \toprule
+ a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{2}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 1 & b1 \\
+ 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "df, expected_number",
+ [
+ (DataFrame({"a": [1, 2]}), 1),
+ (DataFrame({"a": [1, 2], "b": [3, 4]}), 2),
+ (DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]}), 3),
+ ],
+ )
+ def test_to_latex_longtable_continued_on_next_page(self, df, expected_number):
+ result = df.to_latex(index=False, longtable=True)
+ assert rf"\multicolumn{{{expected_number}}}" in result
+
+
+class TestToLatexHeader:
+ def test_to_latex_no_header_with_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_no_header_without_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(index=False, header=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_with_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["AA", "BB"])
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & AA & BB \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_without_index(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["AA", "BB"], index=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{rl}
+ \toprule
+ AA & BB \\
+ \midrule
+ 1 & b1 \\
+ 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "header, num_aliases",
+ [
+ (["A"], 1),
+ (("B",), 1),
+ (("Col1", "Col2", "Col3"), 3),
+ (("Col1", "Col2", "Col3", "Col4"), 4),
+ ],
+ )
+ def test_to_latex_number_of_items_in_header_mismatch_raises(
+ self,
+ header,
+ num_aliases,
+ ):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ msg = f"Writing 2 cols but got {num_aliases} aliases"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(header=header)
+
+ def test_to_latex_decimal(self):
+ # GH 12031
+ df = DataFrame({"a": [1.0, 2.1], "b": ["b1", "b2"]})
+ result = df.to_latex(decimal=",")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1,000000 & b1 \\
+ 1 & 2,100000 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexBold:
+ def test_to_latex_bold_rows(self):
+ # GH 16707
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(bold_rows=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \textbf{0} & 1 & b1 \\
+ \textbf{1} & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_no_bold_rows(self):
+ # GH 16707
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(bold_rows=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexCaptionLabel:
+ @pytest.fixture
+ def caption_table(self):
+ """Caption for table/tabular LaTeX environment."""
+ return "a table in a \\texttt{table/tabular} environment"
+
+ @pytest.fixture
+ def short_caption(self):
+ """Short caption for testing \\caption[short_caption]{full_caption}."""
+ return "a table"
+
+ @pytest.fixture
+ def label_table(self):
+ """Label for table/tabular LaTeX environment."""
+ return "tab:table_tabular"
+
+ @pytest.fixture
+ def caption_longtable(self):
+ """Caption for longtable LaTeX environment."""
+ return "a table in a \\texttt{longtable} environment"
+
+ @pytest.fixture
+ def label_longtable(self):
+ """Label for longtable LaTeX environment."""
+ return "tab:longtable"
+
+ def test_to_latex_caption_only(self, df_short, caption_table):
+ # GH 25436
+ result = df_short.to_latex(caption=caption_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{a table in a \texttt{table/tabular} environment}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_label_only(self, df_short, label_table):
+ # GH 25436
+ result = df_short.to_latex(label=label_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_label(self, df_short, caption_table, label_table):
+ # GH 25436
+ result = df_short.to_latex(caption=caption_table, label=label_table)
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{a table in a \texttt{table/tabular} environment}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_shortcaption(
+ self,
+ df_short,
+ caption_table,
+ short_caption,
+ ):
+ result = df_short.to_latex(caption=(caption_table, short_caption))
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption[a table]{a table in a \texttt{table/tabular} environment}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_caption_and_shortcaption_list_is_ok(self, df_short):
+ caption = ("Long-long-caption", "Short")
+ result_tuple = df_short.to_latex(caption=caption)
+ result_list = df_short.to_latex(caption=list(caption))
+ assert result_tuple == result_list
+
+ def test_to_latex_caption_shortcaption_and_label(
+ self,
+ df_short,
+ caption_table,
+ short_caption,
+ label_table,
+ ):
+ # test when the short_caption is provided alongside caption and label
+ result = df_short.to_latex(
+ caption=(caption_table, short_caption),
+ label=label_table,
+ )
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption[a table]{a table in a \texttt{table/tabular} environment}
+ \label{tab:table_tabular}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize(
+ "bad_caption",
+ [
+ ("full_caption", "short_caption", "extra_string"),
+ ("full_caption", "short_caption", 1),
+ ("full_caption", "short_caption", None),
+ ("full_caption",),
+ (None,),
+ ],
+ )
+ def test_to_latex_bad_caption_raises(self, bad_caption):
+ # a caption that is not a string or a 2-tuple of strings must raise
+ df = DataFrame({"a": [1]})
+ msg = "`caption` must be either a string or 2-tuple of strings"
+ with pytest.raises(ValueError, match=msg):
+ df.to_latex(caption=bad_caption)
+
+ def test_to_latex_two_chars_caption(self, df_short):
+ # a two-character caption must be handled correctly:
+ # it must not be unpacked into (long_caption, short_caption)
+ result = df_short.to_latex(caption="xy")
+ expected = _dedent(
+ r"""
+ \begin{table}
+ \caption{xy}
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_only(self, df_short, caption_longtable):
+ # GH 25436
+ # the case with no caption and no label
+ # is covered by test_to_latex_longtable()
+ result = df_short.to_latex(longtable=True, caption=caption_longtable)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_label_only(self, df_short, label_longtable):
+ # GH 25436
+ result = df_short.to_latex(longtable=True, label=label_longtable)
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \label{tab:longtable} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_and_label(
+ self,
+ df_short,
+ caption_longtable,
+ label_longtable,
+ ):
+ # GH 25436
+ result = df_short.to_latex(
+ longtable=True,
+ caption=caption_longtable,
+ label=label_longtable,
+ )
+ expected = _dedent(
+ r"""
+ \begin{longtable}{lrl}
+ \caption{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \caption[]{a table in a \texttt{longtable} environment} \\
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_caption_shortcaption_and_label(
+ self,
+ df_short,
+ caption_longtable,
+ short_caption,
+ label_longtable,
+ ):
+ # test when the caption, the short_caption and the label are provided
+ result = df_short.to_latex(
+ longtable=True,
+ caption=(caption_longtable, short_caption),
+ label=label_longtable,
+ )
+ expected = _dedent(
+ r"""
+\begin{longtable}{lrl}
+\caption[a table]{a table in a \texttt{longtable} environment} \label{tab:longtable} \\
+\toprule
+ & a & b \\
+\midrule
+\endfirsthead
+\caption[]{a table in a \texttt{longtable} environment} \\
+\toprule
+ & a & b \\
+\midrule
+\endhead
+\midrule
+\multicolumn{3}{r}{Continued on next page} \\
+\midrule
+\endfoot
+\bottomrule
+\endlastfoot
+0 & 1 & b1 \\
+1 & 2 & b2 \\
+\end{longtable}
+"""
+ )
+ assert result == expected
+
+
+class TestToLatexEscape:
+ @pytest.fixture
+ def df_with_symbols(self):
+ """Dataframe with special characters for testing chars escaping."""
+ a = "a"
+ b = "b"
+ yield DataFrame({"co$e^x$": {a: "a", b: "b"}, "co^l1": {a: "a", b: "b"}})
+
+ def test_to_latex_escape_false(self, df_with_symbols):
+ result = df_with_symbols.to_latex(escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & co$e^x$ & co^l1 \\
+ \midrule
+ a & a & a \\
+ b & b & b \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_escape_default(self, df_with_symbols):
+ # gh50871: in v2.0 escape is False by default (styler.format.escape=None)
+ default = df_with_symbols.to_latex()
+ specified_true = df_with_symbols.to_latex(escape=True)
+ assert default != specified_true
+
+ def test_to_latex_special_escape(self):
+ df = DataFrame([r"a\b\c", r"^a^b^c", r"~a~b~c"])
+ result = df.to_latex(escape=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & a\textbackslash b\textbackslash c \\
+ 1 & \textasciicircum a\textasciicircum b\textasciicircum c \\
+ 2 & \textasciitilde a\textasciitilde b\textasciitilde c \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_escape_special_chars(self):
+ special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"]
+ df = DataFrame(data=special_characters)
+ result = df.to_latex(escape=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & 0 \\
+ \midrule
+ 0 & \& \\
+ 1 & \% \\
+ 2 & \$ \\
+ 3 & \# \\
+ 4 & \_ \\
+ 5 & \{ \\
+ 6 & \} \\
+ 7 & \textasciitilde \\
+ 8 & \textasciicircum \\
+ 9 & \textbackslash \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_specified_header_special_chars_without_escape(self):
+ # GH 7124
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(header=["$A$", "$B$"], escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrl}
+ \toprule
+ & $A$ & $B$ \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexPosition:
+ def test_to_latex_position(self):
+ the_position = "h"
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(position=the_position)
+ expected = _dedent(
+ r"""
+ \begin{table}[h]
+ \begin{tabular}{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \bottomrule
+ \end{tabular}
+ \end{table}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_longtable_position(self):
+ the_position = "t"
+ df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
+ result = df.to_latex(longtable=True, position=the_position)
+ expected = _dedent(
+ r"""
+ \begin{longtable}[t]{lrl}
+ \toprule
+ & a & b \\
+ \midrule
+ \endfirsthead
+ \toprule
+ & a & b \\
+ \midrule
+ \endhead
+ \midrule
+ \multicolumn{3}{r}{Continued on next page} \\
+ \midrule
+ \endfoot
+ \bottomrule
+ \endlastfoot
+ 0 & 1 & b1 \\
+ 1 & 2 & b2 \\
+ \end{longtable}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexFormatters:
+ def test_to_latex_with_formatters(self):
+ df = DataFrame(
+ {
+ "datetime64": [
+ datetime(2016, 1, 1),
+ datetime(2016, 2, 5),
+ datetime(2016, 3, 3),
+ ],
+ "float": [1.0, 2.0, 3.0],
+ "int": [1, 2, 3],
+ "object": [(1, 2), True, False],
+ }
+ )
+
+ formatters = {
+ "datetime64": lambda x: x.strftime("%Y-%m"),
+ "float": lambda x: f"[{x: 4.1f}]",
+ "int": lambda x: f"0x{x:x}",
+ "object": lambda x: f"-{x!s}-",
+ "__index__": lambda x: f"index: {x}",
+ }
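+ # "__index__" is the special formatters key: it formats the index
+ # labels instead of a data column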
+ result = df.to_latex(formatters=dict(formatters))
+
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrl}
+ \toprule
+ & datetime64 & float & int & object \\
+ \midrule
+ index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
+ index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
+ index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_no_fixed_width_3decimals(self):
+ # GH 21625
+ df = DataFrame({"x": [0.19999]})
+ result = df.to_latex(float_format="%.3f")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & x \\
+ \midrule
+ 0 & 0.200 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_float_format_no_fixed_width_integer(self):
+ # GH 22270
+ df = DataFrame({"x": [100.0]})
+ result = df.to_latex(float_format="%.0f")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lr}
+ \toprule
+ & x \\
+ \midrule
+ 0 & 100 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
+ def test_to_latex_na_rep_and_float_format(self, na_rep):
+ df = DataFrame(
+ [
+ ["A", 1.2225],
+ ["A", None],
+ ],
+ columns=["Group", "Data"],
+ )
+ result = df.to_latex(na_rep=na_rep, float_format="{:.2f}".format)
+ expected = _dedent(
+ rf"""
+ \begin{{tabular}}{{llr}}
+ \toprule
+ & Group & Data \\
+ \midrule
+ 0 & A & 1.22 \\
+ 1 & A & {na_rep} \\
+ \bottomrule
+ \end{{tabular}}
+ """
+ )
+ assert result == expected
+
+
+class TestToLatexMultiindex:
+ @pytest.fixture
+ def multiindex_frame(self):
+ """Multiindex dataframe for testing multirow LaTeX macros."""
+ yield DataFrame.from_dict(
+ {
+ ("c1", 0): Series({x: x for x in range(4)}),
+ ("c1", 1): Series({x: x + 4 for x in range(4)}),
+ ("c2", 0): Series({x: x for x in range(4)}),
+ ("c2", 1): Series({x: x + 4 for x in range(4)}),
+ ("c3", 0): Series({x: x for x in range(4)}),
+ }
+ ).T
+
+ @pytest.fixture
+ def multicolumn_frame(self):
+ """Multicolumn dataframe for testing multicolumn LaTeX macros."""
+ yield DataFrame(
+ {
+ ("c1", 0): {x: x for x in range(5)},
+ ("c1", 1): {x: x + 5 for x in range(5)},
+ ("c2", 0): {x: x for x in range(5)},
+ ("c2", 1): {x: x + 5 for x in range(5)},
+ ("c3", 0): {x: x for x in range(5)},
+ }
+ )
+
+ def test_to_latex_multiindex_header(self):
+ # GH 16718
+ df = DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]})
+ df = df.set_index(["a", "b"])
+ observed = df.to_latex(header=["r1", "r2"], multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrr}
+ \toprule
+ & & r1 & r2 \\
+ a & b & & \\
+ \midrule
+ 0 & 1 & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert observed == expected
+
+ def test_to_latex_multiindex_empty_name(self):
+ # GH 18669
+ mi = pd.MultiIndex.from_product([[1, 2]], names=[""])
+ df = DataFrame(-1, index=mi, columns=range(4))
+ observed = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrr}
+ \toprule
+ & 0 & 1 & 2 & 3 \\
+ & & & & \\
+ \midrule
+ 1 & -1 & -1 & -1 & -1 \\
+ 2 & -1 & -1 & -1 & -1 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert observed == expected
+
+ def test_to_latex_multiindex_column_tabular(self):
+ df = DataFrame({("x", "y"): ["a"]})
+ result = df.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{ll}
+ \toprule
+ & x \\
+ & y \\
+ \midrule
+ 0 & a \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_small_tabular(self):
+ df = DataFrame({("x", "y"): ["a"]}).T
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & & 0 \\
+ \midrule
+ x & y & a \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_tabular(self, multiindex_frame):
+ result = multiindex_frame.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrr}
+ \toprule
+ & & 0 & 1 & 2 & 3 \\
+ \midrule
+ c1 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c2 & 0 & 0 & 1 & 2 & 3 \\
+ & 1 & 4 & 5 & 6 & 7 \\
+ c3 & 0 & 0 & 1 & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_tabular(self, multiindex_frame):
+ # GH 14184
+ df = multiindex_frame.T
+ df.columns.names = ["a", "b"]
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ a & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\
+ b & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 4 & 0 & 4 & 0 \\
+ 1 & 1 & 5 & 1 & 5 & 1 \\
+ 2 & 2 & 6 & 2 & 6 & 2 \\
+ 3 & 3 & 7 & 3 & 7 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_index_has_name_tabular(self):
+ # GH 10660
+ df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
+ result = df.set_index(["a", "b"]).to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & c \\
+ a & b & \\
+ \midrule
+ 0 & a & 1 \\
+ & b & 2 \\
+ 1 & a & 3 \\
+ & b & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_groupby_tabular(self):
+ # GH 10660
+ df = DataFrame({"a": [0, 0, 1, 1], "b": list("abab"), "c": [1, 2, 3, 4]})
+ result = (
+ df.groupby("a")
+ .describe()
+ .to_latex(float_format="{:.1f}".format, escape=True)
+ )
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrrrrr}
+ \toprule
+ & \multicolumn{8}{r}{c} \\
+ & count & mean & std & min & 25\% & 50\% & 75\% & max \\
+ a & & & & & & & & \\
+ \midrule
+ 0 & 2.0 & 1.5 & 0.7 & 1.0 & 1.2 & 1.5 & 1.8 & 2.0 \\
+ 1 & 2.0 & 3.5 & 0.7 & 3.0 & 3.2 & 3.5 & 3.8 & 4.0 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_dupe_level(self):
+ # see gh-14484
+ #
+ # If an index is repeated in subsequent rows, it should be
+ # replaced with a blank in the created table. This should
+ # ONLY happen if all higher order indices (to the left) are
+ # equal too. In this test, 'c' has to be printed both times
+ # because the higher order index 'A' != 'B'.
+ df = DataFrame(
+ index=pd.MultiIndex.from_tuples([("A", "c"), ("B", "c")]), columns=["col"]
+ )
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ & & col \\
+ \midrule
+ A & c & NaN \\
+ B & c & NaN \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_default(self, multicolumn_frame):
+ result = multicolumn_frame.to_latex()
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\
+ & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumn_false(self, multicolumn_frame):
+ result = multicolumn_frame.to_latex(multicolumn=False, multicolumn_format="l")
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lrrrrr}
+ \toprule
+ & c1 & & c2 & & c3 \\
+ & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ 0 & 0 & 5 & 0 & 5 & 0 \\
+ 1 & 1 & 6 & 1 & 6 & 1 \\
+ 2 & 2 & 7 & 2 & 7 & 2 \\
+ 3 & 3 & 8 & 3 & 8 & 3 \\
+ 4 & 4 & 9 & 4 & 9 & 4 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multirow_true(self, multicolumn_frame):
+ result = multicolumn_frame.T.to_latex(multirow=True)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrrr}
+ \toprule
+ & & 0 & 1 & 2 & 3 & 4 \\
+ \midrule
+ \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+ \cline{1-7}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multicolumnrow_with_multicol_format(self, multicolumn_frame):
+ multicolumn_frame.index = multicolumn_frame.T.index
+ result = multicolumn_frame.T.to_latex(
+ multirow=True,
+ multicolumn=True,
+ multicolumn_format="c",
+ )
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llrrrrr}
+ \toprule
+ & & \multicolumn{2}{c}{c1} & \multicolumn{2}{c}{c2} & c3 \\
+ & & 0 & 1 & 0 & 1 & 0 \\
+ \midrule
+ \multirow[t]{2}{*}{c1} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ \multirow[t]{2}{*}{c2} & 0 & 0 & 1 & 2 & 3 & 4 \\
+ & 1 & 5 & 6 & 7 & 8 & 9 \\
+ \cline{1-7}
+ c3 & 0 & 0 & 1 & 2 & 3 & 4 \\
+ \cline{1-7}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ @pytest.mark.parametrize("name0", [None, "named0"])
+ @pytest.mark.parametrize("name1", [None, "named1"])
+ @pytest.mark.parametrize("axes", [[0], [1], [0, 1]])
+ def test_to_latex_multiindex_names(self, name0, name1, axes):
+ # GH 18667
+ names = [name0, name1]
+ mi = pd.MultiIndex.from_product([[1, 2], [3, 4]])
+ df = DataFrame(-1, index=mi.copy(), columns=mi.copy())
+ for idx in axes:
+ df.axes[idx].names = names
+
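+ # a dedicated index-names row is emitted only when the row axis (0)
+ # carries names; column names are filled into the header rows below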
+ idx_names = tuple(n or "" for n in names)
+ idx_names_row = (
+ f"{idx_names[0]} & {idx_names[1]} & & & & \\\\\n"
+ if (0 in axes and any(names))
+ else ""
+ )
+ col_names = [n if (bool(n) and 1 in axes) else "" for n in names]
+ observed = df.to_latex(multirow=False)
+ # pylint: disable-next=consider-using-f-string
+ expected = r"""\begin{tabular}{llrrrr}
+\toprule
+ & %s & \multicolumn{2}{r}{1} & \multicolumn{2}{r}{2} \\
+ & %s & 3 & 4 & 3 & 4 \\
+%s\midrule
+1 & 3 & -1 & -1 & -1 & -1 \\
+ & 4 & -1 & -1 & -1 & -1 \\
+2 & 3 & -1 & -1 & -1 & -1 \\
+ & 4 & -1 & -1 & -1 & -1 \\
+\bottomrule
+\end{tabular}
+""" % tuple(
+ list(col_names) + [idx_names_row]
+ )
+ assert observed == expected
+
+ @pytest.mark.parametrize("one_row", [True, False])
+ def test_to_latex_multiindex_nans(self, one_row):
+ # GH 14249
+ df = DataFrame({"a": [None, 1], "b": [2, 3], "c": [4, 5]})
+ if one_row:
+ df = df.iloc[[0]]
+ observed = df.set_index(["a", "b"]).to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & c \\
+ a & b & \\
+ \midrule
+ NaN & 2 & 4 \\
+ """
+ )
+ if not one_row:
+ expected += r"""1.000000 & 3 & 5 \\
+"""
+ expected += r"""\bottomrule
+\end{tabular}
+"""
+ assert observed == expected
+
+ def test_to_latex_non_string_index(self):
+ # GH 19981
+ df = DataFrame([[1, 2, 3]] * 2).set_index([0, 1])
+ result = df.to_latex(multirow=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{llr}
+ \toprule
+ & & 2 \\
+ 0 & 1 & \\
+ \midrule
+ 1 & 2 & 3 \\
+ & 2 & 3 \\
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
+
+ def test_to_latex_multiindex_multirow(self):
+ # GH 16719
+ mi = pd.MultiIndex.from_product(
+ [[0.0, 1.0], [3.0, 2.0, 1.0], ["0", "1"]], names=["i", "val0", "val1"]
+ )
+ df = DataFrame(index=mi)
+ result = df.to_latex(multirow=True, escape=False)
+ expected = _dedent(
+ r"""
+ \begin{tabular}{lll}
+ \toprule
+ i & val0 & val1 \\
+ \midrule
+ \multirow[t]{6}{*}{0.000000} & \multirow[t]{2}{*}{3.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{2.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{1.000000} & 0 \\
+ & & 1 \\
+ \cline{1-3} \cline{2-3}
+ \multirow[t]{6}{*}{1.000000} & \multirow[t]{2}{*}{3.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{2.000000} & 0 \\
+ & & 1 \\
+ \cline{2-3}
+ & \multirow[t]{2}{*}{1.000000} & 0 \\
+ & & 1 \\
+ \cline{1-3} \cline{2-3}
+ \bottomrule
+ \end{tabular}
+ """
+ )
+ assert result == expected
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..62582b212eb387c94b095ca54029f971dc54e777
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py
@@ -0,0 +1,50 @@
+from collections.abc import Generator
+from contextlib import contextmanager
+import pathlib
+import tempfile
+
+import pytest
+
+from pandas.io.pytables import HDFStore
+
+tables = pytest.importorskip("tables")
+# keep pytables single-threaded so concurrent tests don't share files
+tables.parameters.MAX_NUMEXPR_THREADS = 1
+tables.parameters.MAX_BLOSC_THREADS = 1
+tables.parameters.MAX_THREADS = 1
+
+
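+# Close a store, ignoring OSError from handles that are already closed
+# or were never opened, so teardown never masks a test failure.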
+def safe_close(store):
+ try:
+ if store is not None:
+ store.close()
+ except OSError:
+ pass
+
+
+# context manager that opens an HDFStore inside a temporary directory
+# and guarantees the file is cleaned up on exit
+@contextmanager
+def ensure_clean_store(
+ path, mode="a", complevel=None, complib=None, fletcher32=False
+) -> Generator[HDFStore, None, None]:
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ tmp_path = pathlib.Path(tmpdirname, path)
+ with HDFStore(
+ tmp_path,
+ mode=mode,
+ complevel=complevel,
+ complib=complib,
+ fletcher32=fletcher32,
+ ) as store:
+ yield store
+
+
+def _maybe_remove(store, key):
+ """
+ For tests using tables, try removing the table to be sure there is
+ no content from previous tests using the same table name.
+ """
+ try:
+ store.remove(key)
+ except (ValueError, KeyError):
+ pass
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..466e4ae8bb99c6c4e0f24045f71b4c3aa27b7851
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py
@@ -0,0 +1,9 @@
+import uuid
+
+import pytest
+
+
+@pytest.fixture
+def setup_path():
+ """Fixture for setup path"""
+ return f"tmp.__{uuid.uuid4()}__.h5"
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py
new file mode 100644
index 0000000000000000000000000000000000000000..00a81a4f1f385d044a21b987cfa9c5d2c65d1f0d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py
@@ -0,0 +1,986 @@
+import datetime
+from datetime import timedelta
+import re
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Index,
+ Series,
+ _testing as tm,
+ concat,
+ date_range,
+ read_hdf,
+)
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+
+pytestmark = pytest.mark.single_cpu
+
+tables = pytest.importorskip("tables")
+
+
+@pytest.mark.filterwarnings("ignore::tables.NaturalNameWarning")
+def test_append(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # keys that are not valid Python identifiers are allowed, but you
+ # almost always don't want to use them (tables.NaturalNameWarning)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((20, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=20, freq="B"),
+ )
+ _maybe_remove(store, "df1")
+ store.append("df1", df[:10])
+ store.append("df1", df[10:])
+ tm.assert_frame_equal(store["df1"], df)
+
+ _maybe_remove(store, "df2")
+ store.put("df2", df[:10], format="table")
+ store.append("df2", df[10:])
+ tm.assert_frame_equal(store["df2"], df)
+
+ _maybe_remove(store, "df3")
+ store.append("/df3", df[:10])
+ store.append("/df3", df[10:])
+ tm.assert_frame_equal(store["df3"], df)
+
+ # allowed, but you almost always don't want to do it: the space in
+ # the key triggers tables.NaturalNameWarning
+ _maybe_remove(store, "/df3 foo")
+ store.append("/df3 foo", df[:10])
+ store.append("/df3 foo", df[10:])
+ tm.assert_frame_equal(store["df3 foo"], df)
+
+ # dtype issues - mixed types in a single object column
+ df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
+ df["mixed_column"] = "testing"
+ df.loc[2, "mixed_column"] = np.nan
+ _maybe_remove(store, "df")
+ store.append("df", df)
+ tm.assert_frame_equal(store["df"], df)
+
+ # uints - test storage of uints
+ uint_data = DataFrame(
+ {
+ "u08": Series(
+ np.random.default_rng(2).integers(0, high=255, size=5),
+ dtype=np.uint8,
+ ),
+ "u16": Series(
+ np.random.default_rng(2).integers(0, high=65535, size=5),
+ dtype=np.uint16,
+ ),
+ "u32": Series(
+ np.random.default_rng(2).integers(0, high=2**30, size=5),
+ dtype=np.uint32,
+ ),
+ "u64": Series(
+ [2**58, 2**59, 2**60, 2**61, 2**62],
+ dtype=np.uint64,
+ ),
+ },
+ index=np.arange(5),
+ )
+ _maybe_remove(store, "uints")
+ store.append("uints", uint_data)
+ tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
+
+ # uints - test storage of uints in indexable columns
+ _maybe_remove(store, "uints")
+ # 64-bit indices not yet supported
+ store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
+ tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
+
+
+def test_append_series(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # basic
+ ss = Series(range(20), dtype=np.float64, index=[f"i_{i}" for i in range(20)])
+ ts = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ ns = Series(np.arange(100))
+
+ store.append("ss", ss)
+ result = store["ss"]
+ tm.assert_series_equal(result, ss)
+ assert result.name is None
+
+ store.append("ts", ts)
+ result = store["ts"]
+ tm.assert_series_equal(result, ts)
+ assert result.name is None
+
+ ns.name = "foo"
+ store.append("ns", ns)
+ result = store["ns"]
+ tm.assert_series_equal(result, ns)
+ assert result.name == ns.name
+
+ # select on the values
+ expected = ns[ns > 60]
+ result = store.select("ns", "foo>60")
+ tm.assert_series_equal(result, expected)
+
+ # select on the index and values
+ expected = ns[(ns > 70) & (ns.index < 90)]
+ result = store.select("ns", "foo>70 and index<90")
+ tm.assert_series_equal(result, expected, check_index_type=True)
+
+ # multi-index
+ mi = DataFrame(np.random.default_rng(2).standard_normal((5, 1)), columns=["A"])
+ mi["B"] = np.arange(len(mi))
+ mi["C"] = "foo"
+ mi.loc[3:5, "C"] = "bar"
+ mi.set_index(["C", "B"], inplace=True)
+ s = mi.stack(future_stack=True)
+ s.index = s.index.droplevel(2)
+ store.append("mi", s)
+ tm.assert_series_equal(store["mi"], s, check_index_type=True)
+
+
+def test_append_some_nans(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ {
+ "A": Series(np.random.default_rng(2).standard_normal(20)).astype(
+ "int32"
+ ),
+ "A1": np.random.default_rng(2).standard_normal(20),
+ "A2": np.random.default_rng(2).standard_normal(20),
+ "B": "foo",
+ "C": "bar",
+ "D": Timestamp("2001-01-01").as_unit("ns"),
+ "E": Timestamp("2001-01-02").as_unit("ns"),
+ },
+ index=np.arange(20),
+ )
+ # some nans
+ _maybe_remove(store, "df1")
+ df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
+ store.append("df1", df[:10])
+ store.append("df1", df[10:])
+ tm.assert_frame_equal(store["df1"], df, check_index_type=True)
+
+ # first column
+ df1 = df.copy()
+ df1["A1"] = np.nan
+ _maybe_remove(store, "df1")
+ store.append("df1", df1[:10])
+ store.append("df1", df1[10:])
+ tm.assert_frame_equal(store["df1"], df1, check_index_type=True)
+
+ # 2nd column
+ df2 = df.copy()
+ df2["A2"] = np.nan
+ _maybe_remove(store, "df2")
+ store.append("df2", df2[:10])
+ store.append("df2", df2[10:])
+ tm.assert_frame_equal(store["df2"], df2, check_index_type=True)
+
+ # datetimes
+ df3 = df.copy()
+ df3["E"] = np.nan
+ _maybe_remove(store, "df3")
+ store.append("df3", df3[:10])
+ store.append("df3", df3[10:])
+ tm.assert_frame_equal(store["df3"], df3, check_index_type=True)
+
+
+def test_append_all_nans(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ {
+ "A1": np.random.default_rng(2).standard_normal(20),
+ "A2": np.random.default_rng(2).standard_normal(20),
+ },
+ index=np.arange(20),
+ )
+ df.loc[0:15, :] = np.nan
+
+ # nan some entire rows (dropna=True)
+ _maybe_remove(store, "df")
+ store.append("df", df[:10], dropna=True)
+ store.append("df", df[10:], dropna=True)
+ tm.assert_frame_equal(store["df"], df[-4:], check_index_type=True)
+
+ # nan some entire rows (dropna=False)
+ _maybe_remove(store, "df2")
+ store.append("df2", df[:10], dropna=False)
+ store.append("df2", df[10:], dropna=False)
+ tm.assert_frame_equal(store["df2"], df, check_index_type=True)
+
+ # tests the option io.hdf.dropna_table
+ with pd.option_context("io.hdf.dropna_table", False):
+ _maybe_remove(store, "df3")
+ store.append("df3", df[:10])
+ store.append("df3", df[10:])
+ tm.assert_frame_equal(store["df3"], df)
+
+ with pd.option_context("io.hdf.dropna_table", True):
+ _maybe_remove(store, "df4")
+ store.append("df4", df[:10])
+ store.append("df4", df[10:])
+ tm.assert_frame_equal(store["df4"], df[-4:])
+
+ # nan some entire rows (string are still written!)
+ df = DataFrame(
+ {
+ "A1": np.random.default_rng(2).standard_normal(20),
+ "A2": np.random.default_rng(2).standard_normal(20),
+ "B": "foo",
+ "C": "bar",
+ },
+ index=np.arange(20),
+ )
+
+ df.loc[0:15, :] = np.nan
+
+ _maybe_remove(store, "df")
+ store.append("df", df[:10], dropna=True)
+ store.append("df", df[10:], dropna=True)
+ tm.assert_frame_equal(store["df"], df, check_index_type=True)
+
+ _maybe_remove(store, "df2")
+ store.append("df2", df[:10], dropna=False)
+ store.append("df2", df[10:], dropna=False)
+ tm.assert_frame_equal(store["df2"], df, check_index_type=True)
+
+ # nan some entire rows (but since we have dates they are still
+ # written!)
+ df = DataFrame(
+ {
+ "A1": np.random.default_rng(2).standard_normal(20),
+ "A2": np.random.default_rng(2).standard_normal(20),
+ "B": "foo",
+ "C": "bar",
+ "D": Timestamp("2001-01-01").as_unit("ns"),
+ "E": Timestamp("2001-01-02").as_unit("ns"),
+ },
+ index=np.arange(20),
+ )
+
+ df.loc[0:15, :] = np.nan
+
+ _maybe_remove(store, "df")
+ store.append("df", df[:10], dropna=True)
+ store.append("df", df[10:], dropna=True)
+ tm.assert_frame_equal(store["df"], df, check_index_type=True)
+
+ _maybe_remove(store, "df2")
+ store.append("df2", df[:10], dropna=False)
+ store.append("df2", df[10:], dropna=False)
+ tm.assert_frame_equal(store["df2"], df, check_index_type=True)
+
+
+def test_append_frame_column_oriented(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # column oriented
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df.index = df.index._with_freq(None) # freq doesn't round-trip
+
+ _maybe_remove(store, "df1")
+ store.append("df1", df.iloc[:, :2], axes=["columns"])
+ store.append("df1", df.iloc[:, 2:])
+ tm.assert_frame_equal(store["df1"], df)
+
+ result = store.select("df1", "columns=A")
+ expected = df.reindex(columns=["A"])
+ tm.assert_frame_equal(expected, result)
+
+ # selection on the non-indexable
+ result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
+ expected = df.reindex(columns=["A"], index=df.index[0:4])
+ tm.assert_frame_equal(expected, result)
+
+ # this isn't supported
+ msg = re.escape(
+ "passing a filterable condition to a non-table indexer "
+ "[Filter: Not Initialized]"
+ )
+ with pytest.raises(TypeError, match=msg):
+ store.select("df1", "columns=A and index>df.index[4]")
+
+
+def test_append_with_different_block_ordering(setup_path):
+ # GH 4096; using same frames, but different block orderings
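+ # deleting and re-inserting a column moves it into a freshly allocated
+ # block, so each iteration appends the same data with a different
+ # internal block layout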
+ with ensure_clean_store(setup_path) as store:
+ for i in range(10):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
+ )
+ df["index"] = range(10)
+ df["index"] += i * 10
+ df["int64"] = Series([1] * len(df), dtype="int64")
+ df["int16"] = Series([1] * len(df), dtype="int16")
+
+ if i % 2 == 0:
+ del df["int64"]
+ df["int64"] = Series([1] * len(df), dtype="int64")
+ if i % 3 == 0:
+ a = df.pop("A")
+ df["A"] = a
+
+ df.set_index("index", inplace=True)
+
+ store.append("df", df)
+
+ # test a different ordering but with more fields (i.e. invalid
+ # combinations that should raise on append)
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 2)),
+ columns=list("AB"),
+ dtype="float64",
+ )
+ df["int64"] = Series([1] * len(df), dtype="int64")
+ df["int16"] = Series([1] * len(df), dtype="int16")
+ store.append("df", df)
+
+ # store additional fields in different blocks
+ df["int16_2"] = Series([1] * len(df), dtype="int16")
+ msg = re.escape(
+ "cannot match existing table structure for [int16] on appending data"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df", df)
+
+ # store multiple additional fields in different blocks
+ df["float_3"] = Series([1.0] * len(df), dtype="float64")
+ msg = re.escape(
+ "cannot match existing table structure for [A,B] on appending data"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df", df)
+
+
+def test_append_with_strings(setup_path):
+ with ensure_clean_store(setup_path) as store:
+
+ def check_col(key, name, size):
+ assert (
+ getattr(store.get_storer(key).table.description, name).itemsize == size
+ )
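+ # itemsize is the fixed byte width PyTables allocates for a string column,
+ # set from the longest string on first write unless min_itemsize overrides it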
+
+ # avoid truncation on elements
+ df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
+ store.append("df_big", df)
+ tm.assert_frame_equal(store.select("df_big"), df)
+ check_col("df_big", "values_block_1", 15)
+
+ # appending smaller string ok
+ df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
+ store.append("df_big", df2)
+ expected = concat([df, df2])
+ tm.assert_frame_equal(store.select("df_big"), expected)
+ check_col("df_big", "values_block_1", 15)
+
+ # avoid truncation on elements
+ df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
+ store.append("df_big2", df, min_itemsize={"values": 50})
+ tm.assert_frame_equal(store.select("df_big2"), df)
+ check_col("df_big2", "values_block_1", 50)
+
+ # bigger string on next append
+ store.append("df_new", df)
+ df_new = DataFrame([[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]])
+ msg = (
+ r"Trying to store a string with len \[26\] in "
+ r"\[values_block_1\] column but\n"
+ r"this column has a limit of \[15\]!\n"
+ "Consider using min_itemsize to preset the sizes on these "
+ "columns"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df_new", df_new)
+
+ # min_itemsize on Series index (GH 11412)
+ df = DataFrame(
+ {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
+ "D": date_range("20130101", periods=5),
+ }
+ ).set_index("C")
+ store.append("ss", df["B"], min_itemsize={"index": 4})
+ tm.assert_series_equal(store.select("ss"), df["B"])
+
+ # same as above, with data_columns=True
+ store.append("ss2", df["B"], data_columns=True, min_itemsize={"index": 4})
+ tm.assert_series_equal(store.select("ss2"), df["B"])
+
+ # min_itemsize in index without appending (GH 10381)
+ store.put("ss3", df, format="table", min_itemsize={"index": 6})
+ # just make sure there is a longer string:
+ df2 = df.copy().reset_index().assign(C="longer").set_index("C")
+ store.append("ss3", df2)
+ tm.assert_frame_equal(store.select("ss3"), concat([df, df2]))
+
+ # same as above, with a Series
+ store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
+ store.append("ss4", df2["B"])
+ tm.assert_series_equal(store.select("ss4"), concat([df["B"], df2["B"]]))
+
+ # with nans
+ _maybe_remove(store, "df")
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df["string"] = "foo"
+ df.loc[df.index[1:4], "string"] = np.nan
+ df["string2"] = "bar"
+ df.loc[df.index[4:8], "string2"] = np.nan
+ df["string3"] = "bah"
+ df.loc[df.index[1:], "string3"] = np.nan
+ store.append("df", df)
+ result = store.select("df")
+ tm.assert_frame_equal(result, df)
+
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame({"A": "foo", "B": "bar"}, index=range(10))
+
+ # a min_itemsize that creates a data_column
+ _maybe_remove(store, "df")
+ store.append("df", df, min_itemsize={"A": 200})
+ check_col("df", "A", 200)
+ assert store.get_storer("df").data_columns == ["A"]
+
+ # a min_itemsize that creates a second data_column ("A" alongside "B")
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
+ check_col("df", "A", 200)
+ assert store.get_storer("df").data_columns == ["B", "A"]
+
+ # min_itemsize on "values" applies to data columns and value blocks alike
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
+ check_col("df", "B", 200)
+ check_col("df", "values_block_0", 200)
+ assert store.get_storer("df").data_columns == ["B"]
+
+ # infer the .typ on subsequent appends
+ _maybe_remove(store, "df")
+ store.append("df", df[:5], min_itemsize=200)
+ store.append("df", df[5:], min_itemsize=200)
+ tm.assert_frame_equal(store["df"], df)
+
+ # invalid min_itemsize keys
+ df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
+ _maybe_remove(store, "df")
+ msg = re.escape(
+ "min_itemsize has the key [foo] which is not an axis or data_column"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
+
+
+def test_append_with_empty_string(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # with all empty strings (GH 12242)
+ df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
+ store.append("df", df[:-1], min_itemsize={"x": 1})
+ store.append("df", df[-1:], min_itemsize={"x": 1})
+ tm.assert_frame_equal(store.select("df"), df)
+
+
+def test_append_with_data_columns(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df.iloc[0, df.columns.get_loc("B")] = 1.0
+ _maybe_remove(store, "df")
+ store.append("df", df[:2], data_columns=["B"])
+ store.append("df", df[2:])
+ tm.assert_frame_equal(store["df"], df)
+
+ # check that we have indices created
+ assert store._handle.root.df.table.cols.index.is_indexed is True
+ assert store._handle.root.df.table.cols.B.is_indexed is True
+
+ # data column searching
+ result = store.select("df", "B>0")
+ expected = df[df.B > 0]
+ tm.assert_frame_equal(result, expected)
+
+ # data column searching (with an indexable and a data_columns)
+ result = store.select("df", "B>0 and index>df.index[3]")
+ df_new = df.reindex(index=df.index[4:])
+ expected = df_new[df_new.B > 0]
+ tm.assert_frame_equal(result, expected)
+
+ # data column selection with a string data_column
+ df_new = df.copy()
+ df_new["string"] = "foo"
+ df_new.loc[df_new.index[1:4], "string"] = np.nan
+ df_new.loc[df_new.index[5:6], "string"] = "bar"
+ _maybe_remove(store, "df")
+ store.append("df", df_new, data_columns=["string"])
+ result = store.select("df", "string='foo'")
+ expected = df_new[df_new.string == "foo"]
+ tm.assert_frame_equal(result, expected)
+
+ # using min_itemsize and a data column
+ def check_col(key, name, size):
+ assert (
+ getattr(store.get_storer(key).table.description, name).itemsize == size
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ _maybe_remove(store, "df")
+ store.append("df", df_new, data_columns=["string"], min_itemsize={"string": 30})
+ check_col("df", "string", 30)
+ _maybe_remove(store, "df")
+ store.append("df", df_new, data_columns=["string"], min_itemsize=30)
+ check_col("df", "string", 30)
+ _maybe_remove(store, "df")
+ store.append("df", df_new, data_columns=["string"], min_itemsize={"values": 30})
+ check_col("df", "string", 30)
+
+ with ensure_clean_store(setup_path) as store:
+ df_new["string2"] = "foobarbah"
+ df_new["string_block1"] = "foobarbah1"
+ df_new["string_block2"] = "foobarbah2"
+ _maybe_remove(store, "df")
+ store.append(
+ "df",
+ df_new,
+ data_columns=["string", "string2"],
+ min_itemsize={"string": 30, "string2": 40, "values": 50},
+ )
+ check_col("df", "string", 30)
+ check_col("df", "string2", 40)
+ check_col("df", "values_block_1", 50)
+
+ with ensure_clean_store(setup_path) as store:
+ # multiple data columns
+ df_new = df.copy()
+ df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
+ df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
+ df_new["string"] = "foo"
+
+ sl = df_new.columns.get_loc("string")
+ df_new.iloc[1:4, sl] = np.nan
+ df_new.iloc[5:6, sl] = "bar"
+
+ df_new["string2"] = "foo"
+ sl = df_new.columns.get_loc("string2")
+ df_new.iloc[2:5, sl] = np.nan
+ df_new.iloc[7:8, sl] = "bar"
+ _maybe_remove(store, "df")
+ store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
+ result = store.select("df", "string='foo' and string2='foo' and A>0 and B<0")
+ expected = df_new[
+ (df_new.string == "foo")
+ & (df_new.string2 == "foo")
+ & (df_new.A > 0)
+ & (df_new.B < 0)
+ ]
+ tm.assert_frame_equal(result, expected, check_freq=False)
+ # FIXME: 2020-05-07 freq check randomly fails in the CI
+
+ # yield an empty frame
+ result = store.select("df", "string='foo' and string2='cool'")
+ expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
+ tm.assert_frame_equal(result, expected)
+
+ with ensure_clean_store(setup_path) as store:
+ # doc example
+ df_dc = df.copy()
+ df_dc["string"] = "foo"
+ df_dc.loc[df_dc.index[4:6], "string"] = np.nan
+ df_dc.loc[df_dc.index[7:9], "string"] = "bar"
+ df_dc["string2"] = "cool"
+ df_dc["datetime"] = Timestamp("20010102").as_unit("ns")
+ df_dc.loc[df_dc.index[3:5], ["A", "B", "datetime"]] = np.nan
+
+ _maybe_remove(store, "df_dc")
+ store.append(
+ "df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
+ )
+ result = store.select("df_dc", "B>0")
+
+ expected = df_dc[df_dc.B > 0]
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
+ expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
+ tm.assert_frame_equal(result, expected, check_freq=False)
+ # FIXME: 2020-12-07 intermittent build failures here with freq of
+ # None instead of BDay(4)
+
+ with ensure_clean_store(setup_path) as store:
+ # doc example part 2
+
+ index = date_range("1/1/2000", periods=8)
+ df_dc = DataFrame(
+ np.random.default_rng(2).standard_normal((8, 3)),
+ index=index,
+ columns=["A", "B", "C"],
+ )
+ df_dc["string"] = "foo"
+ df_dc.loc[df_dc.index[4:6], "string"] = np.nan
+ df_dc.loc[df_dc.index[7:9], "string"] = "bar"
+ df_dc[["B", "C"]] = df_dc[["B", "C"]].abs()
+ df_dc["string2"] = "cool"
+
+ # on-disk operations
+ store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
+
+ result = store.select("df_dc", "B>0")
+ expected = df_dc[df_dc.B > 0]
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
+ expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_append_hierarchical(tmp_path, setup_path, multiindex_dataframe_random_data):
+ df = multiindex_dataframe_random_data
+ df.columns.name = None
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("mi", df)
+ result = store.select("mi")
+ tm.assert_frame_equal(result, df)
+
+ # GH 3748
+ result = store.select("mi", columns=["A", "B"])
+ expected = df.reindex(columns=["A", "B"])
+ tm.assert_frame_equal(result, expected)
+
+ path = tmp_path / "test.hdf"
+ df.to_hdf(path, key="df", format="table")
+ result = read_hdf(path, "df", columns=["A", "B"])
+ expected = df.reindex(columns=["A", "B"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_append_misc(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
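+ # chunksize controls how many rows are written per PyTables call; the
+ # stored result must be identical regardless of chunking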
+ store.append("df", df, chunksize=1)
+ result = store.select("df")
+ tm.assert_frame_equal(result, df)
+
+ store.append("df1", df, expectedrows=10)
+ result = store.select("df1")
+ tm.assert_frame_equal(result, df)
+
+
+@pytest.mark.parametrize("chunksize", [10, 200, 1000])
+def test_append_misc_chunksize(setup_path, chunksize):
+ # more chunksize in append tests
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df["string"] = "foo"
+ df["float322"] = 1.0
+ df["float322"] = df["float322"].astype("float32")
+ df["bool"] = df["float322"] > 0
+ df["time1"] = Timestamp("20130101").as_unit("ns")
+ df["time2"] = Timestamp("20130102").as_unit("ns")
+ with ensure_clean_store(setup_path, mode="w") as store:
+ store.append("obj", df, chunksize=chunksize)
+ result = store.select("obj")
+ tm.assert_frame_equal(result, df)
+
+
+def test_append_misc_empty_frame(setup_path):
+ # empty frame, GH4273
+ with ensure_clean_store(setup_path) as store:
+ # 0 len
+ df_empty = DataFrame(columns=list("ABC"))
+ store.append("df", df_empty)
+ with pytest.raises(KeyError, match="'No object named df in the file'"):
+ store.select("df")
+
+ # repeated append of 0/non-zero frames
+ df = DataFrame(np.random.default_rng(2).random((10, 3)), columns=list("ABC"))
+ store.append("df", df)
+ tm.assert_frame_equal(store.select("df"), df)
+ store.append("df", df_empty)
+ tm.assert_frame_equal(store.select("df"), df)
+
+ # put stores an empty frame directly (unlike append, which skips it)
+ df = DataFrame(columns=list("ABC"))
+ store.put("df2", df)
+ tm.assert_frame_equal(store.select("df2"), df)
+
+
+# TODO(ArrayManager) currently we rely on falling back to BlockManager, but
+# the conversion from AM->BM converts the invalid object dtype column into
+# a datetime64 column no longer raising an error
+@td.skip_array_manager_not_yet_implemented
+def test_append_raise(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # test append with invalid input to get good error messages
+
+ # list in column
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df["invalid"] = [["a"]] * len(df)
+ assert df.dtypes["invalid"] == np.object_
+ msg = re.escape(
+ """Cannot serialize the column [invalid]
+because its data contents are not [string] but [mixed] object dtype"""
+ )
+ with pytest.raises(TypeError, match=msg):
+ store.append("df", df)
+
+ # multiple invalid columns
+ df["invalid2"] = [["a"]] * len(df)
+ df["invalid3"] = [["a"]] * len(df)
+ with pytest.raises(TypeError, match=msg):
+ store.append("df", df)
+
+ # datetime with embedded nans as object
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ s = Series(datetime.datetime(2001, 1, 2), index=df.index)
+ s = s.astype(object)
+ s[0:5] = np.nan
+ df["invalid"] = s
+ assert df.dtypes["invalid"] == np.object_
+ msg = "too many timezones in this block, create separate data columns"
+ with pytest.raises(TypeError, match=msg):
+ store.append("df", df)
+
+ # ndarray passed directly (not a Series/DataFrame)
+ msg = "value must be None, Series, or DataFrame"
+ with pytest.raises(TypeError, match=msg):
+ store.append("df", np.arange(10))
+
+ # series directly
+ msg = re.escape(
+ "cannot properly create the storer for: "
+ "[group->df,value->]"
+ )
+ with pytest.raises(TypeError, match=msg):
+ store.append("df", Series(np.arange(10)))
+
+ # appending an incompatible table
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ store.append("df", df)
+
+ df["foo"] = "foo"
+ msg = re.escape(
+ "invalid combination of [non_index_axes] on appending data "
+ "[(1, ['A', 'B', 'C', 'D', 'foo'])] vs current table "
+ "[(1, ['A', 'B', 'C', 'D'])]"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df", df)
+
+ # incompatible type (GH 41897)
+ _maybe_remove(store, "df")
+ df["foo"] = Timestamp("20130101")
+ store.append("df", df)
+ df["foo"] = "bar"
+ msg = re.escape(
+ "invalid combination of [values_axes] on appending data "
+ "[name->values_block_1,cname->values_block_1,"
+ "dtype->bytes24,kind->string,shape->(1, 30)] "
+ "vs current table "
+ "[name->values_block_1,cname->values_block_1,"
+ "dtype->datetime64[s],kind->datetime64[s],shape->None]"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df", df)
+
+
+def test_append_with_timedelta(setup_path):
+ # GH 3577
+ # append timedelta
+
+ ts = Timestamp("20130101").as_unit("ns")
+ df = DataFrame(
+ {
+ "A": ts,
+ "B": [ts + timedelta(days=i, seconds=10) for i in range(10)],
+ }
+ )
+ df["C"] = df["A"] - df["B"]
+ df.loc[3:5, "C"] = np.nan
+
+ with ensure_clean_store(setup_path) as store:
+ # table
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=True)
+ result = store.select("df")
+ tm.assert_frame_equal(result, df)
+
+ result = store.select("df", where="C<100000")
+ tm.assert_frame_equal(result, df)
+
+ result = store.select("df", where="C0", "B>0"], selector="df1"
+ )
+ expected = df[(df.A > 0) & (df.B > 0)]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_append_to_multiple_dropna(setup_path):
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ ).rename(columns="{}_2".format)
+ df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
+ df = concat([df1, df2], axis=1)
+
+ with ensure_clean_store(setup_path) as store:
+ # dropna=True should guarantee rows are synchronized
+ store.append_to_multiple(
+ {"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
+ )
+ result = store.select_as_multiple(["df1", "df2"])
+ expected = df.dropna()
+ tm.assert_frame_equal(result, expected, check_index_type=True)
+ tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
+
+
+def test_append_to_multiple_dropna_false(setup_path):
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = df1.copy().rename(columns="{}_2".format)
+ df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
+ df = concat([df1, df2], axis=1)
+
+ with ensure_clean_store(setup_path) as store, pd.option_context(
+ "io.hdf.dropna_table", True
+ ):
+ # dropna=False shouldn't synchronize row indexes
+ store.append_to_multiple(
+ {"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
+ )
+
+ msg = "all tables must have exactly the same nrows!"
+ with pytest.raises(ValueError, match=msg):
+ store.select_as_multiple(["df1a", "df2a"])
+
+ assert not store.select("df1a").index.equals(store.select("df2a").index)
+
+
+def test_append_to_multiple_min_itemsize(setup_path):
+ # GH 11238
+ df = DataFrame(
+ {
+ "IX": np.arange(1, 21),
+ "Num": np.arange(1, 21),
+ "BigNum": np.arange(1, 21) * 88,
+ "Str": ["a" for _ in range(20)],
+ "LongStr": ["abcde" for _ in range(20)],
+ }
+ )
+ expected = df.iloc[[0]]
+
+ with ensure_clean_store(setup_path) as store:
+ store.append_to_multiple(
+ {
+ "index": ["IX"],
+ "nums": ["Num", "BigNum"],
+ "strs": ["Str", "LongStr"],
+ },
+ df.iloc[[0]],
+ "index",
+ min_itemsize={"Str": 10, "LongStr": 100, "Num": 2},
+ )
+ result = store.select_as_multiple(["index", "nums", "strs"])
+ tm.assert_frame_equal(result, expected, check_index_type=True)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py
new file mode 100644
index 0000000000000000000000000000000000000000..58ebdfe7696b4bc9f37c193a3e2a4f19f55c3fe8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_categorical.py
@@ -0,0 +1,214 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ Categorical,
+ DataFrame,
+ Series,
+ _testing as tm,
+ concat,
+ read_hdf,
+)
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_categorical(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # Basic
+ _maybe_remove(store, "s")
+ s = Series(
+ Categorical(
+ ["a", "b", "b", "a", "a", "c"],
+ categories=["a", "b", "c", "d"],
+ ordered=False,
+ )
+ )
+ store.append("s", s, format="table")
+ result = store.select("s")
+ tm.assert_series_equal(s, result)
+
+ _maybe_remove(store, "s_ordered")
+ s = Series(
+ Categorical(
+ ["a", "b", "b", "a", "a", "c"],
+ categories=["a", "b", "c", "d"],
+ ordered=True,
+ )
+ )
+ store.append("s_ordered", s, format="table")
+ result = store.select("s_ordered")
+ tm.assert_series_equal(s, result)
+
+ _maybe_remove(store, "df")
+ df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
+ store.append("df", df, format="table")
+ result = store.select("df")
+ tm.assert_frame_equal(result, df)
+
+ # Dtypes
+ _maybe_remove(store, "si")
+ s = Series([1, 1, 2, 2, 3, 4, 5]).astype("category")
+ store.append("si", s)
+ result = store.select("si")
+ tm.assert_series_equal(result, s)
+
+ _maybe_remove(store, "si2")
+ s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype("category")
+ store.append("si2", s)
+ result = store.select("si2")
+ tm.assert_series_equal(result, s)
+
+ # Multiple
+ _maybe_remove(store, "df2")
+ df2 = df.copy()
+ df2["s2"] = Series(list("abcdefg")).astype("category")
+ store.append("df2", df2)
+ result = store.select("df2")
+ tm.assert_frame_equal(result, df2)
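+ # a Categorical is stored as its integer codes plus a separate /meta node
+ # holding the categories, which is what the info() checks below look for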
+
+ # Make sure the metadata is OK
+ info = store.info()
+ assert "/df2 " in info
+ # df2._mgr.blocks[0] and df2._mgr.blocks[2] are Categorical
+ assert "/df2/meta/values_block_0/meta" in info
+ assert "/df2/meta/values_block_2/meta" in info
+
+ # unordered
+ _maybe_remove(store, "s2")
+ s = Series(
+ Categorical(
+ ["a", "b", "b", "a", "a", "c"],
+ categories=["a", "b", "c", "d"],
+ ordered=False,
+ )
+ )
+ store.append("s2", s, format="table")
+ result = store.select("s2")
+ tm.assert_series_equal(result, s)
+
+ # Query
+ _maybe_remove(store, "df3")
+ store.append("df3", df, data_columns=["s"])
+ expected = df[df.s.isin(["b", "c"])]
+ result = store.select("df3", where=['s in ["b","c"]'])
+ tm.assert_frame_equal(result, expected)
+
+ expected = df[df.s.isin(["b", "c"])]
+ result = store.select("df3", where=['s = ["b","c"]'])
+ tm.assert_frame_equal(result, expected)
+
+ expected = df[df.s.isin(["d"])]
+ result = store.select("df3", where=['s in ["d"]'])
+ tm.assert_frame_equal(result, expected)
+
+ expected = df[df.s.isin(["f"])]
+ result = store.select("df3", where=['s in ["f"]'])
+ tm.assert_frame_equal(result, expected)
+
+ # Appending with same categories is ok
+ store.append("df3", df)
+
+ df = concat([df, df])
+ expected = df[df.s.isin(["b", "c"])]
+ result = store.select("df3", where=['s in ["b","c"]'])
+ tm.assert_frame_equal(result, expected)
+
+ # Appending must have the same categories
+ df3 = df.copy()
+ df3["s"] = df3["s"].cat.remove_unused_categories()
+
+ msg = "cannot append a categorical with different categories to the existing"
+ with pytest.raises(ValueError, match=msg):
+ store.append("df3", df3)
+
+ # Remove, and make sure the metadata is removed too (removal is
+ # recursive, so it should be).
+ result = store.select("df3/meta/s/meta")
+ assert result is not None
+ store.remove("df3")
+
+ with pytest.raises(
+ KeyError, match="'No object named df3/meta/s/meta in the file'"
+ ):
+ store.select("df3/meta/s/meta")
+
+
+def test_categorical_conversion(tmp_path, setup_path):
+ # GH13322
+ # Check that read_hdf with categorical columns doesn't return rows if
+ # the where criterion isn't met.
+ obsids = ["ESP_012345_6789", "ESP_987654_3210"]
+ imgids = ["APF00006np", "APF0001imm"]
+ data = [4.3, 9.8]
+
+ # Test without categories
+ df = DataFrame({"obsids": obsids, "imgids": imgids, "data": data})
+
+ # We are expecting an empty DataFrame matching types of df
+ expected = df.iloc[[], :]
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table", data_columns=True)
+ result = read_hdf(path, "df", where="obsids=B")
+ tm.assert_frame_equal(result, expected)
+
+ # Test with categories
+ df.obsids = df.obsids.astype("category")
+ df.imgids = df.imgids.astype("category")
+
+ # We are expecting an empty DataFrame matching types of df
+ expected = df.iloc[[], :]
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table", data_columns=True)
+ result = read_hdf(path, "df", where="obsids=B")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_categorical_nan_only_columns(tmp_path, setup_path):
+ # GH18413
+ # Check that read_hdf with categorical columns with NaN-only values can
+ # be read back.
+ df = DataFrame(
+ {
+ "a": ["a", "b", "c", np.nan],
+ "b": [np.nan, np.nan, np.nan, np.nan],
+ "c": [1, 2, 3, 4],
+ "d": Series([None] * 4, dtype=object),
+ }
+ )
+ df["a"] = df.a.astype("category")
+ df["b"] = df.b.astype("category")
+ df["d"] = df.b.astype("category")
+ expected = df
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table", data_columns=True)
+ result = read_hdf(path, "df")
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "where, df, expected",
+ [
+ ('col=="q"', DataFrame({"col": ["a", "b", "s"]}), DataFrame({"col": []})),
+ ('col=="a"', DataFrame({"col": ["a", "b", "s"]}), DataFrame({"col": ["a"]})),
+ ],
+)
+def test_convert_value(
+ tmp_path, setup_path, where: str, df: DataFrame, expected: DataFrame
+):
+ # GH39420
+ # Check that read_hdf with categorical columns can filter by where condition.
+ df.col = df.col.astype("category")
+ max_widths = {"col": 1}
+ categorical_values = sorted(df.col.unique())
+ expected.col = expected.col.astype("category")
+ expected.col = expected.col.cat.set_categories(categorical_values)
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table", min_itemsize=max_widths)
+ result = read_hdf(path, where=where)
+ tm.assert_frame_equal(result, expected)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..b07fb3ddd3ac829f5b90d6fd7226926aeed284e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_compat.py
@@ -0,0 +1,75 @@
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+tables = pytest.importorskip("tables")
+
+
+@pytest.fixture
+def pytables_hdf5_file(tmp_path):
+ """
+ Use PyTables to create a simple HDF5 file.
+ """
+ table_schema = {
+ "c0": tables.Time64Col(pos=0),
+ "c1": tables.StringCol(5, pos=1),
+ "c2": tables.Int64Col(pos=2),
+ }
+
+ t0 = 1_561_105_000.0
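+ # t0 is a Unix epoch timestamp (2019-06-21); Time64Col stores it as float seconds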
+
+ testsamples = [
+ {"c0": t0, "c1": "aaaaa", "c2": 1},
+ {"c0": t0 + 1, "c1": "bbbbb", "c2": 2},
+ {"c0": t0 + 2, "c1": "ccccc", "c2": 10**5},
+ {"c0": t0 + 3, "c1": "ddddd", "c2": 4_294_967_295},
+ ]
+
+ objname = "pandas_test_timeseries"
+
+ path = tmp_path / "written_with_pytables.h5"
+ with tables.open_file(path, mode="w") as f:
+ t = f.create_table("/", name=objname, description=table_schema)
+ for sample in testsamples:
+ for key, value in sample.items():
+ t.row[key] = value
+ t.row.append()
+
+ yield path, objname, pd.DataFrame(testsamples)
+
+
+class TestReadPyTablesHDF5:
+ """
+ A group of tests which covers reading HDF5 files written by plain PyTables
+ (not written by pandas).
+
+ Was introduced for regression-testing issue 11188.
+ """
+
+ def test_read_complete(self, pytables_hdf5_file):
+ path, objname, df = pytables_hdf5_file
+ result = pd.read_hdf(path, key=objname)
+ expected = df
+ tm.assert_frame_equal(result, expected, check_index_type=True)
+
+ def test_read_with_start(self, pytables_hdf5_file):
+ path, objname, df = pytables_hdf5_file
+ # This is a regression test for pandas-dev/pandas/issues/11188
+ result = pd.read_hdf(path, key=objname, start=1)
+ expected = df[1:].reset_index(drop=True)
+ tm.assert_frame_equal(result, expected, check_index_type=True)
+
+ def test_read_with_stop(self, pytables_hdf5_file):
+ path, objname, df = pytables_hdf5_file
+ # This is a regression test for pandas-dev/pandas/issues/11188
+ result = pd.read_hdf(path, key=objname, stop=1)
+ expected = df[:1].reset_index(drop=True)
+ tm.assert_frame_equal(result, expected, check_index_type=True)
+
+ def test_read_with_startstop(self, pytables_hdf5_file):
+ path, objname, df = pytables_hdf5_file
+ # This is a regression test for pandas-dev/pandas/issues/11188
+ result = pd.read_hdf(path, key=objname, start=1, stop=2)
+ expected = df[1:2].reset_index(drop=True)
+ tm.assert_frame_equal(result, expected, check_index_type=True)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5cac5a5caf090d85d7284103459c6f03d3d41ce
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py
@@ -0,0 +1,195 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Series,
+)
+import pandas._testing as tm
+from pandas.tests.io.pytables.common import ensure_clean_store
+
+from pandas.io.pytables import read_hdf
+
+
+def test_complex_fixed(tmp_path, setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)).astype(np.complex64),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df")
+ reread = read_hdf(path, "df")
+ tm.assert_frame_equal(df, reread)
+
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)).astype(np.complex128),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df")
+ reread = read_hdf(path, "df")
+ tm.assert_frame_equal(df, reread)
+
+
+def test_complex_table(tmp_path, setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)).astype(np.complex64),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table")
+ reread = read_hdf(path, key="df")
+ tm.assert_frame_equal(df, reread)
+
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)).astype(np.complex128),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table", mode="w")
+ reread = read_hdf(path, "df")
+ tm.assert_frame_equal(df, reread)
+
+
+def test_complex_mixed_fixed(tmp_path, setup_path):
+ complex64 = np.array(
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
+ )
+ complex128 = np.array(
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
+ )
+ df = DataFrame(
+ {
+ "A": [1, 2, 3, 4],
+ "B": ["a", "b", "c", "d"],
+ "C": complex64,
+ "D": complex128,
+ "E": [1.0, 2.0, 3.0, 4.0],
+ },
+ index=list("abcd"),
+ )
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df")
+ reread = read_hdf(path, "df")
+ tm.assert_frame_equal(df, reread)
+
+
+def test_complex_mixed_table(tmp_path, setup_path):
+ complex64 = np.array(
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
+ )
+ complex128 = np.array(
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
+ )
+ df = DataFrame(
+ {
+ "A": [1, 2, 3, 4],
+ "B": ["a", "b", "c", "d"],
+ "C": complex64,
+ "D": complex128,
+ "E": [1.0, 2.0, 3.0, 4.0],
+ },
+ index=list("abcd"),
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("df", df, data_columns=["A", "B"])
+ result = store.select("df", where="A>2")
+ tm.assert_frame_equal(df.loc[df.A > 2], result)
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table")
+ reread = read_hdf(path, "df")
+ tm.assert_frame_equal(df, reread)
+
+
+def test_complex_across_dimensions_fixed(tmp_path, setup_path):
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+ s = Series(complex128, index=list("abcd"))
+ df = DataFrame({"A": s, "B": s})
+
+ objs = [s, df]
+ comps = [tm.assert_series_equal, tm.assert_frame_equal]
+ for obj, comp in zip(objs, comps):
+ path = tmp_path / setup_path
+ obj.to_hdf(path, key="obj", format="fixed")
+ reread = read_hdf(path, "obj")
+ comp(obj, reread)
+
+
+def test_complex_across_dimensions(tmp_path, setup_path):
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+ s = Series(complex128, index=list("abcd"))
+ df = DataFrame({"A": s, "B": s})
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="obj", format="table")
+ reread = read_hdf(path, "obj")
+ tm.assert_frame_equal(df, reread)
+
+
+def test_complex_indexing_error(setup_path):
+ complex128 = np.array(
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
+ )
+ df = DataFrame(
+ {"A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex128},
+ index=list("abcd"),
+ )
+
+ msg = (
+ "Columns containing complex values can be stored "
+ "but cannot be indexed when using table format. "
+ "Either use fixed format, set index=False, "
+ "or do not include the columns containing complex "
+ "values to data_columns when initializing the table."
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ with pytest.raises(TypeError, match=msg):
+ store.append("df", df, data_columns=["C"])
+
+
+def test_complex_series_error(tmp_path, setup_path):
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+ s = Series(complex128, index=list("abcd"))
+
+ msg = (
+ "Columns containing complex values can be stored "
+ "but cannot be indexed when using table format. "
+ "Either use fixed format, set index=False, "
+ "or do not include the columns containing complex "
+ "values to data_columns when initializing the table."
+ )
+
+ path = tmp_path / setup_path
+ with pytest.raises(TypeError, match=msg):
+ s.to_hdf(path, key="obj", format="t")
+
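+ # with index=False nothing has to be indexed, so the complex column can be
+ # stored in table format and round-trips intact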
+ path = tmp_path / setup_path
+ s.to_hdf(path, key="obj", format="t", index=False)
+ reread = read_hdf(path, "obj")
+ tm.assert_series_equal(s, reread)
+
+
+def test_complex_append(setup_path):
+ df = DataFrame(
+ {
+ "a": np.random.default_rng(2).standard_normal(100).astype(np.complex128),
+ "b": np.random.default_rng(2).standard_normal(100),
+ }
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("df", df, data_columns=["b"])
+ store.append("df", df)
+ result = store.select("df")
+ tm.assert_frame_equal(pd.concat([df, df], axis=0), result)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..20211010988924ed601f52ed9adf03ca081838c8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py
@@ -0,0 +1,251 @@
+import datetime
+from io import BytesIO
+import re
+
+import numpy as np
+import pytest
+
+from pandas import (
+ CategoricalIndex,
+ DataFrame,
+ HDFStore,
+ Index,
+ MultiIndex,
+ _testing as tm,
+ date_range,
+ read_hdf,
+)
+from pandas.tests.io.pytables.common import ensure_clean_store
+
+from pandas.io.pytables import (
+ Term,
+ _maybe_adjust_name,
+)
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_pass_spec_to_storer(setup_path):
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df)
+ msg = (
+ "cannot pass a column specification when reading a Fixed format "
+ "store. this store must be selected in its entirety"
+ )
+ with pytest.raises(TypeError, match=msg):
+ store.select("df", columns=["A"])
+ msg = (
+ "cannot pass a where specification when reading from a Fixed "
+ "format store. this store must be selected in its entirety"
+ )
+ with pytest.raises(TypeError, match=msg):
+ store.select("df", where=[("columns=A")])
+
+
+def test_table_index_incompatible_dtypes(setup_path):
+ df1 = DataFrame({"a": [1, 2, 3]})
+ df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("frame", df1, format="table")
+ msg = re.escape("incompatible kind in col [integer - datetime64[ns]]")
+ with pytest.raises(TypeError, match=msg):
+ store.put("frame", df2, format="table", append=True)
+
+
+def test_unimplemented_dtypes_table_columns(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ dtypes = [("date", datetime.date(2001, 1, 2))]
+
+ # currently unsupported dtypes
+ for n, f in dtypes:
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df[n] = f
+ msg = re.escape(f"[{n}] is not implemented as a table column")
+ with pytest.raises(TypeError, match=msg):
+ store.append(f"df1_{n}", df)
+
+ # frame
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df["obj1"] = "foo"
+ df["obj2"] = "bar"
+ df["datetime1"] = datetime.date(2001, 1, 2)
+ df = df._consolidate()
+
+ with ensure_clean_store(setup_path) as store:
+ # this fails because we have a date in the object block......
+ msg = re.escape(
+ """Cannot serialize the column [datetime1]
+because its data contents are not [string] but [date] object dtype"""
+ )
+ with pytest.raises(TypeError, match=msg):
+ store.append("df_unimplemented", df)
+
+
+def test_invalid_terms(tmp_path, setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df["string"] = "foo"
+ df.loc[df.index[0:4], "string"] = "bar"
+
+ store.put("df", df, format="table")
+
+ # some invalid terms
+ msg = re.escape("__init__() missing 1 required positional argument: 'where'")
+ with pytest.raises(TypeError, match=msg):
+ Term()
+
+ # more invalid
+ msg = re.escape(
+ "cannot process expression [df.index[3]], "
+ "[2000-01-06 00:00:00] is not a valid condition"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.select("df", "df.index[3]")
+
+ msg = "invalid syntax"
+ with pytest.raises(SyntaxError, match=msg):
+ store.select("df", "index>")
+
+ # from the docs
+ path = tmp_path / setup_path
+ dfq = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=list("ABCD"),
+ index=date_range("20130101", periods=10),
+ )
+ dfq.to_hdf(path, key="dfq", format="table", data_columns=True)
+
+ # check ok
+ read_hdf(path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']")
+ read_hdf(path, "dfq", where="A>0 or C>0")
+
+ # catch the invalid reference
+ path = tmp_path / setup_path
+ dfq = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=list("ABCD"),
+ index=date_range("20130101", periods=10),
+ )
+ dfq.to_hdf(path, key="dfq", format="table")
+
+ msg = (
+ r"The passed where expression: A>0 or C>0\n\s*"
+ r"contains an invalid variable reference\n\s*"
+ r"all of the variable references must be a reference to\n\s*"
+ r"an axis \(e.g. 'index' or 'columns'\), or a data_column\n\s*"
+ r"The currently defined references are: index,columns\n"
+ )
+ with pytest.raises(ValueError, match=msg):
+ read_hdf(path, "dfq", where="A>0 or C>0")
+
+
+def test_append_with_diff_col_name_types_raises_value_error(setup_path):
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 1)))
+ df2 = DataFrame({"a": np.random.default_rng(2).standard_normal(10)})
+ df3 = DataFrame({(1, 2): np.random.default_rng(2).standard_normal(10)})
+ df4 = DataFrame({("1", 2): np.random.default_rng(2).standard_normal(10)})
+ df5 = DataFrame({("1", 2, object): np.random.default_rng(2).standard_normal(10)})
+
+ with ensure_clean_store(setup_path) as store:
+ name = "df_diff_valerror"
+ store.append(name, df)
+
+ for d in (df2, df3, df4, df5):
+ msg = re.escape(
+ "cannot match existing table structure for [0] on appending data"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append(name, d)
+
+
+def test_invalid_complib(setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+ with tm.ensure_clean(setup_path) as path:
+ msg = r"complib only supports \[.*\] compression."
+ with pytest.raises(ValueError, match=msg):
+ df.to_hdf(path, key="df", complib="foolib")
+
+
+@pytest.mark.parametrize(
+ "idx",
+ [
+ date_range("2019", freq="D", periods=3, tz="UTC"),
+ CategoricalIndex(list("abc")),
+ ],
+)
+def test_to_hdf_multiindex_extension_dtype(idx, tmp_path, setup_path):
+ # GH 7775
+ mi = MultiIndex.from_arrays([idx, idx])
+ df = DataFrame(0, index=mi, columns=["a"])
+ path = tmp_path / setup_path
+ with pytest.raises(NotImplementedError, match="Saving a MultiIndex"):
+ df.to_hdf(path, key="df")
+
+
+def test_unsupported_hdf_file_error(datapath):
+ # GH 9539
+ data_path = datapath("io", "data", "legacy_hdf/incompatible_dataset.h5")
+ message = (
+ r"Dataset\(s\) incompatible with Pandas data types, "
+ "not table, or no datasets found in HDF5 file."
+ )
+
+ with pytest.raises(ValueError, match=message):
+ read_hdf(data_path)
+
+
+def test_read_hdf_errors(setup_path, tmp_path):
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+
+ path = tmp_path / setup_path
+ msg = r"File [\S]* does not exist"
+ with pytest.raises(OSError, match=msg):
+ read_hdf(path, "key")
+
+ df.to_hdf(path, key="df")
+ store = HDFStore(path, mode="r")
+ store.close()
+
+ msg = "The HDFStore must be open for reading."
+ with pytest.raises(OSError, match=msg):
+ read_hdf(store, "df")
+
+
+def test_read_hdf_generic_buffer_errors():
+ msg = "Support for generic buffers has not been implemented."
+ with pytest.raises(NotImplementedError, match=msg):
+ read_hdf(BytesIO(b""), "df")
+
+
+@pytest.mark.parametrize("bad_version", [(1, 2), (1,), [], "12", "123"])
+def test_maybe_adjust_name_bad_version_raises(bad_version):
+ msg = "Version is incorrect, expected sequence of 3 integers"
+ with pytest.raises(ValueError, match=msg):
+ _maybe_adjust_name("values_block_0", version=bad_version)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py
new file mode 100644
index 0000000000000000000000000000000000000000..d93de16816725cf6a3a326301eadcd35345a96d2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_file_handling.py
@@ -0,0 +1,495 @@
+import os
+
+import numpy as np
+import pytest
+
+from pandas.compat import (
+ PY311,
+ is_ci_environment,
+ is_platform_linux,
+ is_platform_little_endian,
+)
+from pandas.errors import (
+ ClosedFileError,
+ PossibleDataLossError,
+)
+
+from pandas import (
+ DataFrame,
+ HDFStore,
+ Index,
+ Series,
+ _testing as tm,
+ date_range,
+ read_hdf,
+)
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+ tables,
+)
+
+from pandas.io import pytables
+from pandas.io.pytables import Term
+
+pytestmark = pytest.mark.single_cpu
+
+
+@pytest.mark.parametrize("mode", ["r", "r+", "a", "w"])
+def test_mode(setup_path, tmp_path, mode):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ msg = r"[\S]* does not exist"
+ path = tmp_path / setup_path
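+ # modes "r"/"r+" require an existing file, while "a" and "w" create one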
+
+ # constructor
+ if mode in ["r", "r+"]:
+ with pytest.raises(OSError, match=msg):
+ HDFStore(path, mode=mode)
+
+ else:
+ with HDFStore(path, mode=mode) as store:
+ assert store._handle.mode == mode
+
+ path = tmp_path / setup_path
+
+ # context
+ if mode in ["r", "r+"]:
+ with pytest.raises(OSError, match=msg):
+ with HDFStore(path, mode=mode) as store:
+ pass
+ else:
+ with HDFStore(path, mode=mode) as store:
+ assert store._handle.mode == mode
+
+ path = tmp_path / setup_path
+
+ # conv write
+ if mode in ["r", "r+"]:
+ with pytest.raises(OSError, match=msg):
+ df.to_hdf(path, key="df", mode=mode)
+ df.to_hdf(path, key="df", mode="w")
+ else:
+ df.to_hdf(path, key="df", mode=mode)
+
+ # conv read
+ if mode in ["w"]:
+ msg = (
+ "mode w is not allowed while performing a read. "
+ r"Allowed modes are r, r\+ and a."
+ )
+ with pytest.raises(ValueError, match=msg):
+ read_hdf(path, "df", mode=mode)
+ else:
+ result = read_hdf(path, "df", mode=mode)
+ tm.assert_frame_equal(result, df)
+
+
+def test_default_mode(tmp_path, setup_path):
+ # read_hdf uses default mode
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", mode="w")
+ result = read_hdf(path, "df")
+ tm.assert_frame_equal(result, df)
+
+
+def test_reopen_handle(tmp_path, setup_path):
+ path = tmp_path / setup_path
+
+ store = HDFStore(path, mode="a")
+ store["a"] = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+
+ msg = (
+ r"Re-opening the file \[[\S]*\] with mode \[a\] will delete the "
+ "current file!"
+ )
+ # invalid mode change
+ with pytest.raises(PossibleDataLossError, match=msg):
+ store.open("w")
+
+ store.close()
+ assert not store.is_open
+
+ # truncation ok here
+ store.open("w")
+ assert store.is_open
+ assert len(store) == 0
+ store.close()
+ assert not store.is_open
+
+ store = HDFStore(path, mode="a")
+ store["a"] = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+
+ # reopen as read
+ store.open("r")
+ assert store.is_open
+ assert len(store) == 1
+ assert store._mode == "r"
+ store.close()
+ assert not store.is_open
+
+ # reopen as append
+ store.open("a")
+ assert store.is_open
+ assert len(store) == 1
+ assert store._mode == "a"
+ store.close()
+ assert not store.is_open
+
+ # reopen as append (again)
+ store.open("a")
+ assert store.is_open
+ assert len(store) == 1
+ assert store._mode == "a"
+ store.close()
+ assert not store.is_open
+
+
+def test_open_args(setup_path):
+ with tm.ensure_clean(setup_path) as path:
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ # create an in memory store
+ store = HDFStore(
+ path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
+ )
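+ # driver_core_backing_store=0 keeps the H5FD_CORE file image in memory
+ # only, so nothing is flushed to disk on close (asserted below)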
+ store["df"] = df
+ store.append("df2", df)
+
+ tm.assert_frame_equal(store["df"], df)
+ tm.assert_frame_equal(store["df2"], df)
+
+ store.close()
+
+ # the file should not have actually been written
+ assert not os.path.exists(path)
+
+
+def test_flush(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ store["a"] = Series(range(5))
+ store.flush()
+ store.flush(fsync=True)
+
+
+def test_complibs_default_settings(tmp_path, setup_path):
+ # GH15943
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ # Set complevel and check if complib is automatically set to
+ # default value
+ tmpfile = tmp_path / setup_path
+ df.to_hdf(tmpfile, key="df", complevel=9)
+ result = read_hdf(tmpfile, "df")
+ tm.assert_frame_equal(result, df)
+
+ with tables.open_file(tmpfile, mode="r") as h5file:
+ for node in h5file.walk_nodes(where="/df", classname="Leaf"):
+ assert node.filters.complevel == 9
+ assert node.filters.complib == "zlib"
+
+ # Set complib and check to see if compression is disabled
+ tmpfile = tmp_path / setup_path
+ df.to_hdf(tmpfile, key="df", complib="zlib")
+ result = read_hdf(tmpfile, "df")
+ tm.assert_frame_equal(result, df)
+
+ with tables.open_file(tmpfile, mode="r") as h5file:
+ for node in h5file.walk_nodes(where="/df", classname="Leaf"):
+ assert node.filters.complevel == 0
+ assert node.filters.complib is None
+
+ # Check if not setting complib or complevel results in no compression
+ tmpfile = tmp_path / setup_path
+ df.to_hdf(tmpfile, key="df")
+ result = read_hdf(tmpfile, "df")
+ tm.assert_frame_equal(result, df)
+
+ with tables.open_file(tmpfile, mode="r") as h5file:
+ for node in h5file.walk_nodes(where="/df", classname="Leaf"):
+ assert node.filters.complevel == 0
+ assert node.filters.complib is None
+
+
+def test_complibs_default_settings_override(tmp_path, setup_path):
+ # Check that file-wide defaults can be overridden on a per-table basis
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ tmpfile = tmp_path / setup_path
+ store = HDFStore(tmpfile)
+ store.append("dfc", df, complevel=9, complib="blosc")
+ store.append("df", df)
+ store.close()
+
+ with tables.open_file(tmpfile, mode="r") as h5file:
+ for node in h5file.walk_nodes(where="/df", classname="Leaf"):
+ assert node.filters.complevel == 0
+ assert node.filters.complib is None
+ for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
+ assert node.filters.complevel == 9
+ assert node.filters.complib == "blosc"
+
+
+@pytest.mark.parametrize("lvl", range(10))
+@pytest.mark.parametrize("lib", tables.filters.all_complibs)
+@pytest.mark.filterwarnings("ignore:object name is not a valid")
+@pytest.mark.skipif(
+ not PY311 and is_ci_environment() and is_platform_linux(),
+ reason="Segfaulting in a CI environment"
+ # with xfail, would sometimes raise UnicodeDecodeError
+ # invalid state byte
+)
+def test_complibs(tmp_path, lvl, lib, request):
+ # GH14478
+ if PY311 and is_platform_linux() and lib == "blosc2" and lvl != 0:
+ request.applymarker(
+ pytest.mark.xfail(reason=f"Fails for {lib} on Linux and PY > 3.11")
+ )
+ df = DataFrame(
+ np.ones((30, 4)), columns=list("ABCD"), index=np.arange(30).astype(np.str_)
+ )
+
+ # Skip if lzo is not available on this platform
+ if not tables.which_lib_version("lzo"):
+ pytest.skip("lzo not available")
+ # Skip if bzip2 is not available on this platform
+ if not tables.which_lib_version("bzip2"):
+ pytest.skip("bzip2 not available")
+
+ tmpfile = tmp_path / f"{lvl}_{lib}.h5"
+ gname = f"{lvl}_{lib}"
+
+ # Write and read file to see if data is consistent
+ df.to_hdf(tmpfile, key=gname, complib=lib, complevel=lvl)
+ result = read_hdf(tmpfile, gname)
+ tm.assert_frame_equal(result, df)
+
+ # Open file and check metadata for correct amount of compression
+ with tables.open_file(tmpfile, mode="r") as h5table:
+ for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
+ assert node.filters.complevel == lvl
+ if lvl == 0:
+ assert node.filters.complib is None
+ else:
+ assert node.filters.complib == lib
+
+
+@pytest.mark.skipif(
+ not is_platform_little_endian(), reason="reason platform is not little endian"
+)
+def test_encoding(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame({"A": "foo", "B": "bar"}, index=range(5))
+ df.loc[2, "A"] = np.nan
+ df.loc[3, "B"] = np.nan
+ _maybe_remove(store, "df")
+ store.append("df", df, encoding="ascii")
+ tm.assert_frame_equal(store["df"], df)
+
+ expected = df.reindex(columns=["A"])
+ result = store.select("df", Term("columns=A", encoding="ascii"))
+ tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+ "val",
+ [
+ [b"E\xc9, 17", b"", b"a", b"b", b"c"],
+ [b"E\xc9, 17", b"a", b"b", b"c"],
+ [b"EE, 17", b"", b"a", b"b", b"c"],
+ [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
+ [b"", b"a", b"b", b"c"],
+ [b"\xf8\xfc", b"a", b"b", b"c"],
+ [b"A\xf8\xfc", b"", b"a", b"b", b"c"],
+ [np.nan, b"", b"b", b"c"],
+ [b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
+ ],
+)
+@pytest.mark.parametrize("dtype", ["category", object])
+def test_latin_encoding(tmp_path, setup_path, dtype, val):
+ enc = "latin-1"
+ nan_rep = ""
+ key = "data"
+
+ val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
+ ser = Series(val, dtype=dtype)
+
+ store = tmp_path / setup_path
+ ser.to_hdf(store, key=key, format="table", encoding=enc, nan_rep=nan_rep)
+ retr = read_hdf(store, key)
+
+ # TODO:(3.0): once Categorical replace deprecation is enforced,
+ # we may be able to re-simplify the construction of s_nan
+ if dtype == "category":
+ if nan_rep in ser.cat.categories:
+ s_nan = ser.cat.remove_categories([nan_rep])
+ else:
+ s_nan = ser
+ else:
+ s_nan = ser.replace(nan_rep, np.nan)
+
+ tm.assert_series_equal(s_nan, retr)
+
+
+def test_multiple_open_close(tmp_path, setup_path):
+ # gh-4409: open & close multiple times
+
+ path = tmp_path / setup_path
+
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df.to_hdf(path, key="df", mode="w", format="table")
+
+ # single
+ store = HDFStore(path)
+ assert "CLOSED" not in store.info()
+ assert store.is_open
+
+ store.close()
+ assert "CLOSED" in store.info()
+ assert not store.is_open
+
+ path = tmp_path / setup_path
+
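+ # whether two handles may open the same file depends on the PyTables
+ # file-open policy, so both possible behaviors are exercised below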
+ if pytables._table_file_open_policy_is_strict:
+ # multiples
+ store1 = HDFStore(path)
+ msg = (
+ r"The file [\S]* is already opened\. Please close it before "
+ r"reopening in write mode\."
+ )
+ with pytest.raises(ValueError, match=msg):
+ HDFStore(path)
+
+ store1.close()
+ else:
+ # multiples
+ store1 = HDFStore(path)
+ store2 = HDFStore(path)
+
+ assert "CLOSED" not in store1.info()
+ assert "CLOSED" not in store2.info()
+ assert store1.is_open
+ assert store2.is_open
+
+ store1.close()
+ assert "CLOSED" in store1.info()
+ assert not store1.is_open
+ assert "CLOSED" not in store2.info()
+ assert store2.is_open
+
+ store2.close()
+ assert "CLOSED" in store1.info()
+ assert "CLOSED" in store2.info()
+ assert not store1.is_open
+ assert not store2.is_open
+
+ # nested close
+ store = HDFStore(path, mode="w")
+ store.append("df", df)
+
+ store2 = HDFStore(path)
+ store2.append("df2", df)
+ store2.close()
+ assert "CLOSED" in store2.info()
+ assert not store2.is_open
+
+ store.close()
+ assert "CLOSED" in store.info()
+ assert not store.is_open
+
+ # double closing
+ store = HDFStore(path, mode="w")
+ store.append("df", df)
+
+ store2 = HDFStore(path)
+ store.close()
+ assert "CLOSED" in store.info()
+ assert not store.is_open
+
+ store2.close()
+ assert "CLOSED" in store2.info()
+ assert not store2.is_open
+
+ # ops on a closed store
+ path = tmp_path / setup_path
+
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df.to_hdf(path, key="df", mode="w", format="table")
+
+ store = HDFStore(path)
+ store.close()
+
+ msg = r"[\S]* file is not open!"
+ with pytest.raises(ClosedFileError, match=msg):
+ store.keys()
+
+ with pytest.raises(ClosedFileError, match=msg):
+ "df" in store
+
+ with pytest.raises(ClosedFileError, match=msg):
+ len(store)
+
+ with pytest.raises(ClosedFileError, match=msg):
+ store["df"]
+
+ with pytest.raises(ClosedFileError, match=msg):
+ store.select("df")
+
+ with pytest.raises(ClosedFileError, match=msg):
+ store.get("df")
+
+ with pytest.raises(ClosedFileError, match=msg):
+ store.append("df2", df)
+
+ with pytest.raises(ClosedFileError, match=msg):
+ store.put("df3", df)
+
+ with pytest.raises(ClosedFileError, match=msg):
+ store.get_storer("df2")
+
+ with pytest.raises(ClosedFileError, match=msg):
+ store.remove("df2")
+
+ with pytest.raises(ClosedFileError, match=msg):
+ store.select("df")
+
+ msg = "'HDFStore' object has no attribute 'df'"
+ with pytest.raises(AttributeError, match=msg):
+ store.df
+
+
+def test_fspath():
+ with tm.ensure_clean("foo.h5") as path:
+ with HDFStore(path) as store:
+ assert os.fspath(store) == str(path)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py
new file mode 100644
index 0000000000000000000000000000000000000000..55bd3f0d5a03a1636ae07ea9e1e3776743fd6464
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_keys.py
@@ -0,0 +1,87 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ HDFStore,
+ Index,
+ Series,
+ date_range,
+)
+from pandas.tests.io.pytables.common import (
+ ensure_clean_store,
+ tables,
+)
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_keys(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ store["a"] = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ store["b"] = Series(
+ range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]
+ )
+ store["c"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ assert len(store) == 3
+ expected = {"/a", "/b", "/c"}
+ assert set(store.keys()) == expected
+ assert set(store) == expected
+
+
+def test_non_pandas_keys(tmp_path, setup_path):
+ class Table1(tables.IsDescription):
+ value1 = tables.Float32Col()
+
+ class Table2(tables.IsDescription):
+ value2 = tables.Float32Col()
+
+ class Table3(tables.IsDescription):
+ value3 = tables.Float32Col()
+
+ path = tmp_path / setup_path
+ with tables.open_file(path, mode="w") as h5file:
+ group = h5file.create_group("/", "group")
+ h5file.create_table(group, "table1", Table1, "Table 1")
+ h5file.create_table(group, "table2", Table2, "Table 2")
+ h5file.create_table(group, "table3", Table3, "Table 3")
+ with HDFStore(path) as store:
+ assert len(store.keys(include="native")) == 3
+ expected = {"/group/table1", "/group/table2", "/group/table3"}
+ assert set(store.keys(include="native")) == expected
+ assert set(store.keys(include="pandas")) == set()
+ for name in expected:
+ df = store.get(name)
+ assert len(df.columns) == 1
+
+
+def test_keys_illegal_include_keyword_value(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ with pytest.raises(
+ ValueError,
+ match="`include` should be either 'pandas' or 'native' but is 'illegal'",
+ ):
+ store.keys(include="illegal")
+
+
+def test_keys_ignore_hdf_softlink(setup_path):
+ # GH 20523
+ # Put a softlink into the HDF file and reread it
+
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame({"A": range(5), "B": range(5)})
+ store.put("df", df)
+
+ assert store.keys() == ["/df"]
+
+ store._handle.create_soft_link(store._handle.root, "symlink", "df")
+
+ # Should ignore the softlink
+ assert store.keys() == ["/df"]
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5f046b7fa3308e30959953f8751056de941a32
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_put.py
@@ -0,0 +1,374 @@
+import re
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ HDFStore,
+ Index,
+ MultiIndex,
+ Series,
+ _testing as tm,
+ concat,
+ date_range,
+)
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+from pandas.util import _test_decorators as td
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_format_type(tmp_path, setup_path):
+ df = DataFrame({"A": [1, 2]})
+ with HDFStore(tmp_path / setup_path) as store:
+ store.put("a", df, format="fixed")
+ store.put("b", df, format="table")
+
+ assert store.get_storer("a").format_type == "fixed"
+ assert store.get_storer("b").format_type == "table"
+
+
+def test_format_kwarg_in_constructor(tmp_path, setup_path):
+ # GH 13291
+
+ msg = "format is not a defined argument for HDFStore"
+
+ with pytest.raises(ValueError, match=msg):
+ HDFStore(tmp_path / setup_path, format="table")
+
+
+def test_api_default_format(tmp_path, setup_path):
+ # default_format option
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ with pd.option_context("io.hdf.default_format", "fixed"):
+ _maybe_remove(store, "df")
+ store.put("df", df)
+ assert not store.get_storer("df").is_table
+
+ msg = "Can only append to Tables"
+ with pytest.raises(ValueError, match=msg):
+ store.append("df2", df)
+
+ with pd.option_context("io.hdf.default_format", "table"):
+ _maybe_remove(store, "df")
+ store.put("df", df)
+ assert store.get_storer("df").is_table
+
+ _maybe_remove(store, "df2")
+ store.append("df2", df)
+ assert store.get_storer("df").is_table
+
+ path = tmp_path / setup_path
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ with pd.option_context("io.hdf.default_format", "fixed"):
+ df.to_hdf(path, key="df")
+ with HDFStore(path) as store:
+ assert not store.get_storer("df").is_table
+ with pytest.raises(ValueError, match=msg):
+ df.to_hdf(path, key="df2", append=True)
+
+ with pd.option_context("io.hdf.default_format", "table"):
+ df.to_hdf(path, key="df3")
+ with HDFStore(path) as store:
+ assert store.get_storer("df3").is_table
+ df.to_hdf(path, key="df4", append=True)
+ with HDFStore(path) as store:
+ assert store.get_storer("df4").is_table
+
+
+def test_put(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ ts = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((20, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=20, freq="B"),
+ )
+ store["a"] = ts
+ store["b"] = df[:10]
+ store["foo/bar/bah"] = df[:10]
+ store["foo"] = df[:10]
+ store["/foo"] = df[:10]
+ store.put("c", df[:10], format="table")
+
+ # not OK, not a table
+ msg = "Can only append to Tables"
+ with pytest.raises(ValueError, match=msg):
+ store.put("b", df[10:], append=True)
+
+ # node does not currently exist, test _is_table_type returns False
+ # in this case
+ _maybe_remove(store, "f")
+ with pytest.raises(ValueError, match=msg):
+ store.put("f", df[10:], append=True)
+
+ # can't put to a table (use append instead)
+ with pytest.raises(ValueError, match=msg):
+ store.put("c", df[10:], append=True)
+
+ # overwrite table
+ store.put("c", df[:10], format="table", append=False)
+ tm.assert_frame_equal(df[:10], store["c"])
+
+
+def test_put_string_index(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ index = Index([f"I am a very long string index: {i}" for i in range(20)])
+ s = Series(np.arange(20), index=index)
+ df = DataFrame({"A": s, "B": s})
+
+ store["a"] = s
+ tm.assert_series_equal(store["a"], s)
+
+ store["b"] = df
+ tm.assert_frame_equal(store["b"], df)
+
+ # mixed length
+ index = Index(
+ ["abcdefghijklmnopqrstuvwxyz1234567890"]
+ + [f"I am a very long string index: {i}" for i in range(20)]
+ )
+ s = Series(np.arange(21), index=index)
+ df = DataFrame({"A": s, "B": s})
+ store["a"] = s
+ tm.assert_series_equal(store["a"], s)
+
+ store["b"] = df
+ tm.assert_frame_equal(store["b"], df)
+
+
+def test_put_compression(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+
+ store.put("c", df, format="table", complib="zlib")
+ tm.assert_frame_equal(store["c"], df)
+
+ # can't compress if format='fixed'
+ msg = "Compression not supported on Fixed format stores"
+ with pytest.raises(ValueError, match=msg):
+ store.put("b", df, format="fixed", complib="zlib")
+
+
+@td.skip_if_windows
+def test_put_compression_blosc(setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ # can't compress if format='fixed'
+ msg = "Compression not supported on Fixed format stores"
+ with pytest.raises(ValueError, match=msg):
+ store.put("b", df, format="fixed", complib="blosc")
+
+ store.put("c", df, format="table", complib="blosc")
+ tm.assert_frame_equal(store["c"], df)
+
+
+def test_put_mixed_type(setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df["obj1"] = "foo"
+ df["obj2"] = "bar"
+ df["bool1"] = df["A"] > 0
+ df["bool2"] = df["B"] > 0
+ df["bool3"] = True
+ df["int1"] = 1
+ df["int2"] = 2
+ df["timestamp1"] = Timestamp("20010102").as_unit("ns")
+ df["timestamp2"] = Timestamp("20010103").as_unit("ns")
+ df["datetime1"] = Timestamp("20010102").as_unit("ns")
+ df["datetime2"] = Timestamp("20010103").as_unit("ns")
+ df.loc[df.index[3:6], ["obj1"]] = np.nan
+ df = df._consolidate()
+
+ with ensure_clean_store(setup_path) as store:
+ _maybe_remove(store, "df")
+
+ with tm.assert_produces_warning(pd.errors.PerformanceWarning):
+ store.put("df", df)
+
+ expected = store.get("df")
+ tm.assert_frame_equal(expected, df)
+
+
+@pytest.mark.parametrize("format", ["table", "fixed"])
+@pytest.mark.parametrize(
+ "index",
+ [
+ Index([str(i) for i in range(10)]),
+ Index(np.arange(10, dtype=float)),
+ Index(np.arange(10)),
+ date_range("2020-01-01", periods=10),
+ pd.period_range("2020-01-01", periods=10),
+ ],
+)
+def test_store_index_types(setup_path, format, index):
+ # GH5386
+ # test storing various index types
+
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 2)),
+ columns=list("AB"),
+ index=index,
+ )
+ _maybe_remove(store, "df")
+ store.put("df", df, format=format)
+ tm.assert_frame_equal(df, store["df"])
+
+
+def test_column_multiindex(setup_path):
+ # GH 4710
+ # recreate multi-indexes properly
+
+ index = MultiIndex.from_tuples(
+ [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
+ )
+ df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
+ expected = df.set_axis(df.index.to_numpy())
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df)
+ tm.assert_frame_equal(
+ store["df"], expected, check_index_type=True, check_column_type=True
+ )
+
+ store.put("df1", df, format="table")
+ tm.assert_frame_equal(
+ store["df1"], expected, check_index_type=True, check_column_type=True
+ )
+
+ msg = re.escape("cannot use a multi-index on axis [1] with data_columns ['A']")
+ with pytest.raises(ValueError, match=msg):
+ store.put("df2", df, format="table", data_columns=["A"])
+ msg = re.escape("cannot use a multi-index on axis [1] with data_columns True")
+ with pytest.raises(ValueError, match=msg):
+ store.put("df3", df, format="table", data_columns=True)
+
+ # appending multi-column on existing table (see GH 6167)
+ with ensure_clean_store(setup_path) as store:
+ store.append("df2", df)
+ store.append("df2", df)
+
+ tm.assert_frame_equal(store["df2"], concat((df, df)))
+
+ # non_index_axes name
+ df = DataFrame(np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo"))
+ expected = df.set_axis(df.index.to_numpy())
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("df1", df, format="table")
+ tm.assert_frame_equal(
+ store["df1"], expected, check_index_type=True, check_column_type=True
+ )
+
+
+def test_store_multiindex(setup_path):
+ # validate multi-index names
+ # GH 5527
+ with ensure_clean_store(setup_path) as store:
+
+ def make_index(names=None):
+ dti = date_range("2013-12-01", "2013-12-02")
+ mi = MultiIndex.from_product([dti, range(2), range(3)], names=names)
+ return mi
+
+ # no names
+ _maybe_remove(store, "df")
+ df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
+ store.append("df", df)
+ tm.assert_frame_equal(store.select("df"), df)
+
+ # partial names
+ _maybe_remove(store, "df")
+ df = DataFrame(
+ np.zeros((12, 2)),
+ columns=["a", "b"],
+ index=make_index(["date", None, None]),
+ )
+ store.append("df", df)
+ tm.assert_frame_equal(store.select("df"), df)
+
+ # series
+ _maybe_remove(store, "ser")
+ ser = Series(np.zeros(12), index=make_index(["date", None, None]))
+ store.append("ser", ser)
+ xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
+ tm.assert_series_equal(store.select("ser"), xp)
+
+ # dup with column
+ _maybe_remove(store, "df")
+ df = DataFrame(
+ np.zeros((12, 2)),
+ columns=["a", "b"],
+ index=make_index(["date", "a", "t"]),
+ )
+ msg = "duplicate names/columns in the multi-index when storing as a table"
+ with pytest.raises(ValueError, match=msg):
+ store.append("df", df)
+
+ # dup within level
+ _maybe_remove(store, "df")
+ df = DataFrame(
+ np.zeros((12, 2)),
+ columns=["a", "b"],
+ index=make_index(["date", "date", "date"]),
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df", df)
+
+ # fully named
+ _maybe_remove(store, "df")
+ df = DataFrame(
+ np.zeros((12, 2)),
+ columns=["a", "b"],
+ index=make_index(["date", "s", "t"]),
+ )
+ store.append("df", df)
+ tm.assert_frame_equal(store.select("df"), df)
+
+
+@pytest.mark.parametrize("format", ["fixed", "table"])
+def test_store_periodindex(tmp_path, setup_path, format):
+ # GH 7796
+ # test of PeriodIndex in HDFStore
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 1)),
+ index=pd.period_range("20220101", freq="M", periods=5),
+ )
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", mode="w", format=format)
+ expected = pd.read_hdf(path, "df")
+ tm.assert_frame_equal(df, expected)
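+
+
+def test_periodindex_freq_preserved_sketch(tmp_path, setup_path):
+ # Illustrative sketch (not from the upstream suite): the PeriodIndex
+ # freq survives the fixed-format roundtrip exercised above.
+ df = DataFrame({"A": range(3)}, index=pd.period_range("2020-01", periods=3, freq="M"))
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", mode="w", format="fixed")
+ result = pd.read_hdf(path, "df")
+ assert result.index.freqstr == "M"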
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d9d3afc4ad6f04b070a96922bdba3b5208ba6a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_pytables_missing.py
@@ -0,0 +1,14 @@
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+import pandas._testing as tm
+
+
+@td.skip_if_installed("tables")
+def test_pytables_raises():
+ df = pd.DataFrame({"A": [1, 2]})
+ with pytest.raises(ImportError, match="tables"):
+ with tm.ensure_clean("foo.h5") as path:
+ df.to_hdf(path, key="df")
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_read.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_read.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4a3ea1fc9db871275f07d1d39f75cc1f91216d2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_read.py
@@ -0,0 +1,412 @@
+from contextlib import closing
+from pathlib import Path
+import re
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ HDFStore,
+ Index,
+ Series,
+ _testing as tm,
+ date_range,
+ read_hdf,
+)
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+from pandas.util import _test_decorators as td
+
+from pandas.io.pytables import TableIterator
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_read_missing_key_close_store(tmp_path, setup_path):
+ # GH 25766
+ path = tmp_path / setup_path
+ df = DataFrame({"a": range(2), "b": range(2)})
+ df.to_hdf(path, key="k1")
+
+ with pytest.raises(KeyError, match="'No object named k2 in the file'"):
+ read_hdf(path, "k2")
+
+ # smoke test: the file should be properly closed after a
+ # read that raises KeyError, before another write
+ df.to_hdf(path, key="k2")
+
+
+def test_read_index_error_close_store(tmp_path, setup_path):
+ # GH 25766
+ path = tmp_path / setup_path
+ df = DataFrame({"A": [], "B": []}, index=[])
+ df.to_hdf(path, key="k1")
+
+ with pytest.raises(IndexError, match=r"list index out of range"):
+ read_hdf(path, "k1", stop=0)
+
+ # smoke test: the file should be properly closed after a
+ # read that raises IndexError, before another write
+ df.to_hdf(path, key="k1")
+
+
+def test_read_missing_key_opened_store(tmp_path, setup_path):
+ # GH 28699
+ path = tmp_path / setup_path
+ df = DataFrame({"a": range(2), "b": range(2)})
+ df.to_hdf(path, key="k1")
+
+ with HDFStore(path, "r") as store:
+ with pytest.raises(KeyError, match="'No object named k2 in the file'"):
+ read_hdf(store, "k2")
+
+ # Test that the file is still open after a KeyError and that we can
+ # still read from it.
+ read_hdf(store, "k1")
+
+
+def test_read_column(setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ _maybe_remove(store, "df")
+
+ # GH 17912
+ # HDFStore.select_column should raise a KeyError
+ # if the requested key does not exist in the store
+ with pytest.raises(KeyError, match="No object named df in the file"):
+ store.select_column("df", "index")
+
+ store.append("df", df)
+ # error
+ with pytest.raises(
+ KeyError, match=re.escape("'column [foo] not found in the table'")
+ ):
+ store.select_column("df", "foo")
+
+ msg = re.escape("select_column() got an unexpected keyword argument 'where'")
+ with pytest.raises(TypeError, match=msg):
+ store.select_column("df", "index", where=["index>5"])
+
+ # valid
+ result = store.select_column("df", "index")
+ tm.assert_almost_equal(result.values, Series(df.index).values)
+ assert isinstance(result, Series)
+
+ # not a data indexable column
+ msg = re.escape(
+ "column [values_block_0] can not be extracted individually; "
+ "it is not data indexable"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.select_column("df", "values_block_0")
+
+ # a data column
+ df2 = df.copy()
+ df2["string"] = "foo"
+ store.append("df2", df2, data_columns=["string"])
+ result = store.select_column("df2", "string")
+ tm.assert_almost_equal(result.values, df2["string"].values)
+
+ # a data column with NaNs, result excludes the NaNs
+ df3 = df.copy()
+ df3["string"] = "foo"
+ df3.loc[df3.index[4:6], "string"] = np.nan
+ store.append("df3", df3, data_columns=["string"])
+ result = store.select_column("df3", "string")
+ tm.assert_almost_equal(result.values, df3["string"].values)
+
+ # start/stop
+ result = store.select_column("df3", "string", start=2)
+ tm.assert_almost_equal(result.values, df3["string"].values[2:])
+
+ result = store.select_column("df3", "string", start=-2)
+ tm.assert_almost_equal(result.values, df3["string"].values[-2:])
+
+ result = store.select_column("df3", "string", stop=2)
+ tm.assert_almost_equal(result.values, df3["string"].values[:2])
+
+ result = store.select_column("df3", "string", stop=-2)
+ tm.assert_almost_equal(result.values, df3["string"].values[:-2])
+
+ result = store.select_column("df3", "string", start=2, stop=-2)
+ tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
+
+ result = store.select_column("df3", "string", start=-2, stop=2)
+ tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
+
+ # GH 10392 - make sure column name is preserved
+ df4 = DataFrame({"A": np.random.default_rng(2).standard_normal(10), "B": "foo"})
+ store.append("df4", df4, data_columns=True)
+ expected = df4["B"]
+ result = store.select_column("df4", "B")
+ tm.assert_series_equal(result, expected)
+
+
+def test_pytables_native_read(datapath):
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
+ ) as store:
+ d2 = store["detector/readout"]
+ assert isinstance(d2, DataFrame)
+
+
+@pytest.mark.skipif(is_platform_windows(), reason="native2 read fails oddly on windows")
+def test_pytables_native2_read(datapath):
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
+ ) as store:
+ str(store)
+ d1 = store["detector"]
+ assert isinstance(d1, DataFrame)
+
+
+def test_legacy_table_fixed_format_read_py2(datapath):
+ # GH 24510
+ # legacy table with fixed format written in Python 2
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
+ ) as store:
+ result = store.select("df")
+ expected = DataFrame(
+ [[1, 2, 3, "D"]],
+ columns=["A", "B", "C", "D"],
+ index=Index(["ABC"], name="INDEX_NAME"),
+ )
+ tm.assert_frame_equal(expected, result)
+
+
+def test_legacy_table_fixed_format_read_datetime_py2(datapath):
+ # GH 31750
+ # legacy table with fixed format and datetime64 column written in Python 2
+ expected = DataFrame(
+ [[Timestamp("2020-02-06T18:00")]],
+ columns=["A"],
+ index=Index(["date"]),
+ dtype="M8[ns]",
+ )
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf", "legacy_table_fixed_datetime_py2.h5"),
+ mode="r",
+ ) as store:
+ result = store.select("df")
+ tm.assert_frame_equal(expected, result)
+
+
+def test_legacy_table_read_py2(datapath):
+ # issue: 24925
+ # legacy table written in Python 2
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
+ ) as store:
+ result = store.select("table")
+
+ expected = DataFrame({"a": ["a", "b"], "b": [2, 3]})
+ tm.assert_frame_equal(expected, result)
+
+
+def test_read_hdf_open_store(tmp_path, setup_path):
+ # GH10330
+ # No check for non-string path_or_buf, and no test of open store
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+ df.index.name = "letters"
+ df = df.set_index(keys="E", append=True)
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", mode="w")
+ direct = read_hdf(path, "df")
+ with HDFStore(path, mode="r") as store:
+ indirect = read_hdf(store, "df")
+ tm.assert_frame_equal(direct, indirect)
+ assert store.is_open
+
+
+def test_read_hdf_index_not_view(tmp_path, setup_path):
+ # GH 37441
+ # Ensure that the index of the DataFrame is not a view
+ # into the original recarray that pytables reads in
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=[0, 1, 2, 3],
+ columns=list("ABCDE"),
+ )
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", mode="w", format="table")
+
+ df2 = read_hdf(path, "df")
+ assert df2.index._data.base is None
+ tm.assert_frame_equal(df, df2)
+
+
+def test_read_hdf_iterator(tmp_path, setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+ df.index.name = "letters"
+ df = df.set_index(keys="E", append=True)
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", mode="w", format="t")
+ direct = read_hdf(path, "df")
+ iterator = read_hdf(path, "df", iterator=True)
+ with closing(iterator.store):
+ assert isinstance(iterator, TableIterator)
+ indirect = next(iterator.__iter__())
+ tm.assert_frame_equal(direct, indirect)
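+
+
+def test_read_hdf_chunksize_sketch(tmp_path, setup_path):
+ # Illustrative sketch (not from the upstream suite): chunksize also
+ # yields a TableIterator, and the chunks concatenate back to the
+ # full frame; like iterator=True it requires format="table".
+ df = DataFrame({"A": range(6)})
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table")
+ chunks = list(read_hdf(path, "df", chunksize=2))
+ assert len(chunks) == 3
+ tm.assert_frame_equal(pd.concat(chunks), df)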
+
+
+def test_read_nokey(tmp_path, setup_path):
+ # GH10443
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+
+ # Categorical dtype is not supported for the "fixed" format, so there
+ # is no need to test with that dtype in the dataframe here.
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", mode="a")
+ reread = read_hdf(path)
+ tm.assert_frame_equal(df, reread)
+ df.to_hdf(path, key="df2", mode="a")
+
+ msg = "key must be provided when HDF5 file contains multiple datasets."
+ with pytest.raises(ValueError, match=msg):
+ read_hdf(path)
+
+
+def test_read_nokey_table(tmp_path, setup_path):
+ # GH13231
+ df = DataFrame({"i": range(5), "c": Series(list("abacd"), dtype="category")})
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", mode="a", format="table")
+ reread = read_hdf(path)
+ tm.assert_frame_equal(df, reread)
+ df.to_hdf(path, key="df2", mode="a", format="table")
+
+ msg = "key must be provided when HDF5 file contains multiple datasets."
+ with pytest.raises(ValueError, match=msg):
+ read_hdf(path)
+
+
+def test_read_nokey_empty(tmp_path, setup_path):
+ path = tmp_path / setup_path
+ store = HDFStore(path)
+ store.close()
+ msg = re.escape(
+ "Dataset(s) incompatible with Pandas data types, not table, or no "
+ "datasets found in HDF5 file."
+ )
+ with pytest.raises(ValueError, match=msg):
+ read_hdf(path)
+
+
+def test_read_from_pathlib_path(tmp_path, setup_path):
+ # GH11773
+ expected = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+ filename = tmp_path / setup_path
+ path_obj = Path(filename)
+
+ expected.to_hdf(path_obj, key="df", mode="a")
+ actual = read_hdf(path_obj, key="df")
+
+ tm.assert_frame_equal(expected, actual)
+
+
+@td.skip_if_no("py.path")
+def test_read_from_py_localpath(tmp_path, setup_path):
+ # GH11773
+ from py.path import local as LocalPath
+
+ expected = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+ filename = tmp_path / setup_path
+ path_obj = LocalPath(filename)
+
+ expected.to_hdf(path_obj, key="df", mode="a")
+ actual = read_hdf(path_obj, key="df")
+
+ tm.assert_frame_equal(expected, actual)
+
+
+@pytest.mark.parametrize("format", ["fixed", "table"])
+def test_read_hdf_series_mode_r(tmp_path, format, setup_path):
+ # GH 16583
+ # Tests that reading a Series saved to an HDF file
+ # still works if a mode='r' argument is supplied
+ series = Series(range(10), dtype=np.float64)
+ path = tmp_path / setup_path
+ series.to_hdf(path, key="data", format=format)
+ result = read_hdf(path, key="data", mode="r")
+ tm.assert_series_equal(result, series)
+
+
+@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning")
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+def test_read_py2_hdf_file_in_py3(datapath):
+ # GH 16781
+
+ # tests reading a PeriodIndex DataFrame written in Python2 in Python3
+
+ # the file was generated in Python 2.7 like so:
+ #
+ # df = DataFrame([1.,2,3], index=pd.PeriodIndex(
+ # ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
+ # df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')
+
+ expected = DataFrame(
+ [1.0, 2, 3],
+ index=pd.PeriodIndex(["2015-01-01", "2015-01-02", "2015-01-05"], freq="B"),
+ )
+
+ with ensure_clean_store(
+ datapath(
+ "io", "data", "legacy_hdf", "periodindex_0.20.1_x86_64_darwin_2.7.13.h5"
+ ),
+ mode="r",
+ ) as store:
+ result = store["p"]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_read_infer_string(tmp_path, setup_path):
+ # GH#54431
+ pytest.importorskip("pyarrow")
+ df = DataFrame({"a": ["a", "b", None]})
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="data", format="table")
+ with pd.option_context("future.infer_string", True):
+ result = read_hdf(path, key="data", mode="r")
+ expected = DataFrame(
+ {"a": ["a", "b", None]},
+ dtype="string[pyarrow_numpy]",
+ columns=Index(["a"], dtype="string[pyarrow_numpy]"),
+ )
+ tm.assert_frame_equal(result, expected)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_retain_attributes.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_retain_attributes.py
new file mode 100644
index 0000000000000000000000000000000000000000..6284b826c3cf01fed6ce50e5519f6c2f543b8c64
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_retain_attributes.py
@@ -0,0 +1,92 @@
+import pytest
+
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ Series,
+ _testing as tm,
+ date_range,
+ errors,
+ read_hdf,
+)
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_retain_index_attributes(setup_path, unit):
+ # GH 3499, losing frequency info on index recreation
+ dti = date_range("2000-1-1", periods=3, freq="h", unit=unit)
+ df = DataFrame({"A": Series(range(3), index=dti)})
+
+ with ensure_clean_store(setup_path) as store:
+ _maybe_remove(store, "data")
+ store.put("data", df, format="table")
+
+ result = store.get("data")
+ tm.assert_frame_equal(df, result)
+
+ for attr in ["freq", "tz", "name"]:
+ for idx in ["index", "columns"]:
+ assert getattr(getattr(df, idx), attr, None) == getattr(
+ getattr(result, idx), attr, None
+ )
+
+ dti2 = date_range("2002-1-1", periods=3, freq="D", unit=unit)
+ # try to append a table with a different frequency
+ with tm.assert_produces_warning(errors.AttributeConflictWarning):
+ df2 = DataFrame({"A": Series(range(3), index=dti2)})
+ store.append("data", df2)
+
+ assert store.get_storer("data").info["index"]["freq"] is None
+
+ # this is ok
+ _maybe_remove(store, "df2")
+ dti3 = DatetimeIndex(
+ ["2001-01-01", "2001-01-02", "2002-01-01"], dtype=f"M8[{unit}]"
+ )
+ df2 = DataFrame(
+ {
+ "A": Series(
+ range(3),
+ index=dti3,
+ )
+ }
+ )
+ store.append("df2", df2)
+ dti4 = date_range("2002-1-1", periods=3, freq="D", unit=unit)
+ df3 = DataFrame({"A": Series(range(3), index=dti4)})
+ store.append("df2", df3)
+
+
+def test_retain_index_attributes2(tmp_path, setup_path):
+ path = tmp_path / setup_path
+
+ with tm.assert_produces_warning(errors.AttributeConflictWarning):
+ df = DataFrame(
+ {"A": Series(range(3), index=date_range("2000-1-1", periods=3, freq="h"))}
+ )
+ df.to_hdf(path, key="data", mode="w", append=True)
+ df2 = DataFrame(
+ {"A": Series(range(3), index=date_range("2002-1-1", periods=3, freq="D"))}
+ )
+
+ df2.to_hdf(path, key="data", append=True)
+
+ idx = date_range("2000-1-1", periods=3, freq="h")
+ idx.name = "foo"
+ df = DataFrame({"A": Series(range(3), index=idx)})
+ df.to_hdf(path, key="data", mode="w", append=True)
+
+ assert read_hdf(path, key="data").index.name == "foo"
+
+ with tm.assert_produces_warning(errors.AttributeConflictWarning):
+ idx2 = date_range("2001-1-1", periods=3, freq="h")
+ idx2.name = "bar"
+ df2 = DataFrame({"A": Series(range(3), index=idx2)})
+ df2.to_hdf(path, key="data", append=True)
+
+ assert read_hdf(path, "data").index.name is None
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ba9787a5a6b9ec3dcfa60b64b0e43a8af1d1afc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_round_trip.py
@@ -0,0 +1,578 @@
+import datetime
+import re
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+from pandas.compat import is_platform_windows
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ Index,
+ Series,
+ _testing as tm,
+ bdate_range,
+ date_range,
+ read_hdf,
+)
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+from pandas.util import _test_decorators as td
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_conv_read_write():
+ with tm.ensure_clean() as path:
+
+ def roundtrip(key, obj, **kwargs):
+ obj.to_hdf(path, key=key, **kwargs)
+ return read_hdf(path, key)
+
+ o = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ tm.assert_series_equal(o, roundtrip("series", o))
+
+ o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
+ tm.assert_series_equal(o, roundtrip("string_series", o))
+
+ o = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ tm.assert_frame_equal(o, roundtrip("frame", o))
+
+ # table
+ df = DataFrame({"A": range(5), "B": range(5)})
+ df.to_hdf(path, key="table", append=True)
+ result = read_hdf(path, "table", where=["index>2"])
+ tm.assert_frame_equal(df[df.index > 2], result)
+
+
+def test_long_strings(setup_path):
+ # GH6166
+ data = ["a" * 50] * 10
+ df = DataFrame({"a": data}, index=data)
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("df", df, data_columns=["a"])
+
+ result = store.select("df")
+ tm.assert_frame_equal(df, result)
+
+
+def test_api(tmp_path, setup_path):
+ # GH4584
+ # API issue: to_hdf should accept both the append and format args
+ path = tmp_path / setup_path
+
+ df = DataFrame(range(20))
+ df.iloc[:10].to_hdf(path, key="df", append=True, format="table")
+ df.iloc[10:].to_hdf(path, key="df", append=True, format="table")
+ tm.assert_frame_equal(read_hdf(path, "df"), df)
+
+ # append=False
+ df.iloc[:10].to_hdf(path, key="df", append=False, format="table")
+ df.iloc[10:].to_hdf(path, key="df", append=True, format="table")
+ tm.assert_frame_equal(read_hdf(path, "df"), df)
+
+
+def test_api_append(tmp_path, setup_path):
+ path = tmp_path / setup_path
+
+ df = DataFrame(range(20))
+ df.iloc[:10].to_hdf(path, key="df", append=True)
+ df.iloc[10:].to_hdf(path, key="df", append=True, format="table")
+ tm.assert_frame_equal(read_hdf(path, "df"), df)
+
+ # append=False
+ df.iloc[:10].to_hdf(path, key="df", append=False, format="table")
+ df.iloc[10:].to_hdf(path, key="df", append=True)
+ tm.assert_frame_equal(read_hdf(path, "df"), df)
+
+
+def test_api_2(tmp_path, setup_path):
+ path = tmp_path / setup_path
+
+ df = DataFrame(range(20))
+ df.to_hdf(path, key="df", append=False, format="fixed")
+ tm.assert_frame_equal(read_hdf(path, "df"), df)
+
+ df.to_hdf(path, key="df", append=False, format="f")
+ tm.assert_frame_equal(read_hdf(path, "df"), df)
+
+ df.to_hdf(path, key="df", append=False)
+ tm.assert_frame_equal(read_hdf(path, "df"), df)
+
+ df.to_hdf(path, key="df")
+ tm.assert_frame_equal(read_hdf(path, "df"), df)
+
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(range(20))
+
+ _maybe_remove(store, "df")
+ store.append("df", df.iloc[:10], append=True, format="table")
+ store.append("df", df.iloc[10:], append=True, format="table")
+ tm.assert_frame_equal(store.select("df"), df)
+
+ # append=False
+ _maybe_remove(store, "df")
+ store.append("df", df.iloc[:10], append=False, format="table")
+ store.append("df", df.iloc[10:], append=True, format="table")
+ tm.assert_frame_equal(store.select("df"), df)
+
+ # formats
+ _maybe_remove(store, "df")
+ store.append("df", df.iloc[:10], append=False, format="table")
+ store.append("df", df.iloc[10:], append=True, format="table")
+ tm.assert_frame_equal(store.select("df"), df)
+
+ _maybe_remove(store, "df")
+ store.append("df", df.iloc[:10], append=False, format="table")
+ store.append("df", df.iloc[10:], append=True, format=None)
+ tm.assert_frame_equal(store.select("df"), df)
+
+
+def test_api_invalid(tmp_path, setup_path):
+ path = tmp_path / setup_path
+ # Invalid.
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ msg = "Can only append to Tables"
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_hdf(path, key="df", append=True, format="f")
+
+ with pytest.raises(ValueError, match=msg):
+ df.to_hdf(path, key="df", append=True, format="fixed")
+
+ msg = r"invalid HDFStore format specified \[foo\]"
+
+ with pytest.raises(TypeError, match=msg):
+ df.to_hdf(path, key="df", append=True, format="foo")
+
+ with pytest.raises(TypeError, match=msg):
+ df.to_hdf(path, key="df", append=False, format="foo")
+
+ # File path doesn't exist
+ path = ""
+ msg = f"File {path} does not exist"
+
+ with pytest.raises(FileNotFoundError, match=msg):
+ read_hdf(path, "df")
+
+
+def test_get(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ store["a"] = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ left = store.get("a")
+ right = store["a"]
+ tm.assert_series_equal(left, right)
+
+ left = store.get("/a")
+ right = store["/a"]
+ tm.assert_series_equal(left, right)
+
+ with pytest.raises(KeyError, match="'No object named b in the file'"):
+ store.get("b")
+
+
+def test_put_integer(setup_path):
+ # non-date, non-string index
+ df = DataFrame(np.random.default_rng(2).standard_normal((50, 100)))
+ _check_roundtrip(df, tm.assert_frame_equal, setup_path)
+
+
+def test_table_values_dtypes_roundtrip(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
+ store.append("df_f8", df1)
+ tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
+
+ df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
+ store.append("df_i8", df2)
+ tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
+
+ # incompatible dtype
+ msg = re.escape(
+ "invalid combination of [values_axes] on appending data "
+ "[name->values_block_0,cname->values_block_0,"
+ "dtype->float64,kind->float,shape->(1, 3)] vs "
+ "current table [name->values_block_0,"
+ "cname->values_block_0,dtype->int64,kind->integer,"
+ "shape->None]"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df_i8", df1)
+
+ # check creation/storage/retrieval of float32 (a bit hacky to
+ # actually create them, though)
+ df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
+ store.append("df_f4", df1)
+ tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
+ assert df1.dtypes.iloc[0] == "float32"
+
+ # check with mixed dtypes
+ df1 = DataFrame(
+ {
+ c: Series(np.random.default_rng(2).integers(5), dtype=c)
+ for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
+ }
+ )
+ df1["string"] = "foo"
+ df1["float322"] = 1.0
+ df1["float322"] = df1["float322"].astype("float32")
+ df1["bool"] = df1["float32"] > 0
+ df1["time1"] = Timestamp("20130101")
+ df1["time2"] = Timestamp("20130102")
+
+ store.append("df_mixed_dtypes1", df1)
+ result = store.select("df_mixed_dtypes1").dtypes.value_counts()
+ result.index = [str(i) for i in result.index]
+ expected = Series(
+ {
+ "float32": 2,
+ "float64": 1,
+ "int32": 1,
+ "bool": 1,
+ "int16": 1,
+ "int8": 1,
+ "int64": 1,
+ "object": 1,
+ "datetime64[ns]": 2,
+ },
+ name="count",
+ )
+ result = result.sort_index()
+ expected = expected.sort_index()
+ tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
+def test_series(setup_path):
+ s = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
+ _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
+
+ ts = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ _check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
+
+ ts2 = Series(ts.index, Index(ts.index, dtype=object))
+ _check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
+
+ ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
+ _check_roundtrip(
+ ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
+ )
+
+
+def test_float_index(setup_path):
+ # GH #454
+ index = np.random.default_rng(2).standard_normal(10)
+ s = Series(np.random.default_rng(2).standard_normal(10), index=index)
+ _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
+
+
+def test_tuple_index(setup_path):
+ # GH #492
+ col = np.arange(10)
+ idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
+ data = np.random.default_rng(2).standard_normal(30).reshape((3, 10))
+ DF = DataFrame(data, index=idx, columns=col)
+
+ with tm.assert_produces_warning(pd.errors.PerformanceWarning):
+ _check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
+
+
+@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
+def test_index_types(setup_path):
+ values = np.random.default_rng(2).standard_normal(2)
+
+ func = lambda lhs, rhs: tm.assert_series_equal(lhs, rhs, check_index_type=True)
+
+ ser = Series(values, [0, "y"])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, [datetime.datetime.today(), 0])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, ["y", 0])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, [datetime.date.today(), "a"])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, [0, "y"])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, [datetime.datetime.today(), 0])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, ["y", 0])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, [datetime.date.today(), "a"])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, [1.23, "b"])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, [1, 1.53])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser = Series(values, [1, 5])
+ _check_roundtrip(ser, func, path=setup_path)
+
+ dti = DatetimeIndex(["2012-01-01", "2012-01-02"], dtype="M8[ns]")
+ ser = Series(values, index=dti)
+ _check_roundtrip(ser, func, path=setup_path)
+
+ ser.index = ser.index.as_unit("s")
+ _check_roundtrip(ser, func, path=setup_path)
+
+
+def test_timeseries_preepoch(setup_path, request):
+ dr = bdate_range("1/1/1940", "1/1/1960")
+ ts = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr)
+ try:
+ _check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
+ except OverflowError:
+ if is_platform_windows():
+ request.applymarker(
+ pytest.mark.xfail(reason="known failure on some windows platforms")
+ )
+ raise
+
+
+@pytest.mark.parametrize(
+ "compression", [False, pytest.param(True, marks=td.skip_if_windows)]
+)
+def test_frame(compression, setup_path):
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ # put in some random NAs
+ df.iloc[0, 0] = np.nan
+ df.iloc[5, 3] = np.nan
+
+ _check_roundtrip_table(
+ df, tm.assert_frame_equal, path=setup_path, compression=compression
+ )
+ _check_roundtrip(
+ df, tm.assert_frame_equal, path=setup_path, compression=compression
+ )
+
+ tdf = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ _check_roundtrip(
+ tdf, tm.assert_frame_equal, path=setup_path, compression=compression
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ # not consolidated
+ df["foo"] = np.random.default_rng(2).standard_normal(len(df))
+ store["df"] = df
+ recons = store["df"]
+ assert recons._mgr.is_consolidated()
+
+ # empty
+ _check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
+
+
+def test_empty_series_frame(setup_path):
+ s0 = Series(dtype=object)
+ s1 = Series(name="myseries", dtype=object)
+ df0 = DataFrame()
+ df1 = DataFrame(index=["a", "b", "c"])
+ df2 = DataFrame(columns=["d", "e", "f"])
+
+ _check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
+ _check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
+ _check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
+ _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
+ _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
+
+
+@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"])
+def test_empty_series(dtype, setup_path):
+ s = Series(dtype=dtype)
+ _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
+
+
+def test_can_serialize_dates(setup_path):
+ rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+ )
+
+ _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
+
+
+def test_store_hierarchical(setup_path, multiindex_dataframe_random_data):
+ frame = multiindex_dataframe_random_data
+
+ _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
+ _check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
+ _check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
+
+ # check that the names are stored
+ with ensure_clean_store(setup_path) as store:
+ store["frame"] = frame
+ recons = store["frame"]
+ tm.assert_frame_equal(recons, frame)
+
+
+@pytest.mark.parametrize(
+ "compression", [False, pytest.param(True, marks=td.skip_if_windows)]
+)
+def test_store_mixed(compression, setup_path):
+ def _make_one():
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df["obj1"] = "foo"
+ df["obj2"] = "bar"
+ df["bool1"] = df["A"] > 0
+ df["bool2"] = df["B"] > 0
+ df["int1"] = 1
+ df["int2"] = 2
+ return df._consolidate()
+
+ df1 = _make_one()
+ df2 = _make_one()
+
+ _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
+ _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
+
+ with ensure_clean_store(setup_path) as store:
+ store["obj"] = df1
+ tm.assert_frame_equal(store["obj"], df1)
+ store["obj"] = df2
+ tm.assert_frame_equal(store["obj"], df2)
+
+ # check that can store Series of all of these types
+ _check_roundtrip(
+ df1["obj1"],
+ tm.assert_series_equal,
+ path=setup_path,
+ compression=compression,
+ )
+ _check_roundtrip(
+ df1["bool1"],
+ tm.assert_series_equal,
+ path=setup_path,
+ compression=compression,
+ )
+ _check_roundtrip(
+ df1["int1"],
+ tm.assert_series_equal,
+ path=setup_path,
+ compression=compression,
+ )
+
+
+def _check_roundtrip(obj, comparator, path, compression=False, **kwargs):
+ options = {}
+ if compression:
+ options["complib"] = "blosc"
+
+ with ensure_clean_store(path, "w", **options) as store:
+ store["obj"] = obj
+ retrieved = store["obj"]
+ comparator(retrieved, obj, **kwargs)
+
+
+def _check_roundtrip_table(obj, comparator, path, compression=False):
+ options = {}
+ if compression:
+ options["complib"] = "blosc"
+
+ with ensure_clean_store(path, "w", **options) as store:
+ store.put("obj", obj, format="table")
+ retrieved = store["obj"]
+
+ comparator(retrieved, obj)
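+
+
+def test_roundtrip_helper_usage_sketch(setup_path):
+ # Illustrative sketch (not from the upstream suite): the helpers
+ # above take any comparator and forward extra keywords to it, so
+ # comparison options ride along with the roundtrip.
+ ser = Series([1.0, 2.0, 3.0], name="vals")
+ _check_roundtrip(ser, tm.assert_series_equal, path=setup_path, check_names=True)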
+
+
+def test_unicode_index(setup_path):
+ unicode_values = ["\u03c3", "\u03c3\u03c3"]
+
+ s = Series(
+ np.random.default_rng(2).standard_normal(len(unicode_values)),
+ unicode_values,
+ )
+ _check_roundtrip(s, tm.assert_series_equal, path=setup_path)
+
+
+def test_unicode_longer_encoded(setup_path):
+ # GH 11234
+ char = "\u0394"
+ df = DataFrame({"A": [char]})
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df, format="table", encoding="utf-8")
+ result = store.get("df")
+ tm.assert_frame_equal(result, df)
+
+ df = DataFrame({"A": ["a", char], "B": ["b", "b"]})
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df, format="table", encoding="utf-8")
+ result = store.get("df")
+ tm.assert_frame_equal(result, df)
+
+
+def test_store_datetime_mixed(setup_path):
+ df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]})
+ ts = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ df["d"] = ts.index[:3]
+ _check_roundtrip(df, tm.assert_frame_equal, path=setup_path)
+
+
+def test_round_trip_equals(tmp_path, setup_path):
+ # GH 9330
+ df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table")
+ other = read_hdf(path, "df")
+ tm.assert_frame_equal(df, other)
+ assert df.equals(other)
+ assert other.equals(df)
+
+
+def test_infer_string_columns(tmp_path, setup_path):
+ # GH#
+ pytest.importorskip("pyarrow")
+ path = tmp_path / setup_path
+ with pd.option_context("future.infer_string", True):
+ df = DataFrame(1, columns=list("ABCD"), index=list(range(10))).set_index(
+ ["A", "B"]
+ )
+ expected = df.copy()
+ df.to_hdf(path, key="df", format="table")
+
+ result = read_hdf(path, "df")
+ tm.assert_frame_equal(result, expected)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e303d1c890c5b6ea4fcfb8d526297981a14069b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_select.py
@@ -0,0 +1,1047 @@
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs import Timestamp
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ HDFStore,
+ Index,
+ MultiIndex,
+ Series,
+ _testing as tm,
+ bdate_range,
+ concat,
+ date_range,
+ isna,
+ read_hdf,
+)
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+
+from pandas.io.pytables import Term
+
+pytestmark = pytest.mark.single_cpu
+
+
+def test_select_columns_in_where(setup_path):
+ # GH 6169
+ # recreate multi-indexes when columns is passed
+ # in the `where` argument
+ index = MultiIndex(
+ levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
+ codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
+ names=["foo_name", "bar_name"],
+ )
+
+ # With a DataFrame
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 3)),
+ index=index,
+ columns=["A", "B", "C"],
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df, format="table")
+ expected = df[["A"]]
+
+ tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
+
+ tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
+
+ # With a Series
+ s = Series(np.random.default_rng(2).standard_normal(10), index=index, name="A")
+ with ensure_clean_store(setup_path) as store:
+ store.put("s", s, format="table")
+ tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
+
+
+def test_select_with_dups(setup_path):
+ # single dtypes
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)), columns=["A", "A", "B", "B"]
+ )
+ df.index = date_range("20130101 9:30", periods=10, freq="min")
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("df", df)
+
+ result = store.select("df")
+ expected = df
+ tm.assert_frame_equal(result, expected, by_blocks=True)
+
+ result = store.select("df", columns=df.columns)
+ expected = df
+ tm.assert_frame_equal(result, expected, by_blocks=True)
+
+ result = store.select("df", columns=["A"])
+ expected = df.loc[:, ["A"]]
+ tm.assert_frame_equal(result, expected)
+
+ # dups across dtypes
+ df = concat(
+ [
+ DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=["A", "A", "B", "B"],
+ ),
+ DataFrame(
+ np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2),
+ columns=["A", "C"],
+ ),
+ ],
+ axis=1,
+ )
+ df.index = date_range("20130101 9:30", periods=10, freq="min")
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("df", df)
+
+ result = store.select("df")
+ expected = df
+ tm.assert_frame_equal(result, expected, by_blocks=True)
+
+ result = store.select("df", columns=df.columns)
+ expected = df
+ tm.assert_frame_equal(result, expected, by_blocks=True)
+
+ expected = df.loc[:, ["A"]]
+ result = store.select("df", columns=["A"])
+ tm.assert_frame_equal(result, expected, by_blocks=True)
+
+ expected = df.loc[:, ["B", "A"]]
+ result = store.select("df", columns=["B", "A"])
+ tm.assert_frame_equal(result, expected, by_blocks=True)
+
+ # duplicates on both index and columns
+ with ensure_clean_store(setup_path) as store:
+ store.append("df", df)
+ store.append("df", df)
+
+ expected = df.loc[:, ["B", "A"]]
+ expected = concat([expected, expected])
+ result = store.select("df", columns=["B", "A"])
+ tm.assert_frame_equal(result, expected, by_blocks=True)
+
+
+def test_select(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # select with columns=
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ _maybe_remove(store, "df")
+ store.append("df", df)
+ result = store.select("df", columns=["A", "B"])
+ expected = df.reindex(columns=["A", "B"])
+ tm.assert_frame_equal(expected, result)
+
+ # equivalently
+ result = store.select("df", [("columns=['A', 'B']")])
+ expected = df.reindex(columns=["A", "B"])
+ tm.assert_frame_equal(expected, result)
+
+ # with a data column
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=["A"])
+ result = store.select("df", ["A > 0"], columns=["A", "B"])
+ expected = df[df.A > 0].reindex(columns=["A", "B"])
+ tm.assert_frame_equal(expected, result)
+
+ # all columns as data columns
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=True)
+ result = store.select("df", ["A > 0"], columns=["A", "B"])
+ expected = df[df.A > 0].reindex(columns=["A", "B"])
+ tm.assert_frame_equal(expected, result)
+
+ # with a data column, but different columns
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=["A"])
+ result = store.select("df", ["A > 0"], columns=["C", "D"])
+ expected = df[df.A > 0].reindex(columns=["C", "D"])
+ tm.assert_frame_equal(expected, result)
+
+
+def test_select_dtypes(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # with a Timestamp data column (GH #2637)
+ df = DataFrame(
+ {
+ "ts": bdate_range("2012-01-01", periods=300),
+ "A": np.random.default_rng(2).standard_normal(300),
+ }
+ )
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=["ts", "A"])
+
+ result = store.select("df", "ts>=Timestamp('2012-02-01')")
+ expected = df[df.ts >= Timestamp("2012-02-01")]
+ tm.assert_frame_equal(expected, result)
+
+ # bool columns (GH #2849)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((5, 2)), columns=["A", "B"]
+ )
+ df["object"] = "foo"
+ df.loc[4:5, "object"] = "bar"
+ df["boolv"] = df["A"] > 0
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=True)
+
+ expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa: E712
+ for v in [True, "true", 1]:
+ result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
+ tm.assert_frame_equal(expected, result)
+
+ expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa: E712
+ for v in [False, "false", 0]:
+ result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
+ tm.assert_frame_equal(expected, result)
+
+ # integer index
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).random(20),
+ "B": np.random.default_rng(2).random(20),
+ }
+ )
+ _maybe_remove(store, "df_int")
+ store.append("df_int", df)
+ result = store.select("df_int", "index<10 and columns=['A']")
+ expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
+ tm.assert_frame_equal(expected, result)
+
+ # float index
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).random(20),
+ "B": np.random.default_rng(2).random(20),
+ "index": np.arange(20, dtype="f8"),
+ }
+ )
+ _maybe_remove(store, "df_float")
+ store.append("df_float", df)
+ result = store.select("df_float", "index<10.0 and columns=['A']")
+ expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
+ tm.assert_frame_equal(expected, result)
+
+ with ensure_clean_store(setup_path) as store:
+ # floats w/o NaN
+ df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
+ df["cols"] = (df["cols"] + 10).apply(str)
+
+ store.append("df1", df, data_columns=True)
+ result = store.select("df1", where="values>2.0")
+ expected = df[df["values"] > 2.0]
+ tm.assert_frame_equal(expected, result)
+
+ # floats with NaN
+ df.iloc[0] = np.nan
+ expected = df[df["values"] > 2.0]
+
+ store.append("df2", df, data_columns=True, index=False)
+ result = store.select("df2", where="values>2.0")
+ tm.assert_frame_equal(expected, result)
+
+ # https://github.com/PyTables/PyTables/issues/282
+ # bug in selection when 0th row has a np.nan and an index
+ # store.append('df3',df,data_columns=True)
+ # result = store.select(
+ # 'df3', where='values>2.0')
+ # tm.assert_frame_equal(expected, result)
+
+ # floats with a NaN not in the first position are ok too
+ df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
+ df["cols"] = (df["cols"] + 10).apply(str)
+
+ df.iloc[1] = np.nan
+ expected = df[df["values"] > 2.0]
+
+ store.append("df4", df, data_columns=True)
+ result = store.select("df4", where="values>2.0")
+ tm.assert_frame_equal(expected, result)
+
+ # test selection with comparison against numpy scalar
+ # GH 11283
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ expected = df[df["A"] > 0]
+
+ store.append("df", df, data_columns=True)
+ np_zero = np.float64(0) # noqa: F841
+ result = store.select("df", where=["A>np_zero"])
+ tm.assert_frame_equal(expected, result)
+
+
+def test_select_with_many_inputs(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ {
+ "ts": bdate_range("2012-01-01", periods=300),
+ "A": np.random.default_rng(2).standard_normal(300),
+ "B": range(300),
+ "users": ["a"] * 50
+ + ["b"] * 50
+ + ["c"] * 100
+ + [f"a{i:03d}" for i in range(100)],
+ }
+ )
+ _maybe_remove(store, "df")
+ store.append("df", df, data_columns=["ts", "A", "B", "users"])
+
+ # regular select
+ result = store.select("df", "ts>=Timestamp('2012-02-01')")
+ expected = df[df.ts >= Timestamp("2012-02-01")]
+ tm.assert_frame_equal(expected, result)
+
+ # small selector
+ result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
+ expected = df[
+ (df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
+ ]
+ tm.assert_frame_equal(expected, result)
+
+ # big selector along the columns
+ selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
+ result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
+ expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
+ tm.assert_frame_equal(expected, result)
+
+ selector = range(100, 200)
+ result = store.select("df", "B=selector")
+ expected = df[df.B.isin(selector)]
+ tm.assert_frame_equal(expected, result)
+ assert len(result) == 100
+
+ # big selector along the index
+ selector = Index(df.ts[0:100].values)
+ result = store.select("df", "ts=selector")
+ expected = df[df.ts.isin(selector.values)]
+ tm.assert_frame_equal(expected, result)
+ assert len(result) == 100
+
+
+def test_select_iterator(tmp_path, setup_path):
+ # single table
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ _maybe_remove(store, "df")
+ store.append("df", df)
+
+ expected = store.select("df")
+
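+ # iterator=True returns a lazy iterator of DataFrame chunks rather
+ # than a single frame; concatenating the chunks reproduces the full
+ # selection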
+ results = list(store.select("df", iterator=True))
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ results = list(store.select("df", chunksize=2))
+ assert len(results) == 5
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ path = tmp_path / setup_path
+
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df.to_hdf(path, key="df_non_table")
+
+ msg = "can only use an iterator or chunksize on a table"
+ with pytest.raises(TypeError, match=msg):
+ read_hdf(path, "df_non_table", chunksize=2)
+
+ with pytest.raises(TypeError, match=msg):
+ read_hdf(path, "df_non_table", iterator=True)
+
+ path = tmp_path / setup_path
+
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df.to_hdf(path, key="df", format="table")
+
+ results = list(read_hdf(path, "df", chunksize=2))
+ result = concat(results)
+
+ assert len(results) == 5
+ tm.assert_frame_equal(result, df)
+ tm.assert_frame_equal(result, read_hdf(path, "df"))
+
+ # multiple
+
+ with ensure_clean_store(setup_path) as store:
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ store.append("df1", df1, data_columns=True)
+ df2 = df1.copy().rename(columns="{}_2".format)
+ df2["foo"] = "bar"
+ store.append("df2", df2)
+
+ df = concat([df1, df2], axis=1)
+
+ # full selection
+ expected = store.select_as_multiple(["df1", "df2"], selector="df1")
+ results = list(
+ store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=2)
+ )
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+
+def test_select_iterator_complete_8014(setup_path):
+ # GH 8014
+ # using iterator and where clause
+ chunksize = 1e4
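+ # 100064 rows read in 10_000-row chunks -> 11 chunks, the last partial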
+
+ # no iterator
+ with ensure_clean_store(setup_path) as store:
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
+ _maybe_remove(store, "df")
+ store.append("df", expected)
+
+ beg_dt = expected.index[0]
+ end_dt = expected.index[-1]
+
+ # select w/o iteration and no where clause works
+ result = store.select("df")
+ tm.assert_frame_equal(expected, result)
+
+ # select w/o iterator and where clause, single term, begin
+ # of range, works
+ where = f"index >= '{beg_dt}'"
+ result = store.select("df", where=where)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/o iterator and where clause, single term, end
+ # of range, works
+ where = f"index <= '{end_dt}'"
+ result = store.select("df", where=where)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/o iterator and where clause, inclusive range,
+ # works
+ where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
+ result = store.select("df", where=where)
+ tm.assert_frame_equal(expected, result)
+
+ # with iterator, full range
+ with ensure_clean_store(setup_path) as store:
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
+ _maybe_remove(store, "df")
+ store.append("df", expected)
+
+ beg_dt = expected.index[0]
+ end_dt = expected.index[-1]
+
+ # select w/iterator and no where clause works
+ results = list(store.select("df", chunksize=chunksize))
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/iterator and where clause, single term, begin of range
+ where = f"index >= '{beg_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/iterator and where clause, single term, end of range
+ where = f"index <= '{end_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+ # select w/iterator and where clause, inclusive range
+ where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+ result = concat(results)
+ tm.assert_frame_equal(expected, result)
+
+
+def test_select_iterator_non_complete_8014(setup_path):
+ # GH 8014
+ # using iterator and where clause
+ chunksize = 1e4
+
+ # with iterator, non complete range
+ with ensure_clean_store(setup_path) as store:
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
+ _maybe_remove(store, "df")
+ store.append("df", expected)
+
+ beg_dt = expected.index[1]
+ end_dt = expected.index[-2]
+
+ # select w/iterator and where clause, single term, begin of range
+ where = f"index >= '{beg_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+ result = concat(results)
+ rexpected = expected[expected.index >= beg_dt]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause, single term, end of range
+ where = f"index <= '{end_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+ result = concat(results)
+ rexpected = expected[expected.index <= end_dt]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause, inclusive range
+ where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+ result = concat(results)
+ rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
+ tm.assert_frame_equal(rexpected, result)
+
+ # with iterator, empty where
+ with ensure_clean_store(setup_path) as store:
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
+ _maybe_remove(store, "df")
+ store.append("df", expected)
+
+ end_dt = expected.index[-1]
+
+ # select w/iterator and where clause, single term, begin of range
+ where = f"index > '{end_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+ assert len(results) == 0
+
+
+def test_select_iterator_many_empty_frames(setup_path):
+ # GH 8014
+ # using iterator and where clause can return many empty
+ # frames.
+ chunksize = 10_000
+
+ # with iterator, range limited to the first chunk
+ with ensure_clean_store(setup_path) as store:
+ expected = DataFrame(
+ np.random.default_rng(2).standard_normal((100064, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=100064, freq="s"),
+ )
+ _maybe_remove(store, "df")
+ store.append("df", expected)
+
+ beg_dt = expected.index[0]
+ end_dt = expected.index[chunksize - 1]
+
+ # select w/iterator and where clause, single term, begin of range
+ where = f"index >= '{beg_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+ result = concat(results)
+ rexpected = expected[expected.index >= beg_dt]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause, single term, end of range
+ where = f"index <= '{end_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+
+ assert len(results) == 1
+ result = concat(results)
+ rexpected = expected[expected.index <= end_dt]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and where clause, inclusive range
+ where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+
+ # expect a single chunk; before the GH 8014 fix this produced 10 chunks
+ assert len(results) == 1
+ result = concat(results)
+ rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
+ tm.assert_frame_equal(rexpected, result)
+
+ # select w/iterator and a where clause which selects *nothing*;
+ # consistent with Python iteration idiom this should return an
+ # empty list, just as `for e in []: ...` never executes its body
+
+ where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
+ results = list(store.select("df", where=where, chunksize=chunksize))
+
+ # should be []
+ assert len(results) == 0
+
+
+def test_frame_select(setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("frame", df, format="table")
+ date = df.index[len(df) // 2]
+
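+ # Term captures names like 'date' from the enclosing scope when the
+ # expression is parsed, as the assertion on crit1.env.scope checks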
+ crit1 = Term("index>=date")
+ assert crit1.env.scope["date"] == date
+
+ crit2 = "columns=['A', 'D']"
+ crit3 = "columns=A"
+
+ result = store.select("frame", [crit1, crit2])
+ expected = df.loc[date:, ["A", "D"]]
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("frame", [crit3])
+ expected = df.loc[:, ["A"]]
+ tm.assert_frame_equal(result, expected)
+
+ # invalid terms
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ store.append("df_time", df)
+ msg = "day is out of range for month: 0"
+ with pytest.raises(ValueError, match=msg):
+ store.select("df_time", "index>0")
+
+ # can't select if not written as table
+ # store['frame'] = df
+ # with pytest.raises(ValueError):
+ # store.select('frame', [crit1, crit2])
+
+
+def test_frame_select_complex(setup_path):
+ # select via complex criteria
+
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df["string"] = "foo"
+ df.loc[df.index[0:4], "string"] = "bar"
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df, format="table", data_columns=["string"])
+
+ # empty result: the "bar" rows all sit at or before df.index[3]
+ result = store.select("df", 'index>df.index[3] & string="bar"')
+ expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df", 'index>df.index[3] & string="foo"')
+ expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
+ tm.assert_frame_equal(result, expected)
+
+ # or
+ result = store.select("df", 'index>df.index[3] | string="bar"')
+ expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select(
+ "df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
+ )
+ expected = df.loc[
+ ((df.index > df.index[3]) & (df.index <= df.index[6]))
+ | (df.string == "bar")
+ ]
+ tm.assert_frame_equal(result, expected)
+
+ # invert
+ result = store.select("df", 'string!="bar"')
+ expected = df.loc[df.string != "bar"]
+ tm.assert_frame_equal(result, expected)
+
+ # invert not implemented in numexpr :(
+ msg = "cannot use an invert condition when passing to numexpr"
+ with pytest.raises(NotImplementedError, match=msg):
+ store.select("df", '~(string="bar")')
+
+ # invert ok for filters
+ result = store.select("df", "~(columns=['A','B'])")
+ expected = df.loc[:, df.columns.difference(["A", "B"])]
+ tm.assert_frame_equal(result, expected)
+
+ # in
+ result = store.select("df", "index>df.index[3] & columns in ['A','B']")
+ expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
+ tm.assert_frame_equal(result, expected)
+
+
+def test_frame_select_complex2(tmp_path):
+ pp = tmp_path / "params.hdf"
+ hh = tmp_path / "hist.hdf"
+
+ # use non-trivial selection criteria
+ params = DataFrame({"A": [1, 1, 2, 2, 3]})
+ params.to_hdf(pp, key="df", mode="w", format="table", data_columns=["A"])
+
+ selection = read_hdf(pp, "df", where="A=[2,3]")
+ hist = DataFrame(
+ np.random.default_rng(2).standard_normal((25, 1)),
+ columns=["data"],
+ index=MultiIndex.from_tuples(
+ [(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
+ ),
+ )
+
+ hist.to_hdf(hh, key="df", mode="w", format="table")
+
+ expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
+
+ # scope with list like
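+ # any local name is fair game in a where string: plain lists, Index
+ # objects, and attribute/method chains on local variables all resolve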
+ l0 = selection.index.tolist() # noqa: F841
+ with HDFStore(hh) as store:
+ result = store.select("df", where="l1=l0")
+ tm.assert_frame_equal(result, expected)
+
+ result = read_hdf(hh, "df", where="l1=l0")
+ tm.assert_frame_equal(result, expected)
+
+ # index
+ index = selection.index # noqa: F841
+ result = read_hdf(hh, "df", where="l1=index")
+ tm.assert_frame_equal(result, expected)
+
+ result = read_hdf(hh, "df", where="l1=selection.index")
+ tm.assert_frame_equal(result, expected)
+
+ result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
+ tm.assert_frame_equal(result, expected)
+
+ result = read_hdf(hh, "df", where="l1=list(selection.index)")
+ tm.assert_frame_equal(result, expected)
+
+ # scope with index
+ with HDFStore(hh) as store:
+ result = store.select("df", where="l1=index")
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df", where="l1=selection.index")
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df", where="l1=selection.index.tolist()")
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df", where="l1=list(selection.index)")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_invalid_filtering(setup_path):
+ # can't use more than one filter (atm)
+
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df, format="table")
+
+ msg = "unable to collapse Joint Filters"
+ # not implemented
+ with pytest.raises(NotImplementedError, match=msg):
+ store.select("df", "columns=['A'] | columns=['B']")
+
+ # in theory we could deal with this
+ with pytest.raises(NotImplementedError, match=msg):
+ store.select("df", "columns=['A','B'] & columns=['C']")
+
+
+def test_string_select(setup_path):
+ # GH 2973
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+
+ # test string ==/!=
+ df["x"] = "none"
+ df.loc[df.index[2:7], "x"] = ""
+
+ store.append("df", df, data_columns=["x"])
+
+ result = store.select("df", "x=none")
+ expected = df[df.x == "none"]
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df", "x!=none")
+ expected = df[df.x != "none"]
+ tm.assert_frame_equal(result, expected)
+
+ df2 = df.copy()
+ df2.loc[df2.x == "", "x"] = np.nan
+
+ store.append("df2", df2, data_columns=["x"])
+ result = store.select("df2", "x!=none")
+ expected = df2[isna(df2.x)]
+ tm.assert_frame_equal(result, expected)
+
+ # int ==/!=
+ df["int"] = 1
+ df.loc[df.index[2:7], "int"] = 2
+
+ store.append("df3", df, data_columns=["int"])
+
+ result = store.select("df3", "int=2")
+ expected = df[df.int == 2]
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df3", "int!=2")
+ expected = df[df.int != 2]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_select_as_multiple(setup_path):
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = df1.copy().rename(columns="{}_2".format)
+ df2["foo"] = "bar"
+
+ with ensure_clean_store(setup_path) as store:
+ msg = "keys must be a list/tuple"
+ # no tables stored
+ with pytest.raises(TypeError, match=msg):
+ store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
+
+ store.append("df1", df1, data_columns=["A", "B"])
+ store.append("df2", df2)
+
+ # exceptions
+ with pytest.raises(TypeError, match=msg):
+ store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
+
+ with pytest.raises(TypeError, match=msg):
+ store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
+
+ msg = "'No object named df3 in the file'"
+ with pytest.raises(KeyError, match=msg):
+ store.select_as_multiple(
+ ["df1", "df3"], where=["A>0", "B>0"], selector="df1"
+ )
+
+ with pytest.raises(KeyError, match=msg):
+ store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
+
+ with pytest.raises(KeyError, match="'No object named df4 in the file'"):
+ store.select_as_multiple(
+ ["df1", "df2"], where=["A>0", "B>0"], selector="df4"
+ )
+
+ # default select
+ result = store.select("df1", ["A>0", "B>0"])
+ expected = store.select_as_multiple(
+ ["df1"], where=["A>0", "B>0"], selector="df1"
+ )
+ tm.assert_frame_equal(result, expected)
+ expected = store.select_as_multiple("df1", where=["A>0", "B>0"], selector="df1")
+ tm.assert_frame_equal(result, expected)
+
+ # multiple
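+ # the selector table drives the row selection; the matching row
+ # coordinates are then applied to every listed table, which is why
+ # all tables must share the same number of rows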
+ result = store.select_as_multiple(
+ ["df1", "df2"], where=["A>0", "B>0"], selector="df1"
+ )
+ expected = concat([df1, df2], axis=1)
+ expected = expected[(expected.A > 0) & (expected.B > 0)]
+ tm.assert_frame_equal(result, expected, check_freq=False)
+ # FIXME: 2021-01-20 this is failing with freq None vs 4B on some builds
+
+ # multiple (diff selector)
+ result = store.select_as_multiple(
+ ["df1", "df2"], where="index>df2.index[4]", selector="df2"
+ )
+ expected = concat([df1, df2], axis=1)
+ expected = expected[5:]
+ tm.assert_frame_equal(result, expected)
+
+ # test exception for diff rows
+ df3 = df1.copy().head(2)
+ store.append("df3", df3)
+ msg = "all tables must have exactly the same nrows!"
+ with pytest.raises(ValueError, match=msg):
+ store.select_as_multiple(
+ ["df1", "df3"], where=["A>0", "B>0"], selector="df1"
+ )
+
+
+def test_nan_selection_bug_4858(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame({"cols": range(6), "values": range(6)}, dtype="float64")
+ df["cols"] = (df["cols"] + 10).apply(str)
+ df.iloc[0] = np.nan
+
+ expected = DataFrame(
+ {"cols": ["13.0", "14.0", "15.0"], "values": [3.0, 4.0, 5.0]},
+ index=[3, 4, 5],
+ )
+
+ # write w/o the index on that particular column
+ store.append("df", df, data_columns=True, index=["cols"])
+ result = store.select("df", where="values>2.0")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_query_with_nested_special_character(setup_path):
+ df = DataFrame(
+ {
+ "a": ["a", "a", "c", "b", "test & test", "c", "b", "e"],
+ "b": [1, 2, 3, 4, 5, 6, 7, 8],
+ }
+ )
+ expected = df[df.a == "test & test"]
+ with ensure_clean_store(setup_path) as store:
+ store.append("test", df, format="table", data_columns=True)
+ result = store.select("test", 'a = "test & test"')
+ tm.assert_frame_equal(expected, result)
+
+
+def test_query_long_float_literal(setup_path):
+ # GH 14241
+ df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("test", df, format="table", data_columns=True)
+
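+ # format the literal with enough decimal places that the parsed
+ # query value matches the stored float64 exactly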
+ cutoff = 1000000000.0006
+ result = store.select("test", f"A < {cutoff:.4f}")
+ assert result.empty
+
+ cutoff = 1000000000.0010
+ result = store.select("test", f"A > {cutoff:.4f}")
+ expected = df.loc[[1, 2], :]
+ tm.assert_frame_equal(expected, result)
+
+ exact = 1000000000.0011
+ result = store.select("test", f"A == {exact:.4f}")
+ expected = df.loc[[1], :]
+ tm.assert_frame_equal(expected, result)
+
+
+def test_query_compare_column_type(setup_path):
+ # GH 15492
+ df = DataFrame(
+ {
+ "date": ["2014-01-01", "2014-01-02"],
+ "real_date": date_range("2014-01-01", periods=2),
+ "float": [1.1, 1.2],
+ "int": [1, 2],
+ },
+ columns=["date", "real_date", "float", "int"],
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("test", df, format="table", data_columns=True)
+
+ ts = Timestamp("2014-01-01") # noqa: F841
+ result = store.select("test", where="real_date > ts")
+ expected = df.loc[[1], :]
+ tm.assert_frame_equal(expected, result)
+
+ for op in ["<", ">", "=="]:
+ # non-strings compared to a string column always fail
+ for v in [2.1, True, Timestamp("2014-01-01"), pd.Timedelta(1, "s")]:
+ query = f"date {op} v"
+ msg = f"Cannot compare {v} of type {type(v)} to string column"
+ with pytest.raises(TypeError, match=msg):
+ store.select("test", where=query)
+
+ # strings compared to other columns must be convertible to the column type
+ v = "a"
+ for col in ["int", "float", "real_date"]:
+ query = f"{col} {op} v"
+ if col == "real_date":
+ msg = 'Given date string "a" not likely a datetime'
+ else:
+ msg = "could not convert string to"
+ with pytest.raises(ValueError, match=msg):
+ store.select("test", where=query)
+
+ for v, col in zip(
+ ["1", "1.1", "2014-01-01"], ["int", "float", "real_date"]
+ ):
+ query = f"{col} {op} v"
+ result = store.select("test", where=query)
+
+ if op == "==":
+ expected = df.loc[[0], :]
+ elif op == ">":
+ expected = df.loc[[1], :]
+ else:
+ expected = df.loc[[], :]
+ tm.assert_frame_equal(expected, result)
+
+
+@pytest.mark.parametrize("where", ["", (), (None,), [], [None]])
+def test_select_empty_where(tmp_path, where):
+ # GH26610
+
+ df = DataFrame([1, 2, 3])
+ path = tmp_path / "empty_where.h5"
+ with HDFStore(path) as store:
+ store.put("df", df, format="t")
+ result = read_hdf(store, "df", where=where)
+ tm.assert_frame_equal(result, df)
+
+
+def test_select_large_integer(tmp_path):
+ path = tmp_path / "large_int.h5"
+
+ df = DataFrame(
+ zip(
+ ["a", "b", "c", "d"],
+ [-9223372036854775801, -9223372036854775802, -9223372036854775803, 123],
+ ),
+ columns=["x", "y"],
+ )
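+ # values this close to the int64 lower bound are not exactly
+ # representable as float64, so the query must compare as integers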
+ result = None
+ with HDFStore(path) as s:
+ s.append("data", df, data_columns=True, index=False)
+ result = s.select("data", where="y==-9223372036854775801").get("y").get(0)
+ expected = df["y"][0]
+
+ assert expected == result
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py
new file mode 100644
index 0000000000000000000000000000000000000000..82d3052e7f5d6738801e973a9f34307ea0dff2cf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_store.py
@@ -0,0 +1,1119 @@
+import contextlib
+import datetime as dt
+import hashlib
+import tempfile
+import time
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ Index,
+ MultiIndex,
+ Series,
+ Timestamp,
+ concat,
+ date_range,
+ period_range,
+ timedelta_range,
+)
+import pandas._testing as tm
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+
+from pandas.io.pytables import (
+ HDFStore,
+ read_hdf,
+)
+
+pytestmark = pytest.mark.single_cpu
+
+tables = pytest.importorskip("tables")
+
+
+def test_context(setup_path):
+ with tm.ensure_clean(setup_path) as path:
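+ # the context manager should close the file even when the body raises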
+ try:
+ with HDFStore(path) as tbl:
+ raise ValueError("blah")
+ except ValueError:
+ pass
+ with tm.ensure_clean(setup_path) as path:
+ with HDFStore(path) as tbl:
+ tbl["a"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ assert len(tbl) == 1
+ assert type(tbl["a"]) == DataFrame
+
+
+def test_no_track_times(tmp_path, setup_path):
+ # GH 32682
+ # allows setting track_times (see the PyTables `create_table` documentation)
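+ # PyTables stamps creation/modification times into each node by
+ # default; with track_times=False the output is byte-reproducible,
+ # which the md5 checksums below verify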
+
+ def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
+ h = hash_factory()
+ with open(filename, "rb") as f:
+ for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
+ h.update(chunk)
+ return h.digest()
+
+ def create_h5_and_return_checksum(tmp_path, track_times):
+ path = tmp_path / setup_path
+ df = DataFrame({"a": [1]})
+
+ with HDFStore(path, mode="w") as hdf:
+ hdf.put(
+ "table",
+ df,
+ format="table",
+ data_columns=True,
+ index=None,
+ track_times=track_times,
+ )
+
+ return checksum(path)
+
+ checksum_0_tt_false = create_h5_and_return_checksum(tmp_path, track_times=False)
+ checksum_0_tt_true = create_h5_and_return_checksum(tmp_path, track_times=True)
+
+ # sleep is necessary to create h5 files with different creation times
+ time.sleep(1)
+
+ checksum_1_tt_false = create_h5_and_return_checksum(tmp_path, track_times=False)
+ checksum_1_tt_true = create_h5_and_return_checksum(tmp_path, track_times=True)
+
+ # checksums are the same if track_times = False
+ assert checksum_0_tt_false == checksum_1_tt_false
+
+ # checksums are NOT the same if track_times = True
+ assert checksum_0_tt_true != checksum_1_tt_true
+
+
+def test_iter_empty(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # GH 12221
+ assert list(store) == []
+
+
+def test_repr(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ repr(store)
+ store.info()
+ store["a"] = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ store["b"] = Series(
+ range(10), dtype="float64", index=[f"i_{i}" for i in range(10)]
+ )
+ store["c"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df["obj1"] = "foo"
+ df["obj2"] = "bar"
+ df["bool1"] = df["A"] > 0
+ df["bool2"] = df["B"] > 0
+ df["bool3"] = True
+ df["int1"] = 1
+ df["int2"] = 2
+ df["timestamp1"] = Timestamp("20010102")
+ df["timestamp2"] = Timestamp("20010103")
+ df["datetime1"] = dt.datetime(2001, 1, 2, 0, 0)
+ df["datetime2"] = dt.datetime(2001, 1, 3, 0, 0)
+ df.loc[df.index[3:6], ["obj1"]] = np.nan
+ df = df._consolidate()
+
+ with tm.assert_produces_warning(pd.errors.PerformanceWarning):
+ store["df"] = df
+
+ # make a random group in hdf space
+ store._handle.create_group(store._handle.root, "bah")
+
+ assert store.filename in repr(store)
+ assert store.filename in str(store)
+ store.info()
+
+ # storers
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ store.append("df", df)
+
+ s = store.get_storer("df")
+ repr(s)
+ str(s)
+
+
+def test_contains(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ store["a"] = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ store["b"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ store["foo/bar"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ assert "a" in store
+ assert "b" in store
+ assert "c" not in store
+ assert "foo/bar" in store
+ assert "/foo/bar" in store
+ assert "/foo/b" not in store
+ assert "bar" not in store
+
+ # gh-2694: tables.NaturalNameWarning
+ with tm.assert_produces_warning(
+ tables.NaturalNameWarning, check_stacklevel=False
+ ):
+ store["node())"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ assert "node())" in store
+
+
+def test_versioning(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ store["a"] = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ store["b"] = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((20, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=20, freq="B"),
+ )
+ _maybe_remove(store, "df1")
+ store.append("df1", df[:10])
+ store.append("df1", df[10:])
+ assert store.root.a._v_attrs.pandas_version == "0.15.2"
+ assert store.root.b._v_attrs.pandas_version == "0.15.2"
+ assert store.root.df1._v_attrs.pandas_version == "0.15.2"
+
+ # write a file and wipe its versioning
+ _maybe_remove(store, "df2")
+ store.append("df2", df)
+
+ # this is an error because its table_type is appendable but there
+ # is no version info
+ store.get_node("df2")._v_attrs.pandas_version = None
+
+ msg = "'NoneType' object has no attribute 'startswith'"
+
+ with pytest.raises(Exception, match=msg):
+ store.select("df2")
+
+
+@pytest.mark.parametrize(
+ "where, expected",
+ [
+ (
+ "/",
+ {
+ "": ({"first_group", "second_group"}, set()),
+ "/first_group": (set(), {"df1", "df2"}),
+ "/second_group": ({"third_group"}, {"df3", "s1"}),
+ "/second_group/third_group": (set(), {"df4"}),
+ },
+ ),
+ (
+ "/second_group",
+ {
+ "/second_group": ({"third_group"}, {"df3", "s1"}),
+ "/second_group/third_group": (set(), {"df4"}),
+ },
+ ),
+ ],
+)
+def test_walk(where, expected):
+ # GH10143
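+ # store.walk mirrors os.walk: it yields (path, subgroups, leaves)
+ # triples and skips nodes that are not pandas objects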
+ objs = {
+ "df1": DataFrame([1, 2, 3]),
+ "df2": DataFrame([4, 5, 6]),
+ "df3": DataFrame([6, 7, 8]),
+ "df4": DataFrame([9, 10, 11]),
+ "s1": Series([10, 9, 8]),
+ # Next 3 items aren't pandas objects and should be ignored
+ "a1": np.array([[1, 2, 3], [4, 5, 6]]),
+ "tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
+ "tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
+ }
+
+ with ensure_clean_store("walk_groups.hdf", mode="w") as store:
+ store.put("/first_group/df1", objs["df1"])
+ store.put("/first_group/df2", objs["df2"])
+ store.put("/second_group/df3", objs["df3"])
+ store.put("/second_group/s1", objs["s1"])
+ store.put("/second_group/third_group/df4", objs["df4"])
+ # Create non-pandas objects
+ store._handle.create_array("/first_group", "a1", objs["a1"])
+ store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
+ store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
+
+ assert len(list(store.walk(where=where))) == len(expected)
+ for path, groups, leaves in store.walk(where=where):
+ assert path in expected
+ expected_groups, expected_frames = expected[path]
+ assert expected_groups == set(groups)
+ assert expected_frames == set(leaves)
+ for leaf in leaves:
+ frame_path = "/".join([path, leaf])
+ obj = store.get(frame_path)
+ if "df" in leaf:
+ tm.assert_frame_equal(obj, objs[leaf])
+ else:
+ tm.assert_series_equal(obj, objs[leaf])
+
+
+def test_getattr(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ s = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ store["a"] = s
+
+ # test attribute access
+ result = store.a
+ tm.assert_series_equal(result, s)
+ result = getattr(store, "a")
+ tm.assert_series_equal(result, s)
+
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ store["df"] = df
+ result = store.df
+ tm.assert_frame_equal(result, df)
+
+ # errors
+ for x in ["d", "mode", "path", "handle", "complib"]:
+ msg = f"'HDFStore' object has no attribute '{x}'"
+ with pytest.raises(AttributeError, match=msg):
+ getattr(store, x)
+
+ # underscore-prefixed attributes are reachable but are not stored keys
+ for x in ["mode", "path", "handle", "complib"]:
+ getattr(store, f"_{x}")
+
+
+def test_store_dropna(tmp_path, setup_path):
+ df_with_missing = DataFrame(
+ {"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
+ index=list("abc"),
+ )
+ df_without_missing = DataFrame(
+ {"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
+ )
+
+ # Test to make sure defaults are to not drop.
+ # Corresponding to Issue 9382
+ path = tmp_path / setup_path
+ df_with_missing.to_hdf(path, key="df", format="table")
+ reloaded = read_hdf(path, "df")
+ tm.assert_frame_equal(df_with_missing, reloaded)
+
+ path = tmp_path / setup_path
+ df_with_missing.to_hdf(path, key="df", format="table", dropna=False)
+ reloaded = read_hdf(path, "df")
+ tm.assert_frame_equal(df_with_missing, reloaded)
+
+ path = tmp_path / setup_path
+ df_with_missing.to_hdf(path, key="df", format="table", dropna=True)
+ reloaded = read_hdf(path, "df")
+ tm.assert_frame_equal(df_without_missing, reloaded)
+
+
+def test_keyword_deprecation(tmp_path, setup_path):
+ # GH 54229
+ path = tmp_path / setup_path
+
+ msg = (
+ "Starting with pandas version 3.0 all arguments of to_hdf except for the "
+ "argument 'path_or_buf' will be keyword-only."
+ )
+ df = DataFrame([{"A": 1, "B": 2, "C": 3}, {"A": 1, "B": 2, "C": 3}])
+
+ with tm.assert_produces_warning(FutureWarning, match=msg):
+ df.to_hdf(path, "key")
+
+
+def test_to_hdf_with_min_itemsize(tmp_path, setup_path):
+ path = tmp_path / setup_path
+
+ # min_itemsize in index with to_hdf (GH 10381)
+ df = DataFrame(
+ {
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
+ "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
+ "D": date_range("20130101", periods=5),
+ }
+ ).set_index("C")
+ df.to_hdf(path, key="ss3", format="table", min_itemsize={"index": 6})
+ # just make sure there is a longer string:
+ df2 = df.copy().reset_index().assign(C="longer").set_index("C")
+ df2.to_hdf(path, key="ss3", append=True, format="table")
+ tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
+
+ # same as above, with a Series
+ df["B"].to_hdf(path, key="ss4", format="table", min_itemsize={"index": 6})
+ df2["B"].to_hdf(path, key="ss4", append=True, format="table")
+ tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
+
+
+@pytest.mark.parametrize("format", ["fixed", "table"])
+def test_to_hdf_errors(tmp_path, format, setup_path):
+ data = ["\ud800foo"]
+ ser = Series(data, index=Index(data))
+ path = tmp_path / setup_path
+ # GH 20835
+ ser.to_hdf(path, key="table", format=format, errors="surrogatepass")
+
+ result = read_hdf(path, "table", errors="surrogatepass")
+ tm.assert_series_equal(result, ser)
+
+
+def test_create_table_index(setup_path):
+ with ensure_clean_store(setup_path) as store:
+
+ def col(t, column):
+ return getattr(store.get_storer(t).table.cols, column)
+
+ # data columns
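+ # each data_column is stored as its own queryable column; append
+ # creates a PyTables index on it (and on the axis) by default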
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df["string"] = "foo"
+ df["string2"] = "bar"
+ store.append("f", df, data_columns=["string", "string2"])
+ assert col("f", "index").is_indexed is True
+ assert col("f", "string").is_indexed is True
+ assert col("f", "string2").is_indexed is True
+
+ # specify index=columns
+ store.append("f2", df, index=["string"], data_columns=["string", "string2"])
+ assert col("f2", "index").is_indexed is False
+ assert col("f2", "string").is_indexed is True
+ assert col("f2", "string2").is_indexed is False
+
+ # try to index a non-table
+ _maybe_remove(store, "f2")
+ store.put("f2", df)
+ msg = "cannot create table index on a Fixed format store"
+ with pytest.raises(TypeError, match=msg):
+ store.create_table_index("f2")
+
+
+def test_create_table_index_data_columns_argument(setup_path):
+ # GH 28156
+
+ with ensure_clean_store(setup_path) as store:
+
+ def col(t, column):
+ return getattr(store.get_storer(t).table.cols, column)
+
+ # data columns
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df["string"] = "foo"
+ df["string2"] = "bar"
+ store.append("f", df, data_columns=["string"])
+ assert col("f", "index").is_indexed is True
+ assert col("f", "string").is_indexed is True
+
+ msg = "'Cols' object has no attribute 'string2'"
+ with pytest.raises(AttributeError, match=msg):
+ col("f", "string2").is_indexed
+
+ # try to index a col which isn't a data_column
+ msg = (
+ "column string2 is not a data_column.\n"
+ "In order to read column string2 you must reload the dataframe \n"
+ "into HDFStore and include string2 with the data_columns argument."
+ )
+ with pytest.raises(AttributeError, match=msg):
+ store.create_table_index("f", columns=["string2"])
+
+
+def test_mi_data_columns(setup_path):
+ # GH 14435
+ idx = MultiIndex.from_arrays(
+ [date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
+ )
+ df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("df", df, data_columns=True)
+
+ actual = store.select("df", where="id == 1")
+ expected = df.iloc[[1], :]
+ tm.assert_frame_equal(actual, expected)
+
+
+def test_table_mixed_dtypes(setup_path):
+ # frame
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df["obj1"] = "foo"
+ df["obj2"] = "bar"
+ df["bool1"] = df["A"] > 0
+ df["bool2"] = df["B"] > 0
+ df["bool3"] = True
+ df["int1"] = 1
+ df["int2"] = 2
+ df["timestamp1"] = Timestamp("20010102").as_unit("ns")
+ df["timestamp2"] = Timestamp("20010103").as_unit("ns")
+ df["datetime1"] = Timestamp("20010102").as_unit("ns")
+ df["datetime2"] = Timestamp("20010103").as_unit("ns")
+ df.loc[df.index[3:6], ["obj1"]] = np.nan
+ df = df._consolidate()
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("df1_mixed", df)
+ tm.assert_frame_equal(store.select("df1_mixed"), df)
+
+
+def test_calendar_roundtrip_issue(setup_path):
+ # 8591
+ # doc example from tseries holiday section
+ weekmask_egypt = "Sun Mon Tue Wed Thu"
+ holidays = [
+ "2012-05-01",
+ dt.datetime(2013, 5, 1),
+ np.datetime64("2014-05-01"),
+ ]
+ bday_egypt = pd.offsets.CustomBusinessDay(
+ holidays=holidays, weekmask=weekmask_egypt
+ )
+ mydt = dt.datetime(2013, 4, 30)
+ dts = date_range(mydt, periods=5, freq=bday_egypt)
+
+ s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("fixed", s)
+ result = store.select("fixed")
+ tm.assert_series_equal(result, s)
+
+ store.append("table", s)
+ result = store.select("table")
+ tm.assert_series_equal(result, s)
+
+
+def test_remove(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ ts = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ store["a"] = ts
+ store["b"] = df
+ _maybe_remove(store, "a")
+ assert len(store) == 1
+ tm.assert_frame_equal(df, store["b"])
+
+ _maybe_remove(store, "b")
+ assert len(store) == 0
+
+ # nonexistence
+ with pytest.raises(
+ KeyError, match="'No object named a_nonexistent_store in the file'"
+ ):
+ store.remove("a_nonexistent_store")
+
+ # pathing
+ store["a"] = ts
+ store["b/foo"] = df
+ _maybe_remove(store, "foo")
+ _maybe_remove(store, "b/foo")
+ assert len(store) == 1
+
+ store["a"] = ts
+ store["b/foo"] = df
+ _maybe_remove(store, "b")
+ assert len(store) == 1
+
+ # __delitem__
+ store["a"] = ts
+ store["b"] = df
+ del store["a"]
+ del store["b"]
+ assert len(store) == 0
+
+
+def test_same_name_scoping(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((20, 2)),
+ index=date_range("20130101", periods=20),
+ )
+ store.put("df", df, format="table")
+ expected = df[df.index > Timestamp("20130105")]
+
+ import datetime
+
+ result = store.select("df", "index>datetime.datetime(2013,1,5)")
+ tm.assert_frame_equal(result, expected)
+
+ # rebinding 'datetime' changes what the name points to in the
+ # namespace where 'select' does the lookup
+ from datetime import datetime # noqa: F401
+
+ # technically an error, but allow it
+ result = store.select("df", "index>datetime.datetime(2013,1,5)")
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df", "index>datetime(2013,1,5)")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_store_index_name(setup_path):
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df.index.name = "foo"
+
+ with ensure_clean_store(setup_path) as store:
+ store["frame"] = df
+ recons = store["frame"]
+ tm.assert_frame_equal(recons, df)
+
+
+@pytest.mark.parametrize("tz", [None, "US/Pacific"])
+@pytest.mark.parametrize("table_format", ["table", "fixed"])
+def test_store_index_name_numpy_str(tmp_path, table_format, setup_path, unit, tz):
+ # GH #13492
+ idx = DatetimeIndex(
+ [dt.date(2000, 1, 1), dt.date(2000, 1, 2)],
+ name="cols\u05d2",
+ ).tz_localize(tz)
+ idx1 = (
+ DatetimeIndex(
+ [dt.date(2010, 1, 1), dt.date(2010, 1, 2)],
+ name="rows\u05d0",
+ )
+ .as_unit(unit)
+ .tz_localize(tz)
+ )
+ df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
+
+ # This used to fail, returning numpy strings instead of python strings.
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format=table_format)
+ df2 = read_hdf(path, "df")
+
+ tm.assert_frame_equal(df, df2, check_names=True)
+
+ assert isinstance(df2.index.name, str)
+ assert isinstance(df2.columns.name, str)
+
+
+def test_store_series_name(setup_path):
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ series = df["A"]
+
+ with ensure_clean_store(setup_path) as store:
+ store["series"] = series
+ recons = store["series"]
+ tm.assert_series_equal(recons, series)
+
+
+def test_overwrite_node(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ store["a"] = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ ts = Series(
+ np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+ )
+ store["a"] = ts
+
+ tm.assert_series_equal(store["a"], ts)
+
+
+def test_coordinates(setup_path):
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ _maybe_remove(store, "df")
+ store.append("df", df)
+
+ # all
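+ # select_as_coordinates returns an Index of integer row positions
+ # that can later be passed back to select as a precomputed where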
+ c = store.select_as_coordinates("df")
+ assert (c.values == np.arange(len(df.index))).all()
+
+ # get coordinates back & test vs frame
+ _maybe_remove(store, "df")
+
+ df = DataFrame({"A": range(5), "B": range(5)})
+ store.append("df", df)
+ c = store.select_as_coordinates("df", ["index<3"])
+ assert (c.values == np.arange(3)).all()
+ result = store.select("df", where=c)
+ expected = df.loc[0:2, :]
+ tm.assert_frame_equal(result, expected)
+
+ c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
+ assert (c.values == np.arange(2) + 3).all()
+ result = store.select("df", where=c)
+ expected = df.loc[3:4, :]
+ tm.assert_frame_equal(result, expected)
+ assert isinstance(c, Index)
+
+ # multiple tables
+ _maybe_remove(store, "df1")
+ _maybe_remove(store, "df2")
+ df1 = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=date_range("2000-01-01", periods=10, freq="B"),
+ )
+ df2 = df1.copy().rename(columns="{}_2".format)
+ store.append("df1", df1, data_columns=["A", "B"])
+ store.append("df2", df2)
+
+ c = store.select_as_coordinates("df1", ["A>0", "B>0"])
+ df1_result = store.select("df1", c)
+ df2_result = store.select("df2", c)
+ result = concat([df1_result, df2_result], axis=1)
+
+ expected = concat([df1, df2], axis=1)
+ expected = expected[(expected.A > 0) & (expected.B > 0)]
+ tm.assert_frame_equal(result, expected, check_freq=False)
+ # FIXME: 2021-01-18 on some (mostly windows) builds we get freq=None
+ # but expect freq="18B"
+
+ # pass array/mask as the coordinates
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((1000, 2)),
+ index=date_range("20000101", periods=1000),
+ )
+ store.append("df", df)
+ c = store.select_column("df", "index")
+ where = c[DatetimeIndex(c).month == 5].index
+ expected = df.iloc[where]
+
+ # locations
+ result = store.select("df", where=where)
+ tm.assert_frame_equal(result, expected)
+
+ # invalid
+ msg = (
+ "where must be passed as a string, PyTablesExpr, "
+ "or list-like of PyTablesExpr"
+ )
+ with pytest.raises(TypeError, match=msg):
+ store.select("df", where=np.arange(len(df), dtype="float64"))
+
+ with pytest.raises(TypeError, match=msg):
+ store.select("df", where=np.arange(len(df) + 1))
+
+ with pytest.raises(TypeError, match=msg):
+ store.select("df", where=np.arange(len(df)), start=5)
+
+ with pytest.raises(TypeError, match=msg):
+ store.select("df", where=np.arange(len(df)), start=5, stop=10)
+
+ # selection with filter
+ selection = date_range("20000101", periods=500)
+ result = store.select("df", where="index in selection")
+ expected = df[df.index.isin(selection)]
+ tm.assert_frame_equal(result, expected)
+
+ # list
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
+ store.append("df2", df)
+ result = store.select("df2", where=[0, 3, 5])
+ expected = df.iloc[[0, 3, 5]]
+ tm.assert_frame_equal(result, expected)
+
+ # boolean
+ where = [True] * 10
+ where[-2] = False
+ result = store.select("df2", where=where)
+ expected = df.loc[where]
+ tm.assert_frame_equal(result, expected)
+
+ # start/stop
+ result = store.select("df2", start=5, stop=10)
+ expected = df[5:10]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_start_stop_table(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # table
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).random(20),
+ "B": np.random.default_rng(2).random(20),
+ }
+ )
+ store.append("df", df)
+
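+ # start/stop slice by row number (like iloc), independent of the
+ # index values; the columns filter is applied to the sliced rows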
+ result = store.select("df", "columns=['A']", start=0, stop=5)
+ expected = df.loc[0:4, ["A"]]
+ tm.assert_frame_equal(result, expected)
+
+ # out of range
+ result = store.select("df", "columns=['A']", start=30, stop=40)
+ assert len(result) == 0
+ expected = df.loc[30:40, ["A"]]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_start_stop_multiple(setup_path):
+ # GH 16209
+ with ensure_clean_store(setup_path) as store:
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
+
+ store.append_to_multiple(
+ {"selector": ["foo"], "data": None}, df, selector="selector"
+ )
+ result = store.select_as_multiple(
+ ["selector", "data"], selector="selector", start=0, stop=1
+ )
+ expected = df.loc[[0], ["foo", "bar"]]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_start_stop_fixed(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # fixed, GH 8287
+ df = DataFrame(
+ {
+ "A": np.random.default_rng(2).random(20),
+ "B": np.random.default_rng(2).random(20),
+ },
+ index=date_range("20130101", periods=20),
+ )
+ store.put("df", df)
+
+ result = store.select("df", start=0, stop=5)
+ expected = df.iloc[0:5, :]
+ tm.assert_frame_equal(result, expected)
+
+ result = store.select("df", start=5, stop=10)
+ expected = df.iloc[5:10, :]
+ tm.assert_frame_equal(result, expected)
+
+ # out of range
+ result = store.select("df", start=30, stop=40)
+ expected = df.iloc[30:40, :]
+ tm.assert_frame_equal(result, expected)
+
+ # series
+ s = df.A
+ store.put("s", s)
+ result = store.select("s", start=0, stop=5)
+ expected = s.iloc[0:5]
+ tm.assert_series_equal(result, expected)
+
+ result = store.select("s", start=5, stop=10)
+ expected = s.iloc[5:10]
+ tm.assert_series_equal(result, expected)
+
+ # sparse; not implemented
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ df.iloc[3:5, 1:3] = np.nan
+ df.iloc[8:10, -2] = np.nan
+
+
+def test_select_filter_corner(setup_path):
+ df = DataFrame(np.random.default_rng(2).standard_normal((50, 100)))
+ df.index = [f"{c:3d}" for c in df.index]
+ df.columns = [f"{c:3d}" for c in df.columns]
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("frame", df, format="table")
+
+ crit = "columns=df.columns[:75]"
+ result = store.select("frame", [crit])
+ tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
+
+ crit = "columns=df.columns[:75:2]"
+ result = store.select("frame", [crit])
+ tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
+
+
+def test_path_pathlib():
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ result = tm.round_trip_pathlib(
+ lambda p: df.to_hdf(p, key="df"), lambda p: read_hdf(p, "df")
+ )
+ tm.assert_frame_equal(df, result)
+
+
+@pytest.mark.parametrize("start, stop", [(0, 2), (1, 2), (None, None)])
+def test_contiguous_mixed_data_table(start, stop, setup_path):
+ # GH 17021
+ df = DataFrame(
+ {
+ "a": Series([20111010, 20111011, 20111012]),
+ "b": Series(["ab", "cd", "ab"]),
+ }
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("test_dataset", df)
+
+ result = store.select("test_dataset", start=start, stop=stop)
+ tm.assert_frame_equal(df[start:stop], result)
+
+
+def test_path_pathlib_hdfstore():
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ def writer(path):
+ with HDFStore(path) as store:
+ df.to_hdf(store, key="df")
+
+ def reader(path):
+ with HDFStore(path) as store:
+ return read_hdf(store, "df")
+
+ result = tm.round_trip_pathlib(writer, reader)
+ tm.assert_frame_equal(df, result)
+
+
+def test_pickle_path_localpath():
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+ result = tm.round_trip_pathlib(
+ lambda p: df.to_hdf(p, key="df"), lambda p: read_hdf(p, "df")
+ )
+ tm.assert_frame_equal(df, result)
+
+
+def test_path_localpath_hdfstore():
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
+ def writer(path):
+ with HDFStore(path) as store:
+ df.to_hdf(store, key="df")
+
+ def reader(path):
+ with HDFStore(path) as store:
+ return read_hdf(store, "df")
+
+ result = tm.round_trip_localpath(writer, reader)
+ tm.assert_frame_equal(df, result)
+
+
+@pytest.mark.parametrize("propindexes", [True, False])
+def test_copy(propindexes):
+ df = DataFrame(
+ 1.1 * np.arange(120).reshape((30, 4)),
+ columns=Index(list("ABCD"), dtype=object),
+ index=Index([f"i-{i}" for i in range(30)], dtype=object),
+ )
+
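+ # HDFStore.copy writes every key into a new file; propindexes
+ # controls whether existing PyTables column indexes are recreated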
+ with tm.ensure_clean() as path:
+ with HDFStore(path) as st:
+ st.append("df", df, data_columns=["A"])
+ with tempfile.NamedTemporaryFile() as new_f:
+ with HDFStore(path) as store:
+ with contextlib.closing(
+ store.copy(new_f.name, keys=None, propindexes=propindexes)
+ ) as tstore:
+ # check keys
+ keys = store.keys()
+ assert set(keys) == set(tstore.keys())
+ # check indices & nrows
+ for k in tstore.keys():
+ if tstore.get_storer(k).is_table:
+ new_t = tstore.get_storer(k)
+ orig_t = store.get_storer(k)
+
+ assert orig_t.nrows == new_t.nrows
+
+ # check propindexes
+ if propindexes:
+ for a in orig_t.axes:
+ if a.is_indexed:
+ assert new_t[a.name].is_indexed
+
+
+def test_duplicate_column_name(tmp_path, setup_path):
+ df = DataFrame(columns=["a", "a"], data=[[0, 0]])
+
+ path = tmp_path / setup_path
+ msg = "Columns index has to be unique for fixed format"
+ with pytest.raises(ValueError, match=msg):
+ df.to_hdf(path, key="df", format="fixed")
+
+ df.to_hdf(path, key="df", format="table")
+ other = read_hdf(path, "df")
+
+ tm.assert_frame_equal(df, other)
+ assert df.equals(other)
+ assert other.equals(df)
+
+
+def test_preserve_timedeltaindex_type(setup_path):
+ # GH9635
+ df = DataFrame(np.random.default_rng(2).normal(size=(10, 5)))
+ df.index = timedelta_range(start="0s", periods=10, freq="1s", name="example")
+
+ with ensure_clean_store(setup_path) as store:
+ store["df"] = df
+ tm.assert_frame_equal(store["df"], df)
+
+
+def test_columns_multiindex_modified(tmp_path, setup_path):
+ # BUG: 7212
+
+ df = DataFrame(
+ np.random.default_rng(2).random((4, 5)),
+ index=list("abcd"),
+ columns=list("ABCDE"),
+ )
+ df.index.name = "letters"
+ df = df.set_index(keys="E", append=True)
+
+ data_columns = df.index.names + df.columns.tolist()
+ path = tmp_path / setup_path
+ df.to_hdf(
+ path,
+ key="df",
+ mode="a",
+ append=True,
+ data_columns=data_columns,
+ index=False,
+ )
+ cols2load = list("BCD")
+ cols2load_original = list(cols2load)
+ # GH#10055 make sure read_hdf call does not alter cols2load inplace
+ read_hdf(path, "df", columns=cols2load)
+ assert cols2load_original == cols2load
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+@pytest.mark.parametrize(
+ "columns",
+ [
+ Index([0, 1], dtype=np.int64),
+ Index([0.0, 1.0], dtype=np.float64),
+ date_range("2020-01-01", periods=2),
+ timedelta_range("1 day", periods=2),
+ period_range("2020-01-01", periods=2, freq="D"),
+ ],
+)
+def test_to_hdf_with_object_column_names_should_fail(tmp_path, setup_path, columns):
+ # GH9057
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)), columns=columns)
+ path = tmp_path / setup_path
+ msg = "cannot have non-object label DataIndexableCol"
+ with pytest.raises(ValueError, match=msg):
+ df.to_hdf(path, key="df", format="table", data_columns=True)
+
+
+@pytest.mark.parametrize("dtype", [None, "category"])
+def test_to_hdf_with_object_column_names_should_run(tmp_path, setup_path, dtype):
+ # GH9057
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((10, 2)),
+ columns=Index(["a", "b"], dtype=dtype),
+ )
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="df", format="table", data_columns=True)
+ result = read_hdf(path, "df", where=f"index = [{df.index[0]}]")
+ assert len(result)
+
+
+def test_hdfstore_strides(setup_path):
+ # GH22073
+ df = DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
+ with ensure_clean_store(setup_path) as store:
+ store.put("df", df)
+ assert df["a"].values.strides == store["df"]["a"].values.strides
+
+
+def test_store_bool_index(tmp_path, setup_path):
+ # GH#48667
+ df = DataFrame([[1]], columns=[True], index=Index([False], dtype="bool"))
+ expected = df.copy()
+
+ path = tmp_path / setup_path
+ df.to_hdf(path, key="a")
+ result = read_hdf(path, "a")
+ tm.assert_frame_equal(expected, result)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_subclass.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_subclass.py
new file mode 100644
index 0000000000000000000000000000000000000000..03622faa2b5a8f65d709c23cab23fd3680084cb4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_subclass.py
@@ -0,0 +1,52 @@
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ Series,
+)
+import pandas._testing as tm
+
+from pandas.io.pytables import (
+ HDFStore,
+ read_hdf,
+)
+
+pytest.importorskip("tables")
+
+
+class TestHDFStoreSubclass:
+ # GH 33748
+ def test_supported_for_subclass_dataframe(self, tmp_path):
+ data = {"a": [1, 2], "b": [3, 4]}
+ sdf = tm.SubclassedDataFrame(data, dtype=np.intp)
+
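+ # read_hdf returns the base class, so compare against a plain
+ # DataFrame rather than the subclass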
+ expected = DataFrame(data, dtype=np.intp)
+
+ path = tmp_path / "temp.h5"
+ sdf.to_hdf(path, key="df")
+ result = read_hdf(path, "df")
+ tm.assert_frame_equal(result, expected)
+
+ path = tmp_path / "temp.h5"
+ with HDFStore(path) as store:
+ store.put("df", sdf)
+ result = read_hdf(path, "df")
+ tm.assert_frame_equal(result, expected)
+
+ def test_supported_for_subclass_series(self, tmp_path):
+ data = [1, 2, 3]
+ sser = tm.SubclassedSeries(data, dtype=np.intp)
+
+ expected = Series(data, dtype=np.intp)
+
+ path = tmp_path / "temp.h5"
+ sser.to_hdf(path, key="ser")
+ result = read_hdf(path, "ser")
+ tm.assert_series_equal(result, expected)
+
+ path = tmp_path / "temp.h5"
+ with HDFStore(path) as store:
+ store.put("ser", sser)
+ result = read_hdf(path, "ser")
+ tm.assert_series_equal(result, expected)
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py
new file mode 100644
index 0000000000000000000000000000000000000000..726dd0d42034756b205c427c60546afb2901be49
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_time_series.py
@@ -0,0 +1,72 @@
+import datetime
+
+import numpy as np
+import pytest
+
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ Series,
+ _testing as tm,
+ date_range,
+ period_range,
+)
+from pandas.tests.io.pytables.common import ensure_clean_store
+
+pytestmark = pytest.mark.single_cpu
+
+
+@pytest.mark.parametrize("unit", ["us", "ns"])
+def test_store_datetime_fractional_secs(setup_path, unit):
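+ # the 123456-microsecond fractional part must survive the round trip
+ # at both microsecond and nanosecond resolution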
+ dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
+ dti = DatetimeIndex([dt], dtype=f"M8[{unit}]")
+ series = Series([0], index=dti)
+ with ensure_clean_store(setup_path) as store:
+ store["a"] = series
+ assert store["a"].index[0] == dt
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+def test_tseries_indices_series(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ idx = date_range("2020-01-01", periods=10)
+ ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
+ store["a"] = ser
+ result = store["a"]
+
+ tm.assert_series_equal(result, ser)
+ assert result.index.freq == ser.index.freq
+ tm.assert_class_equal(result.index, ser.index, obj="series index")
+
+ idx = period_range("2020-01-01", periods=10, freq="D")
+ ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx)
+ store["a"] = ser
+ result = store["a"]
+
+ tm.assert_series_equal(result, ser)
+ assert result.index.freq == ser.index.freq
+ tm.assert_class_equal(result.index, ser.index, obj="series index")
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+def test_tseries_indices_frame(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ idx = date_range("2020-01-01", periods=10)
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx
+ )
+ store["a"] = df
+ result = store["a"]
+
+ tm.assert_frame_equal(result, df)
+ assert result.index.freq == df.index.freq
+ tm.assert_class_equal(result.index, df.index, obj="dataframe index")
+
+ idx = period_range("2020-01-01", periods=10, freq="D")
+ df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 3)), idx)
+ store["a"] = df
+ result = store["a"]
+
+ tm.assert_frame_equal(result, df)
+ assert result.index.freq == df.index.freq
+ tm.assert_class_equal(result.index, df.index, obj="dataframe index")
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5613daf62207319d9dc1e10beea669cd7248f38
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/io/pytables/test_timezones.py
@@ -0,0 +1,378 @@
+from datetime import (
+ date,
+ timedelta,
+)
+
+import numpy as np
+import pytest
+
+from pandas._libs.tslibs.timezones import maybe_get_tz
+import pandas.util._test_decorators as td
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ DatetimeIndex,
+ Series,
+ Timestamp,
+ date_range,
+)
+import pandas._testing as tm
+from pandas.tests.io.pytables.common import (
+ _maybe_remove,
+ ensure_clean_store,
+)
+
+
+def _compare_with_tz(a, b):
+ tm.assert_frame_equal(a, b)
+
+ # compare the zones on each element
+ for c in a.columns:
+ for i in a.index:
+ a_e = a.loc[i, c]
+ b_e = b.loc[i, c]
+ if not (a_e == b_e and a_e.tz == b_e.tz):
+ raise AssertionError(f"invalid tz comparison [{a_e}] [{b_e}]")
+
+
+# Use maybe_get_tz instead of dateutil.tz.gettz to work around Windows
+# filename issues.
+gettz_dateutil = lambda x: maybe_get_tz("dateutil/" + x)
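+# a bare zone name is resolved with pytz, the pandas default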
+gettz_pytz = lambda x: x
+
+
+@pytest.mark.parametrize("gettz", [gettz_dateutil, gettz_pytz])
+def test_append_with_timezones(setup_path, gettz):
+ # timezone-aware Timestamps as column values
+
+ # Single-tzinfo, no DST transition
+ df_est = DataFrame(
+ {
+ "A": [
+ Timestamp("20130102 2:00:00", tz=gettz("US/Eastern")).as_unit("ns")
+ + timedelta(hours=1) * i
+ for i in range(5)
+ ]
+ }
+ )
+
+ # frame with all columns having same tzinfo, but different sides
+ # of DST transition
+ df_crosses_dst = DataFrame(
+ {
+ "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"),
+ "B": Timestamp("20130603", tz=gettz("US/Eastern")).as_unit("ns"),
+ },
+ index=range(5),
+ )
+
+ df_mixed_tz = DataFrame(
+ {
+ "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"),
+ "B": Timestamp("20130102", tz=gettz("EET")).as_unit("ns"),
+ },
+ index=range(5),
+ )
+
+ df_different_tz = DataFrame(
+ {
+ "A": Timestamp("20130102", tz=gettz("US/Eastern")).as_unit("ns"),
+ "B": Timestamp("20130102", tz=gettz("CET")).as_unit("ns"),
+ },
+ index=range(5),
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ _maybe_remove(store, "df_tz")
+ store.append("df_tz", df_est, data_columns=["A"])
+ result = store["df_tz"]
+ _compare_with_tz(result, df_est)
+ tm.assert_frame_equal(result, df_est)
+
+ # select with tz aware
+ expected = df_est[df_est.A >= df_est.A[3]]
+ result = store.select("df_tz", where="A>=df_est.A[3]")
+ _compare_with_tz(result, expected)
+
+ # ensure we include dates in DST and STD time here.
+ _maybe_remove(store, "df_tz")
+ store.append("df_tz", df_crosses_dst)
+ result = store["df_tz"]
+ _compare_with_tz(result, df_crosses_dst)
+ tm.assert_frame_equal(result, df_crosses_dst)
+
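+ # the stored zone may round-trip under its pytz or dateutil spelling
+ # (US/Eastern may normalize to America/New_York), so the expected
+ # message allows both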
+ msg = (
+ r"invalid info for \[values_block_1\] for \[tz\], "
+ r"existing_value \[(dateutil/.*)?(US/Eastern|America/New_York)\] "
+ r"conflicts with new value \[(dateutil/.*)?EET\]"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df_tz", df_mixed_tz)
+
+ # this is ok: with data_columns each tz-aware column is stored
+ # separately, so the columns may carry different timezones
+ _maybe_remove(store, "df_tz")
+ store.append("df_tz", df_mixed_tz, data_columns=["A", "B"])
+ result = store["df_tz"]
+ _compare_with_tz(result, df_mixed_tz)
+ tm.assert_frame_equal(result, df_mixed_tz)
+
+ # cannot append when a column's timezone differs from the stored one
+ msg = (
+ r"invalid info for \[B\] for \[tz\], "
+ r"existing_value \[(dateutil/.*)?EET\] "
+ r"conflicts with new value \[(dateutil/.*)?CET\]"
+ )
+ with pytest.raises(ValueError, match=msg):
+ store.append("df_tz", df_different_tz)
+
+
+@pytest.mark.parametrize("gettz", [gettz_dateutil, gettz_pytz])
+def test_append_with_timezones_as_index(setup_path, gettz):
+ # GH#4098 example
+
+ dti = date_range("2000-1-1", periods=3, freq="h", tz=gettz("US/Eastern"))
+ dti = dti._with_freq(None) # freq doesn't round-trip
+
+ df = DataFrame({"A": Series(range(3), index=dti)})
+
+ with ensure_clean_store(setup_path) as store:
+ _maybe_remove(store, "df")
+ store.put("df", df)
+ result = store.select("df")
+ tm.assert_frame_equal(result, df)
+
+ _maybe_remove(store, "df")
+ store.append("df", df)
+ result = store.select("df")
+ tm.assert_frame_equal(result, df)
+
+
+def test_roundtrip_tz_aware_index(setup_path, unit):
+ # GH 17618
+ ts = Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
+ dti = DatetimeIndex([ts]).as_unit(unit)
+ df = DataFrame(data=[0], index=dti)
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("frame", df, format="fixed")
+ recons = store["frame"]
+ tm.assert_frame_equal(recons, df)
+
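+ # 2000-01-01 01:00 US/Eastern is 2000-01-01 06:00 UTC, i.e.
+ # 946706400 s (946706400000000000 ns) since the Unix epoch; dividing
+ # by denom rescales that instant to the stored resolution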
+ value = recons.index[0]._value
+ denom = {"ns": 1, "us": 1000, "ms": 10**6, "s": 10**9}[unit]
+ assert value == 946706400000000000 // denom
+
+
+def test_store_index_name_with_tz(setup_path):
+ # GH 13884
+ df = DataFrame({"A": [1, 2]})
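+ # the raw integers are interpreted as nanoseconds since the Unix epoch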
+ df.index = DatetimeIndex([1234567890123456787, 1234567890123456788])
+ df.index = df.index.tz_localize("UTC")
+ df.index.name = "foo"
+
+ with ensure_clean_store(setup_path) as store:
+ store.put("frame", df, format="table")
+ recons = store["frame"]
+ tm.assert_frame_equal(recons, df)
+
+
+def test_tseries_select_index_column(setup_path):
+ # GH7777
+ # selecting a UTC DatetimeIndex column did not preserve
+ # the UTC tzinfo set before storing
+
+ # check that no tz still works
+ rng = date_range("1/1/2000", "1/30/2000")
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("frame", frame)
+ result = store.select_column("frame", "index")
+ assert rng.tz == DatetimeIndex(result.values).tz
+
+ # check utc
+ rng = date_range("1/1/2000", "1/30/2000", tz="UTC")
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("frame", frame)
+ result = store.select_column("frame", "index")
+ assert rng.tz == result.dt.tz
+
+ # double check non-utc
+ rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern")
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store.append("frame", frame)
+ result = store.select_column("frame", "index")
+ assert rng.tz == result.dt.tz
+
+
+def test_timezones_fixed_format_frame_non_empty(setup_path):
+ with ensure_clean_store(setup_path) as store:
+ # index
+ rng = date_range("1/1/2000", "1/30/2000", tz="US/Eastern")
+ rng = rng._with_freq(None) # freq doesn't round-trip
+ df = DataFrame(
+ np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+ )
+ store["df"] = df
+ result = store["df"]
+ tm.assert_frame_equal(result, df)
+
+ # as data
+ # GH11411
+ _maybe_remove(store, "df")
+ df = DataFrame(
+ {
+ "A": rng,
+ "B": rng.tz_convert("UTC").tz_localize(None),
+ "C": rng.tz_convert("CET"),
+ "D": range(len(rng)),
+ },
+ index=rng,
+ )
+ store["df"] = df
+ result = store["df"]
+ tm.assert_frame_equal(result, df)
+
+
+def test_timezones_fixed_format_empty(setup_path, tz_aware_fixture, frame_or_series):
+ # GH 20594
+
+ dtype = pd.DatetimeTZDtype(tz=tz_aware_fixture)
+
+ obj = Series(dtype=dtype, name="A")
+ if frame_or_series is DataFrame:
+ obj = obj.to_frame()
+
+ with ensure_clean_store(setup_path) as store:
+ store["obj"] = obj
+ result = store["obj"]
+ tm.assert_equal(result, obj)
+
+
+def test_timezones_fixed_format_series_nonempty(setup_path, tz_aware_fixture):
+ # GH 20594
+
+ dtype = pd.DatetimeTZDtype(tz=tz_aware_fixture)
+
+ with ensure_clean_store(setup_path) as store:
+ s = Series([0], dtype=dtype)
+ store["s"] = s
+ result = store["s"]
+ tm.assert_series_equal(result, s)
+
+
+def test_fixed_offset_tz(setup_path):
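+ # a constant "-07:00" offset parses to a fixed-offset timezone, which
+ # should survive the round trip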
+ rng = date_range("1/1/2000 00:00:00-07:00", "1/30/2000 00:00:00-07:00")
+ frame = DataFrame(
+ np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng
+ )
+
+ with ensure_clean_store(setup_path) as store:
+ store["frame"] = frame
+ recons = store["frame"]
+ tm.assert_index_equal(recons.index, rng)
+ assert rng.tz == recons.index.tz
+
+
+@td.skip_if_windows
+def test_store_timezone(setup_path):
+ # GH2852
+ # storing a datetime.date while a timezone is set resets the value
+ # when it is read back in a different timezone
+
+ # original method
+ with ensure_clean_store(setup_path) as store:
+ today = date(2013, 9, 10)
+ df = DataFrame([1, 2, 3], index=[today, today, today])
+ store["obj1"] = df
+ result = store["obj1"]
+ tm.assert_frame_equal(result, df)
+
+ # with tz setting
+ with ensure_clean_store(setup_path) as store:
+ with tm.set_timezone("EST5EDT"):
+ today = date(2013, 9, 10)
+ df = DataFrame([1, 2, 3], index=[today, today, today])
+ store["obj1"] = df
+
+ with tm.set_timezone("CST6CDT"):
+ result = store["obj1"]
+
+ tm.assert_frame_equal(result, df)
+
+
+def test_legacy_datetimetz_object(datapath):
+ # legacy HDF file created with pandas < 0.17.0
+ # GH#8260
+ expected = DataFrame(
+ {
+ "A": Timestamp("20130102", tz="US/Eastern").as_unit("ns"),
+ "B": Timestamp("20130603", tz="CET").as_unit("ns"),
+ },
+ index=range(5),
+ )
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf", "datetimetz_object.h5"), mode="r"
+ ) as store:
+ result = store["df"]
+ tm.assert_frame_equal(result, expected)
+
+
+def test_dst_transitions(setup_path):
+ # make sure we are not failing on transitions
+ with ensure_clean_store(setup_path) as store:
+ times = date_range(
+ "2013-10-26 23:00",
+ "2013-10-27 01:00",
+ tz="Europe/London",
+ freq="h",
+ ambiguous="infer",
+ )
+ times = times._with_freq(None) # freq doesn't round-trip
+
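+ # exercise stamps on the transition hour as well as stamps shifted
+ # off the hour boundary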
+ for i in [times, times + pd.Timedelta("10min")]:
+ _maybe_remove(store, "df")
+ df = DataFrame({"A": range(len(i)), "B": i}, index=i)
+ store.append("df", df)
+ result = store.select("df")
+ tm.assert_frame_equal(result, df)
+
+
+def test_read_with_where_tz_aware_index(tmp_path, setup_path):
+ # GH 11926
+ periods = 10
+ dts = date_range("20151201", periods=periods, freq="D", tz="UTC")
+ mi = pd.MultiIndex.from_arrays([dts, range(periods)], names=["DATE", "NO"])
+ expected = DataFrame({"MYCOL": 0}, index=mi)
+
+ key = "mykey"
+ path = tmp_path / setup_path
+ with pd.HDFStore(path) as store:
+ store.append(key, expected, format="table", append=True)
+ result = pd.read_hdf(path, key, where="DATE > 20151130")
+ tm.assert_frame_equal(result, expected)
+
+
+def test_py2_created_with_datetimez(datapath):
+ # The test HDF5 file was created in Python 2; it previously could not
+ # be read in Python 3 (GH#26443), so check that it loads correctly.
+ index = DatetimeIndex(["2019-01-01T18:00"], dtype="M8[ns, America/New_York]")
+ expected = DataFrame({"data": 123}, index=index)
+ with ensure_clean_store(
+ datapath("io", "data", "legacy_hdf", "gh26443.h5"), mode="r"
+ ) as store:
+ result = store["key"]
+ tm.assert_frame_equal(result, expected)