diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_117_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_117_mp_rank_02_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9190e943fcd21706942944695afc0077ec18721a
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_117_mp_rank_02_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8634498c78ef5764c676db79ff9d360b4afd7e3a4e6c68c11730145ccfab4d01
+size 41830340
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/__init__.py b/venv/lib/python3.10/site-packages/pandas/compat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb890c8b8c0ab5cad3a72f7eeb33dd3824ed20af
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/compat/__init__.py
@@ -0,0 +1,197 @@
+"""
+compat
+======
+
+Cross-compatible functions for different versions of Python.
+
+Other items:
+* platform checker
+"""
+from __future__ import annotations
+
+import os
+import platform
+import sys
+from typing import TYPE_CHECKING
+
+from pandas.compat._constants import (
+ IS64,
+ ISMUSL,
+ PY310,
+ PY311,
+ PY312,
+ PYPY,
+)
+import pandas.compat.compressors
+from pandas.compat.numpy import is_numpy_dev
+from pandas.compat.pyarrow import (
+ pa_version_under10p1,
+ pa_version_under11p0,
+ pa_version_under13p0,
+ pa_version_under14p0,
+ pa_version_under14p1,
+ pa_version_under16p0,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import F
+
+
+def set_function_name(f: F, name: str, cls: type) -> F:
+ """
+ Bind the name/qualname attributes of the function.
+ """
+ f.__name__ = name
+ f.__qualname__ = f"{cls.__name__}.{name}"
+ f.__module__ = cls.__module__
+ return f
+
+
+def is_platform_little_endian() -> bool:
+ """
+ Checking if the running platform is little endian.
+
+ Returns
+ -------
+ bool
+ True if the running platform is little endian.
+ """
+ return sys.byteorder == "little"
+
+
+def is_platform_windows() -> bool:
+ """
+ Checking if the running platform is windows.
+
+ Returns
+ -------
+ bool
+ True if the running platform is windows.
+ """
+ return sys.platform in ["win32", "cygwin"]
+
+
+def is_platform_linux() -> bool:
+ """
+ Checking if the running platform is linux.
+
+ Returns
+ -------
+ bool
+ True if the running platform is linux.
+ """
+ return sys.platform == "linux"
+
+
+def is_platform_mac() -> bool:
+ """
+ Checking if the running platform is mac.
+
+ Returns
+ -------
+ bool
+ True if the running platform is mac.
+ """
+ return sys.platform == "darwin"
+
+
+def is_platform_arm() -> bool:
+ """
+    Checking if the running platform uses ARM architecture.
+
+ Returns
+ -------
+ bool
+ True if the running platform uses ARM architecture.
+ """
+ return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith(
+ "armv"
+ )
+
+
+def is_platform_power() -> bool:
+ """
+    Checking if the running platform uses Power architecture.
+
+ Returns
+ -------
+ bool
+        True if the running platform uses Power architecture.
+ """
+ return platform.machine() in ("ppc64", "ppc64le")
+
+
+def is_ci_environment() -> bool:
+ """
+ Checking if running in a continuous integration environment by checking
+ the PANDAS_CI environment variable.
+
+ Returns
+ -------
+ bool
+        True if running in a continuous integration environment.
+ """
+ return os.environ.get("PANDAS_CI", "0") == "1"
+
+
+def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]:
+ """
+ Importing the `LZMAFile` class from the `lzma` module.
+
+ Returns
+ -------
+ class
+ The `LZMAFile` class from the `lzma` module.
+
+ Raises
+ ------
+ RuntimeError
+        If the `lzma` module could not be imported or does not exist.
+ """
+ if not pandas.compat.compressors.has_lzma:
+ raise RuntimeError(
+ "lzma module not available. "
+ "A Python re-install with the proper dependencies, "
+ "might be required to solve this issue."
+ )
+ return pandas.compat.compressors.LZMAFile
+
+
+def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]:
+ """
+ Importing the `BZ2File` class from the `bz2` module.
+
+ Returns
+ -------
+ class
+ The `BZ2File` class from the `bz2` module.
+
+ Raises
+ ------
+ RuntimeError
+        If the `bz2` module could not be imported or does not exist.
+ """
+ if not pandas.compat.compressors.has_bz2:
+ raise RuntimeError(
+ "bz2 module not available. "
+ "A Python re-install with the proper dependencies, "
+ "might be required to solve this issue."
+ )
+ return pandas.compat.compressors.BZ2File
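+
+# Hedged usage sketch (illustrative, not part of pandas): both getters
+# return the patched file classes from pandas.compat.compressors, raising
+# RuntimeError when the interpreter was built without the stdlib module.
+# The path below is hypothetical.
+#
+#     LZMAFile = get_lzma_file()
+#     with LZMAFile("/tmp/example.xz", mode="wb") as fh:
+#         fh.write(b"payload")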
+
+
+__all__ = [
+ "is_numpy_dev",
+ "pa_version_under10p1",
+ "pa_version_under11p0",
+ "pa_version_under13p0",
+ "pa_version_under14p0",
+ "pa_version_under14p1",
+ "pa_version_under16p0",
+ "IS64",
+ "ISMUSL",
+ "PY310",
+ "PY311",
+ "PY312",
+ "PYPY",
+]
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc38c1b3434e6b0c7d738ff6032a20df3e057fae
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e1ce1d907b11cb86fde5dd511864061eaa83c15
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_constants.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_optional.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_optional.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d837331e404776542ac7e2896f4f5636ac7043f2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_optional.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9392d05a4c08565e4caec32e94de630957509892
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pickle_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pickle_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..feb1691b1916580a55d00e2a7beaf8c730685629
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pickle_compat.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pyarrow.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pyarrow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b27bfbd13a5ffd9ecdce31a004fd68ce39e2e682
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pyarrow.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/_constants.py b/venv/lib/python3.10/site-packages/pandas/compat/_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bc3fbaaefebf69d8ebd622406dc9357237add1a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/compat/_constants.py
@@ -0,0 +1,30 @@
+"""
+_constants
+==========
+
+Constants relevant for the Python implementation.
+"""
+
+from __future__ import annotations
+
+import platform
+import sys
+import sysconfig
+
+IS64 = sys.maxsize > 2**32
+
+PY310 = sys.version_info >= (3, 10)
+PY311 = sys.version_info >= (3, 11)
+PY312 = sys.version_info >= (3, 12)
+PYPY = platform.python_implementation() == "PyPy"
+ISMUSL = "musl" in (sysconfig.get_config_var("HOST_GNU_TYPE") or "")
+REF_COUNT = 2 if PY311 else 3
+
+__all__ = [
+ "IS64",
+ "ISMUSL",
+ "PY310",
+ "PY311",
+ "PY312",
+ "PYPY",
+]
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/_optional.py b/venv/lib/python3.10/site-packages/pandas/compat/_optional.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bc6cd46f09a7e4b103658f9c2ec9a69d93d00b9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/compat/_optional.py
@@ -0,0 +1,168 @@
+from __future__ import annotations
+
+import importlib
+import sys
+from typing import TYPE_CHECKING
+import warnings
+
+from pandas.util._exceptions import find_stack_level
+
+from pandas.util.version import Version
+
+if TYPE_CHECKING:
+ import types
+
+# Update install.rst & setup.cfg when updating versions!
+
+VERSIONS = {
+ "adbc-driver-postgresql": "0.8.0",
+ "adbc-driver-sqlite": "0.8.0",
+ "bs4": "4.11.2",
+ "blosc": "1.21.3",
+ "bottleneck": "1.3.6",
+ "dataframe-api-compat": "0.1.7",
+ "fastparquet": "2022.12.0",
+ "fsspec": "2022.11.0",
+ "html5lib": "1.1",
+ "hypothesis": "6.46.1",
+ "gcsfs": "2022.11.0",
+ "jinja2": "3.1.2",
+ "lxml.etree": "4.9.2",
+ "matplotlib": "3.6.3",
+ "numba": "0.56.4",
+ "numexpr": "2.8.4",
+ "odfpy": "1.4.1",
+ "openpyxl": "3.1.0",
+ "pandas_gbq": "0.19.0",
+ "psycopg2": "2.9.6", # (dt dec pq3 ext lo64)
+ "pymysql": "1.0.2",
+ "pyarrow": "10.0.1",
+ "pyreadstat": "1.2.0",
+ "pytest": "7.3.2",
+ "python-calamine": "0.1.7",
+ "pyxlsb": "1.0.10",
+ "s3fs": "2022.11.0",
+ "scipy": "1.10.0",
+ "sqlalchemy": "2.0.0",
+ "tables": "3.8.0",
+ "tabulate": "0.9.0",
+ "xarray": "2022.12.0",
+ "xlrd": "2.0.1",
+ "xlsxwriter": "3.0.5",
+ "zstandard": "0.19.0",
+ "tzdata": "2022.7",
+ "qtpy": "2.3.0",
+ "pyqt5": "5.15.9",
+}
+
+# A mapping from import name to package name (on PyPI) for packages where
+# these two names are different.
+
+INSTALL_MAPPING = {
+ "bs4": "beautifulsoup4",
+ "bottleneck": "Bottleneck",
+ "jinja2": "Jinja2",
+ "lxml.etree": "lxml",
+ "odf": "odfpy",
+ "pandas_gbq": "pandas-gbq",
+ "python_calamine": "python-calamine",
+ "sqlalchemy": "SQLAlchemy",
+ "tables": "pytables",
+}
+
+
+def get_version(module: types.ModuleType) -> str:
+ version = getattr(module, "__version__", None)
+
+ if version is None:
+ raise ImportError(f"Can't determine version for {module.__name__}")
+ if module.__name__ == "psycopg2":
+        # psycopg2 appends " (dt dec pq3 ext lo64)" to its version
+ version = version.split()[0]
+ return version
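+
+# Hedged sketch (illustrative): get_version reads ``__version__`` from an
+# already-imported module object.
+#
+#     import numpy
+#     get_version(numpy)  # e.g. "1.26.4"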
+
+
+def import_optional_dependency(
+ name: str,
+ extra: str = "",
+ errors: str = "raise",
+ min_version: str | None = None,
+):
+ """
+ Import an optional dependency.
+
+ By default, if a dependency is missing an ImportError with a nice
+ message will be raised. If a dependency is present, but too old,
+ we raise.
+
+ Parameters
+ ----------
+ name : str
+ The module name.
+ extra : str
+ Additional text to include in the ImportError message.
+ errors : str {'raise', 'warn', 'ignore'}
+ What to do when a dependency is not found or its version is too old.
+
+ * raise : Raise an ImportError
+        * warn : Only applicable when a module's version is too old.
+ Warns that the version is too old and returns None
+ * ignore: If the module is not installed, return None, otherwise,
+ return the module, even if the version is too old.
+ It's expected that users validate the version locally when
+ using ``errors="ignore"`` (see. ``io/html.py``)
+ min_version : str, default None
+ Specify a minimum version that is different from the global pandas
+        minimum version required.
+
+ Returns
+ -------
+ maybe_module : Optional[ModuleType]
+ The imported module, when found and the version is correct.
+        None is returned when the package is not found and ``errors``
+        is not ``'raise'``, or when the package's version is too old
+        and ``errors`` is ``'warn'`` or ``'ignore'``.
+ """
+ assert errors in {"warn", "raise", "ignore"}
+
+ package_name = INSTALL_MAPPING.get(name)
+ install_name = package_name if package_name is not None else name
+
+ msg = (
+ f"Missing optional dependency '{install_name}'. {extra} "
+ f"Use pip or conda to install {install_name}."
+ )
+ try:
+ module = importlib.import_module(name)
+ except ImportError:
+ if errors == "raise":
+ raise ImportError(msg)
+ return None
+
+    # Handle submodules: if we have a submodule, grab the parent module from sys.modules
+ parent = name.split(".")[0]
+ if parent != name:
+ install_name = parent
+ module_to_get = sys.modules[install_name]
+ else:
+ module_to_get = module
+ minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
+ if minimum_version:
+ version = get_version(module_to_get)
+ if version and Version(version) < Version(minimum_version):
+ msg = (
+ f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
+ f"(version '{version}' currently installed)."
+ )
+ if errors == "warn":
+ warnings.warn(
+ msg,
+ UserWarning,
+ stacklevel=find_stack_level(),
+ )
+ return None
+ elif errors == "raise":
+ raise ImportError(msg)
+ else:
+ return None
+
+ return module
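+
+# Hedged usage sketch (illustrative, not part of pandas): the three
+# ``errors`` modes described above.
+#
+#     lxml = import_optional_dependency("lxml.etree")           # raise if missing
+#     bs4 = import_optional_dependency("bs4", errors="ignore")  # None if missing
+#     pa = import_optional_dependency(
+#         "pyarrow", errors="warn", min_version="10.0.1"
+#     )  # warn and return None if installed but older than the floor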
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/compressors.py b/venv/lib/python3.10/site-packages/pandas/compat/compressors.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f31e34c092c9672559ca2f5194cb1da7083d03b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/compat/compressors.py
@@ -0,0 +1,77 @@
+"""
+Patched ``BZ2File`` and ``LZMAFile`` to handle pickle protocol 5.
+"""
+
+from __future__ import annotations
+
+from pickle import PickleBuffer
+
+from pandas.compat._constants import PY310
+
+try:
+ import bz2
+
+ has_bz2 = True
+except ImportError:
+ has_bz2 = False
+
+try:
+ import lzma
+
+ has_lzma = True
+except ImportError:
+ has_lzma = False
+
+
+def flatten_buffer(
+ b: bytes | bytearray | memoryview | PickleBuffer,
+) -> bytes | bytearray | memoryview:
+ """
+ Return some 1-D `uint8` typed buffer.
+
+ Coerces anything that does not match that description to one that does
+ without copying if possible (otherwise will copy).
+ """
+
+ if isinstance(b, (bytes, bytearray)):
+ return b
+
+ if not isinstance(b, PickleBuffer):
+ b = PickleBuffer(b)
+
+ try:
+ # coerce to 1-D `uint8` C-contiguous `memoryview` zero-copy
+ return b.raw()
+ except BufferError:
+ # perform in-memory copy if buffer is not contiguous
+ return memoryview(b).tobytes("A")
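+
+# Hedged sketch (illustrative): flattening a 2-D buffer. The numpy import
+# is for the example only.
+#
+#     import numpy as np
+#     mv = memoryview(np.zeros((2, 2), dtype="uint8"))
+#     flat = flatten_buffer(mv)  # 1-D uint8 view, zero-copy when contiguous
+#     assert len(flat) == 4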
+
+
+if has_bz2:
+
+ class BZ2File(bz2.BZ2File):
+ if not PY310:
+
+ def write(self, b) -> int:
+ # Workaround issue where `bz2.BZ2File` expects `len`
+ # to return the number of bytes in `b` by converting
+ # `b` into something that meets that constraint with
+ # minimal copying.
+ #
+ # Note: This is fixed in Python 3.10.
+ return super().write(flatten_buffer(b))
+
+
+if has_lzma:
+
+ class LZMAFile(lzma.LZMAFile):
+ if not PY310:
+
+ def write(self, b) -> int:
+ # Workaround issue where `lzma.LZMAFile` expects `len`
+ # to return the number of bytes in `b` by converting
+ # `b` into something that meets that constraint with
+ # minimal copying.
+ #
+ # Note: This is fixed in Python 3.10.
+ return super().write(flatten_buffer(b))
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py b/venv/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3014bd652d8c46b49bcf34e8d050679f70e3f7da
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py
@@ -0,0 +1,53 @@
+""" support numpy compatibility across versions """
+import warnings
+
+import numpy as np
+
+from pandas.util.version import Version
+
+# numpy versioning
+_np_version = np.__version__
+_nlv = Version(_np_version)
+np_version_lt1p23 = _nlv < Version("1.23")
+np_version_gte1p24 = _nlv >= Version("1.24")
+np_version_gte1p24p3 = _nlv >= Version("1.24.3")
+np_version_gte1p25 = _nlv >= Version("1.25")
+np_version_gt2 = _nlv >= Version("2.0.0.dev0")
+is_numpy_dev = _nlv.dev is not None
+_min_numpy_ver = "1.22.4"
+
+
+if _nlv < Version(_min_numpy_ver):
+ raise ImportError(
+ f"this version of pandas is incompatible with numpy < {_min_numpy_ver}\n"
+ f"your numpy version is {_np_version}.\n"
+ f"Please upgrade numpy to >= {_min_numpy_ver} to use this pandas version"
+ )
+
+
+np_long: type
+np_ulong: type
+
+if np_version_gt2:
+ try:
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ r".*In the future `np\.long` will be defined as.*",
+ FutureWarning,
+ )
+ np_long = np.long # type: ignore[attr-defined]
+ np_ulong = np.ulong # type: ignore[attr-defined]
+ except AttributeError:
+ np_long = np.int_
+ np_ulong = np.uint
+else:
+ np_long = np.int_
+ np_ulong = np.uint
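+
+# Hedged usage sketch: downstream code can use these aliases without caring
+# whether numpy 1.x or 2.x is installed.
+#
+#     from pandas.compat.numpy import np_long
+#     np.arange(3, dtype=np_long)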
+
+
+__all__ = [
+ "np",
+ "_np_version",
+ "is_numpy_dev",
+]
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bfb00524c6b99d062fbd0f2636c4c4ac39b27ac
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/function.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/function.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a601c287a38c63c47a1f1c413f716843f444b465
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/function.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/numpy/function.py b/venv/lib/python3.10/site-packages/pandas/compat/numpy/function.py
new file mode 100644
index 0000000000000000000000000000000000000000..4df30f7f4a8a79984ca6de521ac058bd30fd8faf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/compat/numpy/function.py
@@ -0,0 +1,418 @@
+"""
+For compatibility with numpy libraries, pandas functions or methods have to
+accept '*args' and '**kwargs' parameters to accommodate numpy arguments that
+are not actually used or respected in the pandas implementation.
+
+To ensure that users do not abuse these parameters, validation is performed in
+'validators.py' to make sure that any extra parameters passed correspond ONLY
+to those in the numpy signature. Part of that validation includes whether or
+not the user attempted to pass in non-default values for these extraneous
+parameters. As we want to discourage users from relying on these parameters
+when calling the pandas implementation, we want them only to pass in the
+default values for these parameters.
+
+This module provides a set of commonly used default arguments for functions and
+methods that are spread throughout the codebase. This module will make it
+easier to adjust to future upstream changes in the analogous numpy signatures.
+"""
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ TypeVar,
+ cast,
+ overload,
+)
+
+import numpy as np
+from numpy import ndarray
+
+from pandas._libs.lib import (
+ is_bool,
+ is_integer,
+)
+from pandas.errors import UnsupportedFunctionCall
+from pandas.util._validators import (
+ validate_args,
+ validate_args_and_kwargs,
+ validate_kwargs,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ Axis,
+ AxisInt,
+ )
+
+ AxisNoneT = TypeVar("AxisNoneT", Axis, None)
+
+
+class CompatValidator:
+ def __init__(
+ self,
+ defaults,
+ fname=None,
+ method: str | None = None,
+ max_fname_arg_count=None,
+ ) -> None:
+ self.fname = fname
+ self.method = method
+ self.defaults = defaults
+ self.max_fname_arg_count = max_fname_arg_count
+
+ def __call__(
+ self,
+ args,
+ kwargs,
+ fname=None,
+ max_fname_arg_count=None,
+ method: str | None = None,
+ ) -> None:
+ if not args and not kwargs:
+ return None
+
+ fname = self.fname if fname is None else fname
+ max_fname_arg_count = (
+ self.max_fname_arg_count
+ if max_fname_arg_count is None
+ else max_fname_arg_count
+ )
+ method = self.method if method is None else method
+
+ if method == "args":
+ validate_args(fname, args, max_fname_arg_count, self.defaults)
+ elif method == "kwargs":
+ validate_kwargs(fname, kwargs, self.defaults)
+ elif method == "both":
+ validate_args_and_kwargs(
+ fname, args, kwargs, max_fname_arg_count, self.defaults
+ )
+ else:
+ raise ValueError(f"invalid validation method '{method}'")
+
+
+ARGMINMAX_DEFAULTS = {"out": None}
+validate_argmin = CompatValidator(
+ ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1
+)
+validate_argmax = CompatValidator(
+ ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1
+)
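+
+# Hedged sketch (illustrative): when numpy dispatches to a pandas method,
+# e.g. np.argmax(ser, out=buf), numpy-only arguments arrive via
+# *args/**kwargs and must carry their numpy defaults.
+#
+#     validate_argmax((), {"out": None})  # passes: default value
+#     validate_argmax((), {"out": 1})     # raises: non-default numpy-only arg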
+
+
+def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]:
+ if isinstance(skipna, ndarray) or skipna is None:
+ args = (skipna,) + args
+ skipna = True
+
+ return skipna, args
+
+
+def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
+ """
+ If 'Series.argmin' is called via the 'numpy' library, the third parameter
+ in its signature is 'out', which takes either an ndarray or 'None', so
+ check if the 'skipna' parameter is either an instance of ndarray or is
+ None, since 'skipna' itself should be a boolean
+ """
+ skipna, args = process_skipna(skipna, args)
+ validate_argmin(args, kwargs)
+ return skipna
+
+
+def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
+ """
+ If 'Series.argmax' is called via the 'numpy' library, the third parameter
+ in its signature is 'out', which takes either an ndarray or 'None', so
+ check if the 'skipna' parameter is either an instance of ndarray or is
+ None, since 'skipna' itself should be a boolean
+ """
+ skipna, args = process_skipna(skipna, args)
+ validate_argmax(args, kwargs)
+ return skipna
+
+
+ARGSORT_DEFAULTS: dict[str, int | str | None] = {}
+ARGSORT_DEFAULTS["axis"] = -1
+ARGSORT_DEFAULTS["kind"] = "quicksort"
+ARGSORT_DEFAULTS["order"] = None
+ARGSORT_DEFAULTS["kind"] = None
+ARGSORT_DEFAULTS["stable"] = None
+
+
+validate_argsort = CompatValidator(
+ ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both"
+)
+
+# two different signatures of argsort, this second validation for when the
+# `kind` param is supported
+ARGSORT_DEFAULTS_KIND: dict[str, int | None] = {}
+ARGSORT_DEFAULTS_KIND["axis"] = -1
+ARGSORT_DEFAULTS_KIND["order"] = None
+ARGSORT_DEFAULTS_KIND["stable"] = None
+validate_argsort_kind = CompatValidator(
+ ARGSORT_DEFAULTS_KIND, fname="argsort", max_fname_arg_count=0, method="both"
+)
+
+
+def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool:
+ """
+ If 'Categorical.argsort' is called via the 'numpy' library, the first
+ parameter in its signature is 'axis', which takes either an integer or
+ 'None', so check if the 'ascending' parameter has either integer type or is
+ None, since 'ascending' itself should be a boolean
+ """
+ if is_integer(ascending) or ascending is None:
+ args = (ascending,) + args
+ ascending = True
+
+ validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
+ ascending = cast(bool, ascending)
+ return ascending
+
+
+CLIP_DEFAULTS: dict[str, Any] = {"out": None}
+validate_clip = CompatValidator(
+ CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3
+)
+
+
+@overload
+def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None:
+ ...
+
+
+@overload
+def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT:
+ ...
+
+
+def validate_clip_with_axis(
+ axis: ndarray | AxisNoneT, args, kwargs
+) -> AxisNoneT | None:
+ """
+ If 'NDFrame.clip' is called via the numpy library, the third parameter in
+    its signature is 'out', which can take an ndarray, so check if the 'axis'
+ parameter is an instance of ndarray, since 'axis' itself should either be
+ an integer or None
+ """
+ if isinstance(axis, ndarray):
+ args = (axis,) + args
+ # error: Incompatible types in assignment (expression has type "None",
+ # variable has type "Union[ndarray[Any, Any], str, int]")
+ axis = None # type: ignore[assignment]
+
+ validate_clip(args, kwargs)
+ # error: Incompatible return value type (got "Union[ndarray[Any, Any],
+ # str, int]", expected "Union[str, int, None]")
+ return axis # type: ignore[return-value]
+
+
+CUM_FUNC_DEFAULTS: dict[str, Any] = {}
+CUM_FUNC_DEFAULTS["dtype"] = None
+CUM_FUNC_DEFAULTS["out"] = None
+validate_cum_func = CompatValidator(
+ CUM_FUNC_DEFAULTS, method="both", max_fname_arg_count=1
+)
+validate_cumsum = CompatValidator(
+ CUM_FUNC_DEFAULTS, fname="cumsum", method="both", max_fname_arg_count=1
+)
+
+
+def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
+ """
+ If this function is called via the 'numpy' library, the third parameter in
+ its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so
+ check if the 'skipna' parameter is a boolean or not
+ """
+ if not is_bool(skipna):
+ args = (skipna,) + args
+ skipna = True
+ elif isinstance(skipna, np.bool_):
+ skipna = bool(skipna)
+
+ validate_cum_func(args, kwargs, fname=name)
+ return skipna
+
+
+ALLANY_DEFAULTS: dict[str, bool | None] = {}
+ALLANY_DEFAULTS["dtype"] = None
+ALLANY_DEFAULTS["out"] = None
+ALLANY_DEFAULTS["keepdims"] = False
+ALLANY_DEFAULTS["axis"] = None
+validate_all = CompatValidator(
+ ALLANY_DEFAULTS, fname="all", method="both", max_fname_arg_count=1
+)
+validate_any = CompatValidator(
+ ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1
+)
+
+LOGICAL_FUNC_DEFAULTS = {"out": None, "keepdims": False}
+validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs")
+
+MINMAX_DEFAULTS = {"axis": None, "dtype": None, "out": None, "keepdims": False}
+validate_min = CompatValidator(
+ MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1
+)
+validate_max = CompatValidator(
+ MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
+)
+
+RESHAPE_DEFAULTS: dict[str, str] = {"order": "C"}
+validate_reshape = CompatValidator(
+ RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
+)
+
+REPEAT_DEFAULTS: dict[str, Any] = {"axis": None}
+validate_repeat = CompatValidator(
+ REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
+)
+
+ROUND_DEFAULTS: dict[str, Any] = {"out": None}
+validate_round = CompatValidator(
+ ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
+)
+
+SORT_DEFAULTS: dict[str, int | str | None] = {}
+SORT_DEFAULTS["axis"] = -1
+SORT_DEFAULTS["kind"] = "quicksort"
+SORT_DEFAULTS["order"] = None
+validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")
+
+STAT_FUNC_DEFAULTS: dict[str, Any | None] = {}
+STAT_FUNC_DEFAULTS["dtype"] = None
+STAT_FUNC_DEFAULTS["out"] = None
+
+SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
+SUM_DEFAULTS["axis"] = None
+SUM_DEFAULTS["keepdims"] = False
+SUM_DEFAULTS["initial"] = None
+
+PROD_DEFAULTS = SUM_DEFAULTS.copy()
+
+MEAN_DEFAULTS = SUM_DEFAULTS.copy()
+
+MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
+MEDIAN_DEFAULTS["overwrite_input"] = False
+MEDIAN_DEFAULTS["keepdims"] = False
+
+STAT_FUNC_DEFAULTS["keepdims"] = False
+
+validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method="kwargs")
+validate_sum = CompatValidator(
+ SUM_DEFAULTS, fname="sum", method="both", max_fname_arg_count=1
+)
+validate_prod = CompatValidator(
+ PROD_DEFAULTS, fname="prod", method="both", max_fname_arg_count=1
+)
+validate_mean = CompatValidator(
+ MEAN_DEFAULTS, fname="mean", method="both", max_fname_arg_count=1
+)
+validate_median = CompatValidator(
+ MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1
+)
+
+STAT_DDOF_FUNC_DEFAULTS: dict[str, bool | None] = {}
+STAT_DDOF_FUNC_DEFAULTS["dtype"] = None
+STAT_DDOF_FUNC_DEFAULTS["out"] = None
+STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False
+validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs")
+
+TAKE_DEFAULTS: dict[str, str | None] = {}
+TAKE_DEFAULTS["out"] = None
+TAKE_DEFAULTS["mode"] = "raise"
+validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
+
+
+def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool:
+ """
+ If this function is called via the 'numpy' library, the third parameter in
+ its signature is 'axis', which takes either an ndarray or 'None', so check
+ if the 'convert' parameter is either an instance of ndarray or is None
+ """
+ if isinstance(convert, ndarray) or convert is None:
+ args = (convert,) + args
+ convert = True
+
+ validate_take(args, kwargs, max_fname_arg_count=3, method="both")
+ return convert
+
+
+TRANSPOSE_DEFAULTS = {"axes": None}
+validate_transpose = CompatValidator(
+ TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
+)
+
+
+def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None:
+ """
+ 'args' and 'kwargs' should be empty, except for allowed kwargs because all
+ of their necessary parameters are explicitly listed in the function
+ signature
+ """
+ if allowed is None:
+ allowed = []
+
+ kwargs = set(kwargs) - set(allowed)
+
+ if len(args) + len(kwargs) > 0:
+ raise UnsupportedFunctionCall(
+ "numpy operations are not valid with groupby. "
+ f"Use .groupby(...).{name}() instead"
+ )
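+
+# Hedged sketch (illustrative): any positional or non-allowed keyword
+# argument trips the guard; "tail"/"n" below are placeholder names.
+#
+#     validate_groupby_func("cumsum", (1,), {})  # raises UnsupportedFunctionCall
+#     validate_groupby_func("tail", (), {"n": 5}, allowed=["n"])  # passes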
+
+
+RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")
+
+
+def validate_resampler_func(method: str, args, kwargs) -> None:
+ """
+ 'args' and 'kwargs' should be empty because all of their necessary
+ parameters are explicitly listed in the function signature
+ """
+ if len(args) + len(kwargs) > 0:
+ if method in RESAMPLER_NUMPY_OPS:
+ raise UnsupportedFunctionCall(
+ "numpy operations are not valid with resample. "
+ f"Use .resample(...).{method}() instead"
+ )
+ raise TypeError("too many arguments passed in")
+
+
+def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None:
+ """
+ Ensure that the axis argument passed to min, max, argmin, or argmax is zero
+ or None, as otherwise it will be incorrectly ignored.
+
+ Parameters
+ ----------
+ axis : int or None
+ ndim : int, default 1
+
+ Raises
+ ------
+ ValueError
+ """
+ if axis is None:
+ return
+ if axis >= ndim or (axis < 0 and ndim + axis < 0):
+ raise ValueError(f"`axis` must be fewer than the number of dimensions ({ndim})")
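+
+# Hedged sketch (illustrative): for the default ndim=1, only axis in
+# {None, 0, -1} is accepted.
+#
+#     validate_minmax_axis(None)  # ok
+#     validate_minmax_axis(0)     # ok
+#     validate_minmax_axis(1)     # raises ValueError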
+
+
+_validation_funcs = {
+ "median": validate_median,
+ "mean": validate_mean,
+ "min": validate_min,
+ "max": validate_max,
+ "sum": validate_sum,
+ "prod": validate_prod,
+}
+
+
+def validate_func(fname, args, kwargs) -> None:
+ if fname not in _validation_funcs:
+ return validate_stat_func(args, kwargs, fname=fname)
+
+ validation_func = _validation_funcs[fname]
+ return validation_func(args, kwargs)
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/pickle_compat.py b/venv/lib/python3.10/site-packages/pandas/compat/pickle_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd98087c06c18634304c29d88837017a6952a4fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/compat/pickle_compat.py
@@ -0,0 +1,262 @@
+"""
+Support pre-0.12 series pickle compatibility.
+"""
+from __future__ import annotations
+
+import contextlib
+import copy
+import io
+import pickle as pkl
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas._libs.arrays import NDArrayBacked
+from pandas._libs.tslibs import BaseOffset
+
+from pandas import Index
+from pandas.core.arrays import (
+ DatetimeArray,
+ PeriodArray,
+ TimedeltaArray,
+)
+from pandas.core.internals import BlockManager
+
+if TYPE_CHECKING:
+ from collections.abc import Generator
+
+
+def load_reduce(self) -> None:
+ stack = self.stack
+ args = stack.pop()
+ func = stack[-1]
+
+ try:
+ stack[-1] = func(*args)
+ return
+ except TypeError as err:
+ # If we have a deprecated function,
+ # try to replace and try again.
+
+ msg = "_reconstruct: First argument must be a sub-type of ndarray"
+
+ if msg in str(err):
+ try:
+ cls = args[0]
+ stack[-1] = object.__new__(cls)
+ return
+ except TypeError:
+ pass
+ elif args and isinstance(args[0], type) and issubclass(args[0], BaseOffset):
+ # TypeError: object.__new__(Day) is not safe, use Day.__new__()
+ cls = args[0]
+ stack[-1] = cls.__new__(*args)
+ return
+ elif args and issubclass(args[0], PeriodArray):
+ cls = args[0]
+ stack[-1] = NDArrayBacked.__new__(*args)
+ return
+
+ raise
+
+
+# If classes are moved, provide compat here.
+_class_locations_map = {
+ ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
+ # 15477
+ ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
+ # Re-routing unpickle block logic to go through _unpickle_block instead
+ # for pandas <= 1.3.5
+ ("pandas.core.internals.blocks", "new_block"): (
+ "pandas._libs.internals",
+ "_unpickle_block",
+ ),
+ ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
+ ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
+ # 10890
+ ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
+ ("pandas.sparse.series", "SparseTimeSeries"): (
+ "pandas.core.sparse.series",
+ "SparseSeries",
+ ),
+ # 12588, extensions moving
+ ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
+ ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
+ # 18543 moving period
+ ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
+ ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
+ # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
+ ("pandas.tslib", "__nat_unpickle"): (
+ "pandas._libs.tslibs.nattype",
+ "__nat_unpickle",
+ ),
+ ("pandas._libs.tslib", "__nat_unpickle"): (
+ "pandas._libs.tslibs.nattype",
+ "__nat_unpickle",
+ ),
+ # 15998 top-level dirs moving
+ ("pandas.sparse.array", "SparseArray"): (
+ "pandas.core.arrays.sparse",
+ "SparseArray",
+ ),
+ ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
+ ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
+ ("pandas.indexes.numeric", "Int64Index"): (
+ "pandas.core.indexes.base",
+ "Index", # updated in 50775
+ ),
+ ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
+ ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
+ ("pandas.tseries.index", "_new_DatetimeIndex"): (
+ "pandas.core.indexes.datetimes",
+ "_new_DatetimeIndex",
+ ),
+ ("pandas.tseries.index", "DatetimeIndex"): (
+ "pandas.core.indexes.datetimes",
+ "DatetimeIndex",
+ ),
+ ("pandas.tseries.period", "PeriodIndex"): (
+ "pandas.core.indexes.period",
+ "PeriodIndex",
+ ),
+ # 19269, arrays moving
+ ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
+ # 19939, add timedeltaindex, float64index compat from 15998 move
+ ("pandas.tseries.tdi", "TimedeltaIndex"): (
+ "pandas.core.indexes.timedeltas",
+ "TimedeltaIndex",
+ ),
+ ("pandas.indexes.numeric", "Float64Index"): (
+ "pandas.core.indexes.base",
+ "Index", # updated in 50775
+ ),
+    # 50775, remove Int64Index, UInt64Index & Float64Index from codebase
+ ("pandas.core.indexes.numeric", "Int64Index"): (
+ "pandas.core.indexes.base",
+ "Index",
+ ),
+ ("pandas.core.indexes.numeric", "UInt64Index"): (
+ "pandas.core.indexes.base",
+ "Index",
+ ),
+ ("pandas.core.indexes.numeric", "Float64Index"): (
+ "pandas.core.indexes.base",
+ "Index",
+ ),
+ ("pandas.core.arrays.sparse.dtype", "SparseDtype"): (
+ "pandas.core.dtypes.dtypes",
+ "SparseDtype",
+ ),
+}
+
+
+# Our Unpickler subclass overrides methods and some dispatcher functions
+# for compat; it builds on a non-public class of the pickle module.
+
+
+class Unpickler(pkl._Unpickler):
+ def find_class(self, module, name):
+ # override superclass
+ key = (module, name)
+ module, name = _class_locations_map.get(key, key)
+ return super().find_class(module, name)
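+
+# Hedged sketch (illustrative): how the remapping resolves during
+# unpickling. A pickle referencing a pre-move location is transparently
+# redirected, so old pickles keep loading.
+#
+#     key = ("pandas.indexes.base", "Index")
+#     _class_locations_map.get(key, key)
+#     # -> ("pandas.core.indexes.base", "Index")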
+
+
+Unpickler.dispatch = copy.copy(Unpickler.dispatch)
+Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
+
+
+def load_newobj(self) -> None:
+ args = self.stack.pop()
+ cls = self.stack[-1]
+
+ # compat
+ if issubclass(cls, Index):
+ obj = object.__new__(cls)
+ elif issubclass(cls, DatetimeArray) and not args:
+ arr = np.array([], dtype="M8[ns]")
+ obj = cls.__new__(cls, arr, arr.dtype)
+ elif issubclass(cls, TimedeltaArray) and not args:
+ arr = np.array([], dtype="m8[ns]")
+ obj = cls.__new__(cls, arr, arr.dtype)
+ elif cls is BlockManager and not args:
+ obj = cls.__new__(cls, (), [], False)
+ else:
+ obj = cls.__new__(cls, *args)
+
+ self.stack[-1] = obj
+
+
+Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
+
+
+def load_newobj_ex(self) -> None:
+ kwargs = self.stack.pop()
+ args = self.stack.pop()
+ cls = self.stack.pop()
+
+ # compat
+ if issubclass(cls, Index):
+ obj = object.__new__(cls)
+ else:
+ obj = cls.__new__(cls, *args, **kwargs)
+ self.append(obj)
+
+
+try:
+ Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
+except (AttributeError, KeyError):
+ pass
+
+
+def load(fh, encoding: str | None = None, is_verbose: bool = False):
+ """
+    Load a pickle with a provided encoding.
+
+ Parameters
+ ----------
+ fh : a filelike object
+ encoding : an optional encoding
+ is_verbose : show exception output
+ """
+    fh.seek(0)
+    if encoding is not None:
+        up = Unpickler(fh, encoding=encoding)
+    else:
+        up = Unpickler(fh)
+    # "Unpickler" has no attribute "is_verbose" [attr-defined]
+    up.is_verbose = is_verbose  # type: ignore[attr-defined]
+
+    return up.load()
+
+
+def loads(
+ bytes_object: bytes,
+ *,
+ fix_imports: bool = True,
+ encoding: str = "ASCII",
+ errors: str = "strict",
+):
+ """
+ Analogous to pickle._loads.
+ """
+ fd = io.BytesIO(bytes_object)
+ return Unpickler(
+ fd, fix_imports=fix_imports, encoding=encoding, errors=errors
+ ).load()
+
+
+@contextlib.contextmanager
+def patch_pickle() -> Generator[None, None, None]:
+ """
+ Temporarily patch pickle to use our unpickler.
+ """
+ orig_loads = pkl.loads
+ try:
+ setattr(pkl, "loads", loads)
+ yield
+ finally:
+ setattr(pkl, "loads", orig_loads)
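+
+# Hedged usage sketch: route pickle.loads through the compat Unpickler
+# while reading a legacy payload. ``legacy_bytes`` is hypothetical.
+#
+#     import pickle
+#     with patch_pickle():
+#         obj = pickle.loads(legacy_bytes)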
diff --git a/venv/lib/python3.10/site-packages/pandas/compat/pyarrow.py b/venv/lib/python3.10/site-packages/pandas/compat/pyarrow.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2dfa69bbf236b3df1e2963d50f3c9aca387d4e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/compat/pyarrow.py
@@ -0,0 +1,27 @@
+""" support pyarrow compatibility across versions """
+
+from __future__ import annotations
+
+from pandas.util.version import Version
+
+try:
+ import pyarrow as pa
+
+ _palv = Version(Version(pa.__version__).base_version)
+ pa_version_under10p1 = _palv < Version("10.0.1")
+ pa_version_under11p0 = _palv < Version("11.0.0")
+ pa_version_under12p0 = _palv < Version("12.0.0")
+ pa_version_under13p0 = _palv < Version("13.0.0")
+ pa_version_under14p0 = _palv < Version("14.0.0")
+ pa_version_under14p1 = _palv < Version("14.0.1")
+ pa_version_under15p0 = _palv < Version("15.0.0")
+ pa_version_under16p0 = _palv < Version("16.0.0")
+except ImportError:
+ pa_version_under10p1 = True
+ pa_version_under11p0 = True
+ pa_version_under12p0 = True
+ pa_version_under13p0 = True
+ pa_version_under14p0 = True
+ pa_version_under14p1 = True
+ pa_version_under15p0 = True
+ pa_version_under16p0 = True
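+
+# Hedged usage sketch: the flags are meant for feature-gating.
+#
+#     from pandas.compat.pyarrow import pa_version_under14p0
+#     if not pa_version_under14p0:
+#         ...  # safe to rely on pyarrow >= 14.0 behaviour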
diff --git a/venv/lib/python3.10/site-packages/pandas/io/__init__.py b/venv/lib/python3.10/site-packages/pandas/io/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c804b81c49e7c8abb406f2132909df6036df1c09
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/__init__.py
@@ -0,0 +1,13 @@
+# ruff: noqa: TCH004
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ # import modules that have public classes/functions
+ from pandas.io import (
+ formats,
+ json,
+ stata,
+ )
+
+ # mark only those modules as public
+ __all__ = ["formats", "json", "stata"]
diff --git a/venv/lib/python3.10/site-packages/pandas/io/_util.py b/venv/lib/python3.10/site-packages/pandas/io/_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b2ae5daffdbaf515a330a54a83e550751e29fdb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/_util.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+from typing import Callable
+
+from pandas.compat._optional import import_optional_dependency
+
+import pandas as pd
+
+
+def _arrow_dtype_mapping() -> dict:
+ pa = import_optional_dependency("pyarrow")
+ return {
+ pa.int8(): pd.Int8Dtype(),
+ pa.int16(): pd.Int16Dtype(),
+ pa.int32(): pd.Int32Dtype(),
+ pa.int64(): pd.Int64Dtype(),
+ pa.uint8(): pd.UInt8Dtype(),
+ pa.uint16(): pd.UInt16Dtype(),
+ pa.uint32(): pd.UInt32Dtype(),
+ pa.uint64(): pd.UInt64Dtype(),
+ pa.bool_(): pd.BooleanDtype(),
+ pa.string(): pd.StringDtype(),
+ pa.float32(): pd.Float32Dtype(),
+ pa.float64(): pd.Float64Dtype(),
+ }
+
+
+def arrow_string_types_mapper() -> Callable:
+ pa = import_optional_dependency("pyarrow")
+
+ return {
+ pa.string(): pd.StringDtype(storage="pyarrow_numpy"),
+ pa.large_string(): pd.StringDtype(storage="pyarrow_numpy"),
+ }.get
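+
+# Hedged sketch (illustrative): mappings like these are handed to pyarrow,
+# which calls them per column type when converting a Table.
+#
+#     import pyarrow as pa
+#     tbl = pa.table({"a": pa.array([1, None], type=pa.int64())})
+#     tbl.to_pandas(types_mapper=_arrow_dtype_mapping().get)
+#     # column "a" comes back as nullable Int64, not float64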
diff --git a/venv/lib/python3.10/site-packages/pandas/io/api.py b/venv/lib/python3.10/site-packages/pandas/io/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e8b34a61dfc62992a37d9fab3263ee00a28d1fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/api.py
@@ -0,0 +1,65 @@
+"""
+Data IO API
+"""
+
+from pandas.io.clipboards import read_clipboard
+from pandas.io.excel import (
+ ExcelFile,
+ ExcelWriter,
+ read_excel,
+)
+from pandas.io.feather_format import read_feather
+from pandas.io.gbq import read_gbq
+from pandas.io.html import read_html
+from pandas.io.json import read_json
+from pandas.io.orc import read_orc
+from pandas.io.parquet import read_parquet
+from pandas.io.parsers import (
+ read_csv,
+ read_fwf,
+ read_table,
+)
+from pandas.io.pickle import (
+ read_pickle,
+ to_pickle,
+)
+from pandas.io.pytables import (
+ HDFStore,
+ read_hdf,
+)
+from pandas.io.sas import read_sas
+from pandas.io.spss import read_spss
+from pandas.io.sql import (
+ read_sql,
+ read_sql_query,
+ read_sql_table,
+)
+from pandas.io.stata import read_stata
+from pandas.io.xml import read_xml
+
+__all__ = [
+ "ExcelFile",
+ "ExcelWriter",
+ "HDFStore",
+ "read_clipboard",
+ "read_csv",
+ "read_excel",
+ "read_feather",
+ "read_fwf",
+ "read_gbq",
+ "read_hdf",
+ "read_html",
+ "read_json",
+ "read_orc",
+ "read_parquet",
+ "read_pickle",
+ "read_sas",
+ "read_spss",
+ "read_sql",
+ "read_sql_query",
+ "read_sql_table",
+ "read_stata",
+ "read_table",
+ "read_xml",
+ "to_pickle",
+]
diff --git a/venv/lib/python3.10/site-packages/pandas/io/clipboards.py b/venv/lib/python3.10/site-packages/pandas/io/clipboards.py
new file mode 100644
index 0000000000000000000000000000000000000000..a15e37328e9fa95587d53b58b1af10e1e57fd60c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/clipboards.py
@@ -0,0 +1,197 @@
+""" io on the clipboard """
+from __future__ import annotations
+
+from io import StringIO
+from typing import TYPE_CHECKING
+import warnings
+
+from pandas._libs import lib
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.generic import ABCDataFrame
+
+from pandas import (
+ get_option,
+ option_context,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import DtypeBackend
+
+
+def read_clipboard(
+ sep: str = r"\s+",
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ **kwargs,
+): # pragma: no cover
+ r"""
+ Read text from clipboard and pass to :func:`~pandas.read_csv`.
+
+ Parses clipboard contents similar to how CSV files are parsed
+ using :func:`~pandas.read_csv`.
+
+ Parameters
+ ----------
+ sep : str, default '\\s+'
+ A string or regex delimiter. The default of ``'\\s+'`` denotes
+ one or more whitespace characters.
+
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ **kwargs
+ See :func:`~pandas.read_csv` for the full argument list.
+
+ Returns
+ -------
+ DataFrame
+ A parsed :class:`~pandas.DataFrame` object.
+
+ See Also
+ --------
+ DataFrame.to_clipboard : Copy object to the system clipboard.
+ read_csv : Read a comma-separated values (csv) file into DataFrame.
+ read_fwf : Read a table of fixed-width formatted lines into DataFrame.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
+ >>> df.to_clipboard() # doctest: +SKIP
+ >>> pd.read_clipboard() # doctest: +SKIP
+ A B C
+ 0 1 2 3
+ 1 4 5 6
+ """
+ encoding = kwargs.pop("encoding", "utf-8")
+
+    # only utf-8 is valid for the passed value because that's what the
+    # clipboard supports
+ if encoding is not None and encoding.lower().replace("-", "") != "utf8":
+ raise NotImplementedError("reading from clipboard only supports utf-8 encoding")
+
+ check_dtype_backend(dtype_backend)
+
+ from pandas.io.clipboard import clipboard_get
+ from pandas.io.parsers import read_csv
+
+ text = clipboard_get()
+
+ # Try to decode (if needed, as "text" might already be a string here).
+ try:
+ text = text.decode(kwargs.get("encoding") or get_option("display.encoding"))
+ except AttributeError:
+ pass
+
+    # Excel copies into the clipboard with \t separation;
+    # inspect no more than the first 10 lines. If they
+    # all contain an equal number (>0) of tabs, infer
+    # that this came from Excel and set 'sep' accordingly.
+ lines = text[:10000].split("\n")[:-1][:10]
+
+ # Need to remove leading white space, since read_csv
+ # accepts:
+ # a b
+ # 0 1 2
+ # 1 3 4
+
+ counts = {x.lstrip(" ").count("\t") for x in lines}
+ if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
+ sep = "\t"
+ # check the number of leading tabs in the first line
+ # to account for index columns
+ index_length = len(lines[0]) - len(lines[0].lstrip(" \t"))
+ if index_length != 0:
+ kwargs.setdefault("index_col", list(range(index_length)))
+
+ # Edge case where sep is specified to be None, return to default
+ if sep is None and kwargs.get("delim_whitespace") is None:
+ sep = r"\s+"
+
+ # Regex separator currently only works with python engine.
+ # Default to python if separator is multi-character (regex)
+ if len(sep) > 1 and kwargs.get("engine") is None:
+ kwargs["engine"] = "python"
+ elif len(sep) > 1 and kwargs.get("engine") == "c":
+ warnings.warn(
+ "read_clipboard with regex separator does not work properly with c engine.",
+ stacklevel=find_stack_level(),
+ )
+
+ return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs)
+
+
+def to_clipboard(
+ obj, excel: bool | None = True, sep: str | None = None, **kwargs
+) -> None: # pragma: no cover
+ """
+    Attempt to write a text representation of an object to the system
+    clipboard, which can then be pasted into Excel, for example.
+
+ Parameters
+ ----------
+ obj : the object to write to the clipboard
+ excel : bool, defaults to True
+        If True, use the provided separator and write in a CSV
+        format, allowing easy pasting into Excel.
+        If False, write a string representation of the object
+        to the clipboard.
+    sep : str, optional, defaults to tab
+    **kwargs
+        Other keywords are passed to ``to_csv``.
+
+ Notes
+ -----
+    Requirements for your platform:
+    - Linux: xclip, or xsel (with PyQt4 modules)
+    - Windows: none
+    - OS X: none
+ """
+ encoding = kwargs.pop("encoding", "utf-8")
+
+ # testing if an invalid encoding is passed to clipboard
+ if encoding is not None and encoding.lower().replace("-", "") != "utf8":
+ raise ValueError("clipboard only supports utf-8 encoding")
+
+ from pandas.io.clipboard import clipboard_set
+
+ if excel is None:
+ excel = True
+
+ if excel:
+ try:
+ if sep is None:
+ sep = "\t"
+ buf = StringIO()
+
+ # clipboard_set (pyperclip) expects unicode
+ obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs)
+ text = buf.getvalue()
+
+ clipboard_set(text)
+ return
+ except TypeError:
+ warnings.warn(
+ "to_clipboard in excel mode requires a single character separator.",
+ stacklevel=find_stack_level(),
+ )
+ elif sep is not None:
+ warnings.warn(
+ "to_clipboard with excel=False ignores the sep argument.",
+ stacklevel=find_stack_level(),
+ )
+
+ if isinstance(obj, ABCDataFrame):
+ # str(df) has various unhelpful defaults, like truncation
+ with option_context("display.max_colwidth", None):
+ objstr = obj.to_string(**kwargs)
+ else:
+ objstr = str(obj)
+ clipboard_set(objstr)
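+
+# Hedged usage sketch: the two modes, called via DataFrame.to_clipboard.
+#
+#     df.to_clipboard(excel=True)   # tab-separated; pastes into spreadsheets
+#     df.to_clipboard(excel=False)  # plain to_string() representation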
diff --git a/venv/lib/python3.10/site-packages/pandas/io/common.py b/venv/lib/python3.10/site-packages/pandas/io/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..72c9deeb54fc7aaab781b2870171cf983a47da1f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/common.py
@@ -0,0 +1,1267 @@
+"""Common IO api utilities"""
+from __future__ import annotations
+
+from abc import (
+ ABC,
+ abstractmethod,
+)
+import codecs
+from collections import defaultdict
+from collections.abc import (
+ Hashable,
+ Mapping,
+ Sequence,
+)
+import dataclasses
+import functools
+import gzip
+from io import (
+ BufferedIOBase,
+ BytesIO,
+ RawIOBase,
+ StringIO,
+ TextIOBase,
+ TextIOWrapper,
+)
+import mmap
+import os
+from pathlib import Path
+import re
+import tarfile
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ AnyStr,
+ DefaultDict,
+ Generic,
+ Literal,
+ TypeVar,
+ cast,
+ overload,
+)
+from urllib.parse import (
+ urljoin,
+ urlparse as parse_url,
+ uses_netloc,
+ uses_params,
+ uses_relative,
+)
+import warnings
+import zipfile
+
+from pandas._typing import (
+ BaseBuffer,
+ ReadCsvBuffer,
+)
+from pandas.compat import (
+ get_bz2_file,
+ get_lzma_file,
+)
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_file_like,
+ is_integer,
+ is_list_like,
+)
+from pandas.core.dtypes.generic import ABCMultiIndex
+
+from pandas.core.shared_docs import _shared_docs
+
+_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
+_VALID_URLS.discard("")
+_RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-.]*://")
+
+BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer)
+
+
+if TYPE_CHECKING:
+ from types import TracebackType
+
+ from pandas._typing import (
+ CompressionDict,
+ CompressionOptions,
+ FilePath,
+ ReadBuffer,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+ from pandas import MultiIndex
+
+
+@dataclasses.dataclass
+class IOArgs:
+ """
+ Return value of io/common.py:_get_filepath_or_buffer.
+ """
+
+ filepath_or_buffer: str | BaseBuffer
+ encoding: str
+ mode: str
+ compression: CompressionDict
+ should_close: bool = False
+
+
+@dataclasses.dataclass
+class IOHandles(Generic[AnyStr]):
+ """
+ Return value of io/common.py:get_handle
+
+ Can be used as a context manager.
+
+ This is used to easily close created buffers and to handle corner cases when
+ TextIOWrapper is inserted.
+
+ handle: The file handle to be used.
+ created_handles: All file handles that are created by get_handle
+ is_wrapped: Whether a TextIOWrapper needs to be detached.
+ """
+
+ # handle might not implement the IO-interface
+ handle: IO[AnyStr]
+ compression: CompressionDict
+ created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)
+ is_wrapped: bool = False
+
+ def close(self) -> None:
+ """
+ Close all created buffers.
+
+ Note: If a TextIOWrapper was inserted, it is flushed and detached to
+ avoid closing the potentially user-created buffer.
+ """
+ if self.is_wrapped:
+ assert isinstance(self.handle, TextIOWrapper)
+ self.handle.flush()
+ self.handle.detach()
+ self.created_handles.remove(self.handle)
+ for handle in self.created_handles:
+ handle.close()
+ self.created_handles = []
+ self.is_wrapped = False
+
+ def __enter__(self) -> IOHandles[AnyStr]:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
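+
+# Hedged usage sketch (illustrative): IOHandles is what get_handle (defined
+# later in this module) returns; the context manager closes only the
+# buffers this module created. The path below is hypothetical.
+#
+#     with get_handle("data.csv.gz", "r", compression="infer") as handles:
+#         text = handles.handle.read()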
+
+
+def is_url(url: object) -> bool:
+ """
+ Check to see if a URL has a valid protocol.
+
+ Parameters
+ ----------
+ url : str or unicode
+
+ Returns
+ -------
+ isurl : bool
+ If `url` has a valid protocol return True otherwise False.
+ """
+ if not isinstance(url, str):
+ return False
+ return parse_url(url).scheme in _VALID_URLS
+
+
+@overload
+def _expand_user(filepath_or_buffer: str) -> str:
+ ...
+
+
+@overload
+def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT:
+ ...
+
+
+def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
+ """
+ Return the argument with an initial component of ~ or ~user
+ replaced by that user's home directory.
+
+ Parameters
+ ----------
+ filepath_or_buffer : object to be converted if possible
+
+ Returns
+ -------
+ expanded_filepath_or_buffer : an expanded filepath or the
+ input if not expandable
+ """
+ if isinstance(filepath_or_buffer, str):
+ return os.path.expanduser(filepath_or_buffer)
+ return filepath_or_buffer
+
+
+def validate_header_arg(header: object) -> None:
+ if header is None:
+ return
+ if is_integer(header):
+ header = cast(int, header)
+ if header < 0:
+ # GH 27779
+ raise ValueError(
+ "Passing negative integer to header is invalid. "
+ "For no header, use header=None instead"
+ )
+ return
+ if is_list_like(header, allow_sets=False):
+ header = cast(Sequence, header)
+ if not all(map(is_integer, header)):
+ raise ValueError("header must be integer or list of integers")
+ if any(i < 0 for i in header):
+ raise ValueError("cannot specify multi-index header with negative integers")
+ return
+ if is_bool(header):
+ raise TypeError(
+ "Passing a bool to header is invalid. Use header=None for no header or "
+ "header=int or list-like of ints to specify "
+ "the row(s) making up the column names"
+ )
+ # GH 16338
+ raise ValueError("header must be integer or list of integers")
+
+
+@overload
+def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str:
+ ...
+
+
+@overload
+def stringify_path(
+ filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...
+) -> BaseBufferT:
+ ...
+
+
+def stringify_path(
+ filepath_or_buffer: FilePath | BaseBufferT,
+ convert_file_like: bool = False,
+) -> str | BaseBufferT:
+ """
+ Attempt to convert a path-like object to a string.
+
+ Parameters
+ ----------
+ filepath_or_buffer : object to be converted
+
+ Returns
+ -------
+ str_filepath_or_buffer : maybe a string version of the object
+
+ Notes
+ -----
+ Objects supporting the fspath protocol are coerced
+    according to their __fspath__ method.
+
+ Any other object is passed through unchanged, which includes bytes,
+ strings, buffers, or anything else that's not even path-like.
+ """
+ if not convert_file_like and is_file_like(filepath_or_buffer):
+ # GH 38125: some fsspec objects implement os.PathLike but have already opened a
+ # file. This prevents opening the file a second time. infer_compression calls
+ # this function with convert_file_like=True to infer the compression.
+ return cast(BaseBufferT, filepath_or_buffer)
+
+ if isinstance(filepath_or_buffer, os.PathLike):
+ filepath_or_buffer = filepath_or_buffer.__fspath__()
+ return _expand_user(filepath_or_buffer)
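+
+# Hedged sketch (illustrative): path-like objects are converted and
+# ~ is expanded; file-like objects pass through untouched.
+#
+#     from pathlib import Path
+#     stringify_path(Path("~") / "data.csv")  # -> "/home/<user>/data.csv"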
+
+
+def urlopen(*args, **kwargs):
+ """
+ Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
+ the stdlib.
+ """
+ import urllib.request
+
+ return urllib.request.urlopen(*args, **kwargs)
+
+
+def is_fsspec_url(url: FilePath | BaseBuffer) -> bool:
+ """
+ Returns true if the given URL looks like
+ something fsspec can handle
+ """
+ return (
+ isinstance(url, str)
+ and bool(_RFC_3986_PATTERN.match(url))
+ and not url.startswith(("http://", "https://"))
+ )
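+
+# Hedged sketch (illustrative):
+#
+#     is_fsspec_url("s3://bucket/key.csv")    # True
+#     is_fsspec_url("https://host/file.csv")  # False: handled via urllib
+#     is_fsspec_url("/local/path.csv")        # False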
+
+
+@doc(
+ storage_options=_shared_docs["storage_options"],
+ compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
+)
+def _get_filepath_or_buffer(
+ filepath_or_buffer: FilePath | BaseBuffer,
+ encoding: str = "utf-8",
+ compression: CompressionOptions | None = None,
+ mode: str = "r",
+ storage_options: StorageOptions | None = None,
+) -> IOArgs:
+ """
+ If the filepath_or_buffer is a url, translate and return the buffer.
+ Otherwise passthrough.
+
+ Parameters
+ ----------
+ filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
+ or buffer
+ {compression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ encoding : the encoding to use to decode bytes, default is 'utf-8'
+ mode : str, optional
+
+ {storage_options}
+
+
+    Returns
+    -------
+    The dataclass IOArgs.
+ """
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+
+ # handle compression dict
+ compression_method, compression = get_compression_method(compression)
+ compression_method = infer_compression(filepath_or_buffer, compression_method)
+
+ # GH21227 internal compression is not used for non-binary handles.
+ if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
+ warnings.warn(
+ "compression has no effect when passing a non-binary object as input.",
+ RuntimeWarning,
+ stacklevel=find_stack_level(),
+ )
+ compression_method = None
+
+ compression = dict(compression, method=compression_method)
+
+ # bz2 and xz do not write the byte order mark for utf-16 and utf-32
+ # print a warning when writing such files
+ if (
+ "w" in mode
+ and compression_method in ["bz2", "xz"]
+ and encoding in ["utf-16", "utf-32"]
+ ):
+ warnings.warn(
+ f"{compression} will not write the byte order mark for {encoding}",
+ UnicodeWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ # Use binary mode when converting path-like objects to file-like objects (fsspec)
+ # except when text mode is explicitly requested. The original mode is returned if
+ # fsspec is not used.
+ fsspec_mode = mode
+ if "t" not in fsspec_mode and "b" not in fsspec_mode:
+ fsspec_mode += "b"
+
+ if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
+ # TODO: fsspec can also handle HTTP via requests, but leaving this
+ # unchanged. using fsspec appears to break the ability to infer if the
+ # server responded with gzipped data
+ storage_options = storage_options or {}
+
+        # Import here rather than at module level to keep urllib.request
+        # lazy, matching the urlopen wrapper defined above in this module.
+ import urllib.request
+
+ # assuming storage_options is to be interpreted as headers
+ req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
+ with urlopen(req_info) as req:
+ content_encoding = req.headers.get("Content-Encoding", None)
+ if content_encoding == "gzip":
+ # Override compression based on Content-Encoding header
+ compression = {"method": "gzip"}
+ reader = BytesIO(req.read())
+ return IOArgs(
+ filepath_or_buffer=reader,
+ encoding=encoding,
+ compression=compression,
+ should_close=True,
+ mode=fsspec_mode,
+ )
+
+ if is_fsspec_url(filepath_or_buffer):
+ assert isinstance(
+ filepath_or_buffer, str
+ ) # just to appease mypy for this branch
+ # two special-case s3-like protocols; these have special meaning in Hadoop,
+ # but are equivalent to just "s3" from fsspec's point of view
+ # cc #11071
+ if filepath_or_buffer.startswith("s3a://"):
+ filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
+ if filepath_or_buffer.startswith("s3n://"):
+ filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
+ fsspec = import_optional_dependency("fsspec")
+
+ # If botocore is installed we fallback to reading with anon=True
+ # to allow reads from public buckets
+ err_types_to_retry_with_anon: list[Any] = []
+ try:
+ import_optional_dependency("botocore")
+ from botocore.exceptions import (
+ ClientError,
+ NoCredentialsError,
+ )
+
+ err_types_to_retry_with_anon = [
+ ClientError,
+ NoCredentialsError,
+ PermissionError,
+ ]
+ except ImportError:
+ pass
+
+ try:
+ file_obj = fsspec.open(
+ filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
+ ).open()
+ # GH 34626 Reads from Public Buckets without Credentials needs anon=True
+ except tuple(err_types_to_retry_with_anon):
+ if storage_options is None:
+ storage_options = {"anon": True}
+ else:
+ # don't mutate user input.
+ storage_options = dict(storage_options)
+ storage_options["anon"] = True
+ file_obj = fsspec.open(
+ filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
+ ).open()
+
+ return IOArgs(
+ filepath_or_buffer=file_obj,
+ encoding=encoding,
+ compression=compression,
+ should_close=True,
+ mode=fsspec_mode,
+ )
+ elif storage_options:
+ raise ValueError(
+ "storage_options passed with file object or non-fsspec file path"
+ )
+
+ if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
+ return IOArgs(
+ filepath_or_buffer=_expand_user(filepath_or_buffer),
+ encoding=encoding,
+ compression=compression,
+ should_close=False,
+ mode=mode,
+ )
+
+ # is_file_like requires (read | write) & __iter__ but __iter__ is only
+ # needed for read_csv(engine=python)
+ if not (
+ hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write")
+ ):
+ msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
+ raise ValueError(msg)
+
+ return IOArgs(
+ filepath_or_buffer=filepath_or_buffer,
+ encoding=encoding,
+ compression=compression,
+ should_close=False,
+ mode=mode,
+ )
+
+
+def file_path_to_url(path: str) -> str:
+ """
+    Convert an absolute native path to a FILE URL.
+
+ Parameters
+ ----------
+ path : a path in native format
+
+ Returns
+ -------
+ a valid FILE URL
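+
+    Examples
+    --------
+    Output shown for a POSIX system:
+
+    >>> file_path_to_url("/tmp/data.csv")  # doctest: +SKIP
+    'file:///tmp/data.csv'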
+ """
+ # lazify expensive import (~30ms)
+ from urllib.request import pathname2url
+
+ return urljoin("file:", pathname2url(path))
+
+
+extension_to_compression = {
+ ".tar": "tar",
+ ".tar.gz": "tar",
+ ".tar.bz2": "tar",
+ ".tar.xz": "tar",
+ ".gz": "gzip",
+ ".bz2": "bz2",
+ ".zip": "zip",
+ ".xz": "xz",
+ ".zst": "zstd",
+}
+_supported_compressions = set(extension_to_compression.values())
+
+
+def get_compression_method(
+ compression: CompressionOptions,
+) -> tuple[str | None, CompressionDict]:
+ """
+ Simplifies a compression argument to a compression method string and
+ a mapping containing additional arguments.
+
+ Parameters
+ ----------
+ compression : str or mapping
+ If string, specifies the compression method. If mapping, value at key
+ 'method' specifies compression method.
+
+ Returns
+ -------
+    tuple of (str or None, dict)
+        The compression method and a dict of additional compression
+        arguments.
+
+ Raises
+ ------
+ ValueError on mapping missing 'method' key
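+
+    Examples
+    --------
+    >>> get_compression_method("gzip")
+    ('gzip', {})
+    >>> get_compression_method({"method": "zstd", "level": 3})
+    ('zstd', {'level': 3})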
+ """
+ compression_method: str | None
+ if isinstance(compression, Mapping):
+ compression_args = dict(compression)
+ try:
+ compression_method = compression_args.pop("method")
+ except KeyError as err:
+ raise ValueError("If mapping, compression must have key 'method'") from err
+ else:
+ compression_args = {}
+ compression_method = compression
+ return compression_method, compression_args
+
+
+@doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer")
+def infer_compression(
+ filepath_or_buffer: FilePath | BaseBuffer, compression: str | None
+) -> str | None:
+ """
+ Get the compression method for filepath_or_buffer. If compression='infer',
+ the inferred compression method is returned. Otherwise, the input
+ compression method is returned unchanged, unless it's invalid, in which
+ case an error is raised.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str or file handle
+ File path or object.
+ {compression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ Returns
+ -------
+ string or None
+
+ Raises
+ ------
+ ValueError on invalid compression specified.
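+
+    Examples
+    --------
+    >>> infer_compression("dataset.csv.gz", compression="infer")
+    'gzip'
+    >>> infer_compression("dataset.csv", compression="infer") is None
+    True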
+ """
+ if compression is None:
+ return None
+
+ # Infer compression
+ if compression == "infer":
+ # Convert all path types (e.g. pathlib.Path) to strings
+ filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
+ if not isinstance(filepath_or_buffer, str):
+ # Cannot infer compression of a buffer, assume no compression
+ return None
+
+ # Infer compression from the filename/URL extension
+ for extension, compression in extension_to_compression.items():
+ if filepath_or_buffer.lower().endswith(extension):
+ return compression
+ return None
+
+ # Compression has been specified. Check that it's valid
+ if compression in _supported_compressions:
+ return compression
+
+ valid = ["infer", None] + sorted(_supported_compressions)
+ msg = (
+ f"Unrecognized compression type: {compression}\n"
+ f"Valid compression types are {valid}"
+ )
+ raise ValueError(msg)
+
+
+def check_parent_directory(path: Path | str) -> None:
+ """
+ Check if parent directory of a file exists, raise OSError if it does not
+
+ Parameters
+ ----------
+    path : Path or str
+ Path to check parent directory of
+ """
+ parent = Path(path).parent
+ if not parent.is_dir():
+ raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'")
+
+
+@overload
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: Literal[False],
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[bytes]:
+ ...
+
+
+@overload
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: Literal[True] = ...,
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[str]:
+ ...
+
+
+@overload
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = ...,
+ compression: CompressionOptions = ...,
+ memory_map: bool = ...,
+ is_text: bool = ...,
+ errors: str | None = ...,
+ storage_options: StorageOptions = ...,
+) -> IOHandles[str] | IOHandles[bytes]:
+ ...
+
+
+@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
+def get_handle(
+ path_or_buf: FilePath | BaseBuffer,
+ mode: str,
+ *,
+ encoding: str | None = None,
+ compression: CompressionOptions | None = None,
+ memory_map: bool = False,
+ is_text: bool = True,
+ errors: str | None = None,
+ storage_options: StorageOptions | None = None,
+) -> IOHandles[str] | IOHandles[bytes]:
+ """
+ Get file handle for given path/buffer and mode.
+
+ Parameters
+ ----------
+ path_or_buf : str or file handle
+ File path or object.
+ mode : str
+ Mode to open path_or_buf with.
+ encoding : str or None
+ Encoding to use.
+ {compression_options}
+
+ May be a dict with key 'method' as compression mode
+ and other keys as compression options if compression
+ mode is 'zip'.
+
+ Passing compression options as keys in dict is
+ supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+ memory_map : bool, default False
+ See parsers._parser_params for more information. Only used by read_csv.
+ is_text : bool, default True
+ Whether the type of the content passed to the file/buffer is string or
+ bytes. This is not the same as `"b" not in mode`. If a string content is
+ passed to a binary file/buffer, a wrapper is inserted.
+ errors : str, default 'strict'
+ Specifies how encoding and decoding errors are to be handled.
+ See the errors argument for :func:`open` for a full list
+ of options.
+    storage_options : StorageOptions, optional
+        Passed to _get_filepath_or_buffer.
+
+    Returns
+    -------
+    The dataclass IOHandles.
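+
+    Examples
+    --------
+    A minimal sketch, assuming a local file "data.csv" exists:
+
+    >>> with get_handle("data.csv", "r", encoding="utf-8") as handles:  # doctest: +SKIP
+    ...     first_line = handles.handle.readline()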
+ """
+ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior
+ encoding = encoding or "utf-8"
+
+ errors = errors or "strict"
+
+ # read_csv does not know whether the buffer is opened in binary/text mode
+ if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
+ mode += "b"
+
+ # validate encoding and errors
+ codecs.lookup(encoding)
+ if isinstance(errors, str):
+ codecs.lookup_error(errors)
+
+ # open URLs
+ ioargs = _get_filepath_or_buffer(
+ path_or_buf,
+ encoding=encoding,
+ compression=compression,
+ mode=mode,
+ storage_options=storage_options,
+ )
+
+ handle = ioargs.filepath_or_buffer
+ handles: list[BaseBuffer]
+
+ # memory mapping needs to be the first step
+ # only used for read_csv
+ handle, memory_map, handles = _maybe_memory_map(handle, memory_map)
+
+ is_path = isinstance(handle, str)
+ compression_args = dict(ioargs.compression)
+ compression = compression_args.pop("method")
+
+ # Only for write methods
+ if "r" not in mode and is_path:
+ check_parent_directory(str(handle))
+
+ if compression:
+ if compression != "zstd":
+ # compression libraries do not like an explicit text-mode
+ ioargs.mode = ioargs.mode.replace("t", "")
+ elif compression == "zstd" and "b" not in ioargs.mode:
+ # python-zstandard defaults to text mode, but we always expect
+ # compression libraries to use binary mode.
+ ioargs.mode += "b"
+
+ # GZ Compression
+ if compression == "gzip":
+ if isinstance(handle, str):
+ # error: Incompatible types in assignment (expression has type
+ # "GzipFile", variable has type "Union[str, BaseBuffer]")
+ handle = gzip.GzipFile( # type: ignore[assignment]
+ filename=handle,
+ mode=ioargs.mode,
+ **compression_args,
+ )
+ else:
+ handle = gzip.GzipFile(
+ # No overload variant of "GzipFile" matches argument types
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
+ fileobj=handle, # type: ignore[call-overload]
+ mode=ioargs.mode,
+ **compression_args,
+ )
+
+ # BZ Compression
+ elif compression == "bz2":
+ # Overload of "BZ2File" to handle pickle protocol 5
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
+ handle = get_bz2_file()( # type: ignore[call-overload]
+ handle,
+ mode=ioargs.mode,
+ **compression_args,
+ )
+
+ # ZIP Compression
+ elif compression == "zip":
+ # error: Argument 1 to "_BytesZipFile" has incompatible type
+ # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]],
+ # ReadBuffer[bytes], WriteBuffer[bytes]]"
+ handle = _BytesZipFile(
+ handle, ioargs.mode, **compression_args # type: ignore[arg-type]
+ )
+ if handle.buffer.mode == "r":
+ handles.append(handle)
+ zip_names = handle.buffer.namelist()
+ if len(zip_names) == 1:
+ handle = handle.buffer.open(zip_names.pop())
+ elif not zip_names:
+ raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
+ else:
+ raise ValueError(
+ "Multiple files found in ZIP file. "
+ f"Only one file per ZIP: {zip_names}"
+ )
+
+    # TAR Compression
+ elif compression == "tar":
+ compression_args.setdefault("mode", ioargs.mode)
+ if isinstance(handle, str):
+ handle = _BytesTarFile(name=handle, **compression_args)
+ else:
+ # error: Argument "fileobj" to "_BytesTarFile" has incompatible
+ # type "BaseBuffer"; expected "Union[ReadBuffer[bytes],
+ # WriteBuffer[bytes], None]"
+ handle = _BytesTarFile(
+ fileobj=handle, **compression_args # type: ignore[arg-type]
+ )
+ assert isinstance(handle, _BytesTarFile)
+ if "r" in handle.buffer.mode:
+ handles.append(handle)
+ files = handle.buffer.getnames()
+ if len(files) == 1:
+ file = handle.buffer.extractfile(files[0])
+ assert file is not None
+ handle = file
+ elif not files:
+ raise ValueError(f"Zero files found in TAR archive {path_or_buf}")
+ else:
+ raise ValueError(
+ "Multiple files found in TAR archive. "
+ f"Only one file per TAR archive: {files}"
+ )
+
+ # XZ Compression
+ elif compression == "xz":
+ # error: Argument 1 to "LZMAFile" has incompatible type "Union[str,
+ # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str],
+ # PathLike[bytes]], IO[bytes]], None]"
+ handle = get_lzma_file()(
+ handle, ioargs.mode, **compression_args # type: ignore[arg-type]
+ )
+
+ # Zstd Compression
+ elif compression == "zstd":
+ zstd = import_optional_dependency("zstandard")
+ if "r" in ioargs.mode:
+ open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)}
+ else:
+ open_args = {"cctx": zstd.ZstdCompressor(**compression_args)}
+ handle = zstd.open(
+ handle,
+ mode=ioargs.mode,
+ **open_args,
+ )
+
+ # Unrecognized Compression
+ else:
+ msg = f"Unrecognized compression type: {compression}"
+ raise ValueError(msg)
+
+ assert not isinstance(handle, str)
+ handles.append(handle)
+
+ elif isinstance(handle, str):
+ # Check whether the filename is to be opened in binary mode.
+ # Binary mode does not support 'encoding' and 'newline'.
+ if ioargs.encoding and "b" not in ioargs.mode:
+ # Encoding
+ handle = open(
+ handle,
+ ioargs.mode,
+ encoding=ioargs.encoding,
+ errors=errors,
+ newline="",
+ )
+ else:
+ # Binary mode
+ handle = open(handle, ioargs.mode)
+ handles.append(handle)
+
+ # Convert BytesIO or file objects passed with an encoding
+ is_wrapped = False
+ if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase):
+ # not added to handles as it does not open/buffer resources
+ handle = _BytesIOWrapper(
+ handle,
+ encoding=ioargs.encoding,
+ )
+ elif is_text and (
+ compression or memory_map or _is_binary_mode(handle, ioargs.mode)
+ ):
+ if (
+ not hasattr(handle, "readable")
+ or not hasattr(handle, "writable")
+ or not hasattr(handle, "seekable")
+ ):
+ handle = _IOWrapper(handle)
+ # error: Argument 1 to "TextIOWrapper" has incompatible type
+ # "_IOWrapper"; expected "IO[bytes]"
+ handle = TextIOWrapper(
+ handle, # type: ignore[arg-type]
+ encoding=ioargs.encoding,
+ errors=errors,
+ newline="",
+ )
+ handles.append(handle)
+ # only marked as wrapped when the caller provided a handle
+ is_wrapped = not (
+ isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
+ )
+
+ if "r" in ioargs.mode and not hasattr(handle, "read"):
+ raise TypeError(
+ "Expected file path name or file-like object, "
+ f"got {type(ioargs.filepath_or_buffer)} type"
+ )
+
+ handles.reverse() # close the most recently added buffer first
+ if ioargs.should_close:
+ assert not isinstance(ioargs.filepath_or_buffer, str)
+ handles.append(ioargs.filepath_or_buffer)
+
+ return IOHandles(
+ # error: Argument "handle" to "IOHandles" has incompatible type
+ # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes],
+ # typing.IO[Any]]"; expected "pandas._typing.IO[Any]"
+ handle=handle, # type: ignore[arg-type]
+ # error: Argument "created_handles" to "IOHandles" has incompatible type
+ # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]"
+ created_handles=handles, # type: ignore[arg-type]
+ is_wrapped=is_wrapped,
+ compression=ioargs.compression,
+ )
+
+
+# error: Definition of "__enter__" in base class "IOBase" is incompatible
+# with definition in base class "BinaryIO"
+class _BufferedWriter(BytesIO, ABC): # type: ignore[misc]
+ """
+ Some objects do not support multiple .write() calls (TarFile and ZipFile).
+ This wrapper writes to the underlying buffer on close.
+ """
+
+ buffer = BytesIO()
+
+ @abstractmethod
+ def write_to_buffer(self) -> None:
+ ...
+
+ def close(self) -> None:
+ if self.closed:
+ # already closed
+ return
+ if self.getbuffer().nbytes:
+ # write to buffer
+ self.seek(0)
+ with self.buffer:
+ self.write_to_buffer()
+ else:
+ self.buffer.close()
+ super().close()
+
+
+class _BytesTarFile(_BufferedWriter):
+ def __init__(
+ self,
+ name: str | None = None,
+ mode: Literal["r", "a", "w", "x"] = "r",
+ fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None = None,
+ archive_name: str | None = None,
+ **kwargs,
+ ) -> None:
+ super().__init__()
+ self.archive_name = archive_name
+ self.name = name
+ # error: Incompatible types in assignment (expression has type "TarFile",
+ # base class "_BufferedWriter" defined the type as "BytesIO")
+ self.buffer: tarfile.TarFile = tarfile.TarFile.open( # type: ignore[assignment]
+ name=name,
+ mode=self.extend_mode(mode),
+ fileobj=fileobj,
+ **kwargs,
+ )
+
+ def extend_mode(self, mode: str) -> str:
+ mode = mode.replace("b", "")
+ if mode != "w":
+ return mode
+ if self.name is not None:
+ suffix = Path(self.name).suffix
+ if suffix in (".gz", ".xz", ".bz2"):
+ mode = f"{mode}:{suffix[1:]}"
+ return mode
+
+ def infer_filename(self) -> str | None:
+ """
+        If an explicit archive_name is not given, we still want the file inside the
+        tar archive not to be named something.tar, because that causes confusion
+        (GH39465).
+ """
+ if self.name is None:
+ return None
+
+        filename = Path(self.name)
+        if filename.suffix == ".tar":
+            return filename.with_suffix("").name
+        # Path.suffix only contains the final extension, so compound
+        # extensions like ".tar.gz" must be checked against the full name
+        elif filename.name.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
+            return filename.with_suffix("").with_suffix("").name
+        return filename.name
+
+ def write_to_buffer(self) -> None:
+ # TarFile needs a non-empty string
+ archive_name = self.archive_name or self.infer_filename() or "tar"
+ tarinfo = tarfile.TarInfo(name=archive_name)
+ tarinfo.size = len(self.getvalue())
+ self.buffer.addfile(tarinfo, self)
+
+
+class _BytesZipFile(_BufferedWriter):
+ def __init__(
+ self,
+ file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
+ mode: str,
+ archive_name: str | None = None,
+ **kwargs,
+ ) -> None:
+ super().__init__()
+ mode = mode.replace("b", "")
+ self.archive_name = archive_name
+
+ kwargs.setdefault("compression", zipfile.ZIP_DEFLATED)
+ # error: Incompatible types in assignment (expression has type "ZipFile",
+ # base class "_BufferedWriter" defined the type as "BytesIO")
+ self.buffer: zipfile.ZipFile = zipfile.ZipFile( # type: ignore[assignment]
+ file, mode, **kwargs
+ )
+
+ def infer_filename(self) -> str | None:
+ """
+ If an explicit archive_name is not given, we still want the file inside the zip
+ file not to be named something.zip, because that causes confusion (GH39465).
+ """
+ if isinstance(self.buffer.filename, (os.PathLike, str)):
+ filename = Path(self.buffer.filename)
+ if filename.suffix == ".zip":
+ return filename.with_suffix("").name
+ return filename.name
+ return None
+
+ def write_to_buffer(self) -> None:
+ # ZipFile needs a non-empty string
+ archive_name = self.archive_name or self.infer_filename() or "zip"
+ self.buffer.writestr(archive_name, self.getvalue())
+
+
+class _IOWrapper:
+    # TextIOWrapper is overly strict: it requests that the buffer be seekable,
+    # readable, and writable. If we have a read-only buffer, we shouldn't need
+    # writable, and vice versa. Some buffers are seek/read/writ-able but do not
+    # have the "-able" methods, e.g., tempfile.SpooledTemporaryFile.
+    # If a buffer does not have the above "-able" methods, we simply assume it
+    # is seek/read/writ-able.
+ def __init__(self, buffer: BaseBuffer) -> None:
+ self.buffer = buffer
+
+ def __getattr__(self, name: str):
+ return getattr(self.buffer, name)
+
+ def readable(self) -> bool:
+ if hasattr(self.buffer, "readable"):
+ return self.buffer.readable()
+ return True
+
+ def seekable(self) -> bool:
+ if hasattr(self.buffer, "seekable"):
+ return self.buffer.seekable()
+ return True
+
+ def writable(self) -> bool:
+ if hasattr(self.buffer, "writable"):
+ return self.buffer.writable()
+ return True
+
+
+class _BytesIOWrapper:
+ # Wrapper that wraps a StringIO buffer and reads bytes from it
+ # Created for compat with pyarrow read_csv
+ def __init__(self, buffer: StringIO | TextIOBase, encoding: str = "utf-8") -> None:
+ self.buffer = buffer
+ self.encoding = encoding
+ # Because a character can be represented by more than 1 byte,
+ # it is possible that reading will produce more bytes than n
+ # We store the extra bytes in this overflow variable, and append the
+ # overflow to the front of the bytestring the next time reading is performed
+ self.overflow = b""
+
+ def __getattr__(self, attr: str):
+ return getattr(self.buffer, attr)
+
+ def read(self, n: int | None = -1) -> bytes:
+ assert self.buffer is not None
+ bytestring = self.buffer.read(n).encode(self.encoding)
+        # When n is -1 or exceeds the remaining bytes, read the entire rest of the file
+ combined_bytestring = self.overflow + bytestring
+ if n is None or n < 0 or n >= len(combined_bytestring):
+ self.overflow = b""
+ return combined_bytestring
+ else:
+ to_return = combined_bytestring[:n]
+ self.overflow = combined_bytestring[n:]
+ return to_return
+
+
+def _maybe_memory_map(
+ handle: str | BaseBuffer, memory_map: bool
+) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
+ """Try to memory map file/buffer."""
+ handles: list[BaseBuffer] = []
+ memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
+ if not memory_map:
+ return handle, memory_map, handles
+
+ # mmap used by only read_csv
+ handle = cast(ReadCsvBuffer, handle)
+
+ # need to open the file first
+ if isinstance(handle, str):
+ handle = open(handle, "rb")
+ handles.append(handle)
+
+ try:
+        # open mmap and add the *-able methods
+ # error: Argument 1 to "_IOWrapper" has incompatible type "mmap";
+ # expected "BaseBuffer"
+ wrapped = _IOWrapper(
+ mmap.mmap(
+ handle.fileno(), 0, access=mmap.ACCESS_READ # type: ignore[arg-type]
+ )
+ )
+ finally:
+ for handle in reversed(handles):
+ # error: "BaseBuffer" has no attribute "close"
+ handle.close() # type: ignore[attr-defined]
+
+ return wrapped, memory_map, [wrapped]
+
+
+def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool:
+ """Test whether file exists."""
+ exists = False
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if not isinstance(filepath_or_buffer, str):
+ return exists
+ try:
+ exists = os.path.exists(filepath_or_buffer)
+ # gh-5874: if the filepath is too long will raise here
+ except (TypeError, ValueError):
+ pass
+ return exists
+
+
+def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:
+ """Whether the handle is opened in binary mode"""
+ # specified by user
+ if "t" in mode or "b" in mode:
+ return "b" in mode
+
+ # exceptions
+ text_classes = (
+ # classes that expect string but have 'b' in mode
+ codecs.StreamWriter,
+ codecs.StreamReader,
+ codecs.StreamReaderWriter,
+ )
+ if issubclass(type(handle), text_classes):
+ return False
+
+ return isinstance(handle, _get_binary_io_classes()) or "b" in getattr(
+ handle, "mode", mode
+ )
+
+
+@functools.lru_cache
+def _get_binary_io_classes() -> tuple[type, ...]:
+ """IO classes that that expect bytes"""
+ binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase)
+
+ # python-zstandard doesn't use any of the builtin base classes; instead we
+ # have to use the `zstd.ZstdDecompressionReader` class for isinstance checks.
+ # Unfortunately `zstd.ZstdDecompressionReader` isn't exposed by python-zstandard
+ # so we have to get it from a `zstd.ZstdDecompressor` instance.
+ # See also https://github.com/indygreg/python-zstandard/pull/165.
+ zstd = import_optional_dependency("zstandard", errors="ignore")
+ if zstd is not None:
+ with zstd.ZstdDecompressor().stream_reader(b"") as reader:
+ binary_classes += (type(reader),)
+
+ return binary_classes
+
+
+def is_potential_multi_index(
+ columns: Sequence[Hashable] | MultiIndex,
+ index_col: bool | Sequence[int] | None = None,
+) -> bool:
+ """
+ Check whether or not the `columns` parameter
+ could be converted into a MultiIndex.
+
+ Parameters
+ ----------
+ columns : array-like
+ Object which may or may not be convertible into a MultiIndex
+ index_col : None, bool or list, optional
+ Column or columns to use as the (possibly hierarchical) index
+
+ Returns
+ -------
+ bool : Whether or not columns could become a MultiIndex
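+
+    Examples
+    --------
+    >>> is_potential_multi_index([("a", "b"), ("a", "c")])
+    True
+    >>> is_potential_multi_index(["a", "b"])
+    False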
+ """
+ if index_col is None or isinstance(index_col, bool):
+ index_col = []
+
+ return bool(
+ len(columns)
+ and not isinstance(columns, ABCMultiIndex)
+ and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
+ )
+
+
+def dedup_names(
+ names: Sequence[Hashable], is_potential_multiindex: bool
+) -> Sequence[Hashable]:
+ """
+ Rename column names if duplicates exist.
+
+    Currently the renaming is done by appending a period and an
+    auto-incrementing number, but a custom pattern may be supported
+    in the future.
+
+ Examples
+ --------
+ >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False)
+ ['x', 'y', 'x.1', 'x.2']
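+
+    With ``is_potential_multiindex=True`` only the last level is renamed:
+
+    >>> dedup_names([("a", "x"), ("a", "x")], is_potential_multiindex=True)
+    [('a', 'x'), ('a', 'x.1')]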
+ """
+ names = list(names) # so we can index
+ counts: DefaultDict[Hashable, int] = defaultdict(int)
+
+ for i, col in enumerate(names):
+ cur_count = counts[col]
+
+ while cur_count > 0:
+ counts[col] = cur_count + 1
+
+ if is_potential_multiindex:
+ # for mypy
+ assert isinstance(col, tuple)
+ col = col[:-1] + (f"{col[-1]}.{cur_count}",)
+ else:
+ col = f"{col}.{cur_count}"
+ cur_count = counts[col]
+
+ names[i] = col
+ counts[col] = cur_count + 1
+
+ return names
diff --git a/venv/lib/python3.10/site-packages/pandas/io/feather_format.py b/venv/lib/python3.10/site-packages/pandas/io/feather_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0aaf83b84cb241ebdd872c1c8b7982fadc9acdb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/feather_format.py
@@ -0,0 +1,143 @@
+""" feather-format compat """
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+from pandas.util._validators import check_dtype_backend
+
+import pandas as pd
+from pandas.core.api import DataFrame
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io._util import arrow_string_types_mapper
+from pandas.io.common import get_handle
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Sequence,
+ )
+
+ from pandas._typing import (
+ DtypeBackend,
+ FilePath,
+ ReadBuffer,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+def to_feather(
+ df: DataFrame,
+ path: FilePath | WriteBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ **kwargs: Any,
+) -> None:
+ """
+ Write a DataFrame to the binary Feather format.
+
+ Parameters
+ ----------
+ df : DataFrame
+ path : str, path object, or file-like object
+ {storage_options}
+ **kwargs :
+ Additional keywords passed to `pyarrow.feather.write_feather`.
+
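+    Examples
+    --------
+    A minimal sketch (requires pyarrow and writes a local file):
+
+    >>> df = pd.DataFrame({{"a": [1, 2], "b": [3.0, 4.0]}})  # doctest: +SKIP
+    >>> to_feather(df, "out.feather")  # doctest: +SKIP
+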
+ """
+ import_optional_dependency("pyarrow")
+ from pyarrow import feather
+
+ if not isinstance(df, DataFrame):
+ raise ValueError("feather only support IO with DataFrames")
+
+ with get_handle(
+ path, "wb", storage_options=storage_options, is_text=False
+ ) as handles:
+ feather.write_feather(df, handles.handle, **kwargs)
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+def read_feather(
+ path: FilePath | ReadBuffer[bytes],
+ columns: Sequence[Hashable] | None = None,
+ use_threads: bool = True,
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame:
+ """
+ Load a feather-format object from the file path.
+
+ Parameters
+ ----------
+ path : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be: ``file://localhost/path/to/table.feather``.
+ columns : sequence, default None
+ If not provided, all columns are read.
+ use_threads : bool, default True
+ Whether to parallelize reading using multiple threads.
+ {storage_options}
+
+ dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ Returns
+ -------
+ type of object stored in file
+
+ Examples
+ --------
+ >>> df = pd.read_feather("path/to/file.feather") # doctest: +SKIP
+ """
+ import_optional_dependency("pyarrow")
+ from pyarrow import feather
+
+ # import utils to register the pyarrow extension types
+ import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401
+
+ check_dtype_backend(dtype_backend)
+
+ with get_handle(
+ path, "rb", storage_options=storage_options, is_text=False
+ ) as handles:
+ if dtype_backend is lib.no_default and not using_pyarrow_string_dtype():
+ return feather.read_feather(
+ handles.handle, columns=columns, use_threads=bool(use_threads)
+ )
+
+ pa_table = feather.read_table(
+ handles.handle, columns=columns, use_threads=bool(use_threads)
+ )
+
+ if dtype_backend == "numpy_nullable":
+ from pandas.io._util import _arrow_dtype_mapping
+
+ return pa_table.to_pandas(types_mapper=_arrow_dtype_mapping().get)
+
+ elif dtype_backend == "pyarrow":
+ return pa_table.to_pandas(types_mapper=pd.ArrowDtype)
+
+ elif using_pyarrow_string_dtype():
+ return pa_table.to_pandas(types_mapper=arrow_string_types_mapper())
+ else:
+ raise NotImplementedError
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__init__.py b/venv/lib/python3.10/site-packages/pandas/io/formats/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e56b1bc7ba4377cc5de9d68a1424524aef21cb5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/__init__.py
@@ -0,0 +1,9 @@
+# ruff: noqa: TCH004
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ # import modules that have public classes/functions
+ from pandas.io.formats import style
+
+ # and mark only those modules as public
+ __all__ = ["style"]
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a360c48d22b80483ce16b4e80c89864bca32dc6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b345679e8adb961d23f48e9f5da94939f6ef0eb5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3569435d455dc8c3de67b678f88ebf4543993f1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa73ccd312d04852b99543090ea04900de713366
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a1382fbf93dc797f78064c6000ff2a215cb0361
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..583f5392343ac56e82a2e2b9fcba77c7159ec102
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb77fe9900a01d823379889f5826577f2d1a06b0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cf0c3f2d01536e8b5cef76e0acce0d78894f036
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ac6c853d2b414db66ab3c9322ca36ec262c37fb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38218666a2dad373fa3858c42c4a76460e862e14
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfb24410dc895711b0de82c3954efea8cc39f80a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28d4a87f389962a0f54705eb1a45f226fa5c6100
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5cd07f6c017235e1e207c35c1a06544e0c5cabb9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ddb0b2c5a8df092e0390d3adc0df4d695baf4762
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/_color_data.py b/venv/lib/python3.10/site-packages/pandas/io/formats/_color_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e7cb7f29646eb11c0ec83d8a909a8cfd7953182
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/_color_data.py
@@ -0,0 +1,157 @@
+# GH37967: Enable the use of CSS named colors, as defined in
+# matplotlib.colors.CSS4_COLORS, when exporting to Excel.
+# This data has been copied here, instead of being imported from matplotlib,
+# so that the ``to_excel`` methods do not require matplotlib.
+# source: matplotlib._color_data (3.3.3)
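+# Note: values are bare "RRGGBB" hex strings with no leading "#",
+# e.g. CSS4_COLORS["red"] == "FF0000".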
+from __future__ import annotations
+
+CSS4_COLORS = {
+ "aliceblue": "F0F8FF",
+ "antiquewhite": "FAEBD7",
+ "aqua": "00FFFF",
+ "aquamarine": "7FFFD4",
+ "azure": "F0FFFF",
+ "beige": "F5F5DC",
+ "bisque": "FFE4C4",
+ "black": "000000",
+ "blanchedalmond": "FFEBCD",
+ "blue": "0000FF",
+ "blueviolet": "8A2BE2",
+ "brown": "A52A2A",
+ "burlywood": "DEB887",
+ "cadetblue": "5F9EA0",
+ "chartreuse": "7FFF00",
+ "chocolate": "D2691E",
+ "coral": "FF7F50",
+ "cornflowerblue": "6495ED",
+ "cornsilk": "FFF8DC",
+ "crimson": "DC143C",
+ "cyan": "00FFFF",
+ "darkblue": "00008B",
+ "darkcyan": "008B8B",
+ "darkgoldenrod": "B8860B",
+ "darkgray": "A9A9A9",
+ "darkgreen": "006400",
+ "darkgrey": "A9A9A9",
+ "darkkhaki": "BDB76B",
+ "darkmagenta": "8B008B",
+ "darkolivegreen": "556B2F",
+ "darkorange": "FF8C00",
+ "darkorchid": "9932CC",
+ "darkred": "8B0000",
+ "darksalmon": "E9967A",
+ "darkseagreen": "8FBC8F",
+ "darkslateblue": "483D8B",
+ "darkslategray": "2F4F4F",
+ "darkslategrey": "2F4F4F",
+ "darkturquoise": "00CED1",
+ "darkviolet": "9400D3",
+ "deeppink": "FF1493",
+ "deepskyblue": "00BFFF",
+ "dimgray": "696969",
+ "dimgrey": "696969",
+ "dodgerblue": "1E90FF",
+ "firebrick": "B22222",
+ "floralwhite": "FFFAF0",
+ "forestgreen": "228B22",
+ "fuchsia": "FF00FF",
+ "gainsboro": "DCDCDC",
+ "ghostwhite": "F8F8FF",
+ "gold": "FFD700",
+ "goldenrod": "DAA520",
+ "gray": "808080",
+ "green": "008000",
+ "greenyellow": "ADFF2F",
+ "grey": "808080",
+ "honeydew": "F0FFF0",
+ "hotpink": "FF69B4",
+ "indianred": "CD5C5C",
+ "indigo": "4B0082",
+ "ivory": "FFFFF0",
+ "khaki": "F0E68C",
+ "lavender": "E6E6FA",
+ "lavenderblush": "FFF0F5",
+ "lawngreen": "7CFC00",
+ "lemonchiffon": "FFFACD",
+ "lightblue": "ADD8E6",
+ "lightcoral": "F08080",
+ "lightcyan": "E0FFFF",
+ "lightgoldenrodyellow": "FAFAD2",
+ "lightgray": "D3D3D3",
+ "lightgreen": "90EE90",
+ "lightgrey": "D3D3D3",
+ "lightpink": "FFB6C1",
+ "lightsalmon": "FFA07A",
+ "lightseagreen": "20B2AA",
+ "lightskyblue": "87CEFA",
+ "lightslategray": "778899",
+ "lightslategrey": "778899",
+ "lightsteelblue": "B0C4DE",
+ "lightyellow": "FFFFE0",
+ "lime": "00FF00",
+ "limegreen": "32CD32",
+ "linen": "FAF0E6",
+ "magenta": "FF00FF",
+ "maroon": "800000",
+ "mediumaquamarine": "66CDAA",
+ "mediumblue": "0000CD",
+ "mediumorchid": "BA55D3",
+ "mediumpurple": "9370DB",
+ "mediumseagreen": "3CB371",
+ "mediumslateblue": "7B68EE",
+ "mediumspringgreen": "00FA9A",
+ "mediumturquoise": "48D1CC",
+ "mediumvioletred": "C71585",
+ "midnightblue": "191970",
+ "mintcream": "F5FFFA",
+ "mistyrose": "FFE4E1",
+ "moccasin": "FFE4B5",
+ "navajowhite": "FFDEAD",
+ "navy": "000080",
+ "oldlace": "FDF5E6",
+ "olive": "808000",
+ "olivedrab": "6B8E23",
+ "orange": "FFA500",
+ "orangered": "FF4500",
+ "orchid": "DA70D6",
+ "palegoldenrod": "EEE8AA",
+ "palegreen": "98FB98",
+ "paleturquoise": "AFEEEE",
+ "palevioletred": "DB7093",
+ "papayawhip": "FFEFD5",
+ "peachpuff": "FFDAB9",
+ "peru": "CD853F",
+ "pink": "FFC0CB",
+ "plum": "DDA0DD",
+ "powderblue": "B0E0E6",
+ "purple": "800080",
+ "rebeccapurple": "663399",
+ "red": "FF0000",
+ "rosybrown": "BC8F8F",
+ "royalblue": "4169E1",
+ "saddlebrown": "8B4513",
+ "salmon": "FA8072",
+ "sandybrown": "F4A460",
+ "seagreen": "2E8B57",
+ "seashell": "FFF5EE",
+ "sienna": "A0522D",
+ "silver": "C0C0C0",
+ "skyblue": "87CEEB",
+ "slateblue": "6A5ACD",
+ "slategray": "708090",
+ "slategrey": "708090",
+ "snow": "FFFAFA",
+ "springgreen": "00FF7F",
+ "steelblue": "4682B4",
+ "tan": "D2B48C",
+ "teal": "008080",
+ "thistle": "D8BFD8",
+ "tomato": "FF6347",
+ "turquoise": "40E0D0",
+ "violet": "EE82EE",
+ "wheat": "F5DEB3",
+ "white": "FFFFFF",
+ "whitesmoke": "F5F5F5",
+ "yellow": "FFFF00",
+ "yellowgreen": "9ACD32",
+}
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/console.py b/venv/lib/python3.10/site-packages/pandas/io/formats/console.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a6cbe07629031687c249f70b51bdfbe2dd84041
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/console.py
@@ -0,0 +1,94 @@
+"""
+Internal module for console introspection
+"""
+from __future__ import annotations
+
+from shutil import get_terminal_size
+
+
+def get_console_size() -> tuple[int | None, int | None]:
+ """
+    Return console size as a tuple (width, height).
+
+    Returns (None, None) in a non-interactive session.
+ """
+ from pandas import get_option
+
+ display_width = get_option("display.width")
+ display_height = get_option("display.max_rows")
+
+    # Three cases to consider:
+    # - interactive shell terminal: terminal size can be detected
+    # - interactive non-shell terminal (ipnb/ipqtconsole): terminal size
+    #   cannot be detected
+    # - non-interactive script: terminal size should be disregarded
+
+    # In addition, width/height have default values, but setting them to None
+    # signals that auto-detection should be used, though only in an
+    # interactive shell terminal.
+
+ if in_interactive_session():
+ if in_ipython_frontend():
+ # sane defaults for interactive non-shell terminal
+ # match default for width,height in config_init
+ from pandas._config.config import get_default_val
+
+ terminal_width = get_default_val("display.width")
+ terminal_height = get_default_val("display.max_rows")
+ else:
+ # pure terminal
+ terminal_width, terminal_height = get_terminal_size()
+ else:
+ terminal_width, terminal_height = None, None
+
+    # Note: if the user sets width/height to None (auto-detection) and we are
+    # in a non-interactive script, this returns (None, None); the caller
+    # needs to handle that.
+ return display_width or terminal_width, display_height or terminal_height
+
+
+# ----------------------------------------------------------------------
+# Detect our environment
+
+
+def in_interactive_session() -> bool:
+ """
+ Check if we're running in an interactive shell.
+
+ Returns
+ -------
+ bool
+ True if running under python/ipython interactive shell.
+ """
+ from pandas import get_option
+
+ def check_main():
+ try:
+ import __main__ as main
+ except ModuleNotFoundError:
+ return get_option("mode.sim_interactive")
+ return not hasattr(main, "__file__") or get_option("mode.sim_interactive")
+
+ try:
+ # error: Name '__IPYTHON__' is not defined
+ return __IPYTHON__ or check_main() # type: ignore[name-defined]
+ except NameError:
+ return check_main()
+
+
+def in_ipython_frontend() -> bool:
+ """
+ Check if we're inside an IPython zmq frontend.
+
+ Returns
+ -------
+ bool
+ """
+ try:
+ # error: Name 'get_ipython' is not defined
+ ip = get_ipython() # type: ignore[name-defined]
+ return "zmq" in str(type(ip)).lower()
+ except NameError:
+ pass
+
+ return False
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/css.py b/venv/lib/python3.10/site-packages/pandas/io/formats/css.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccce60c00a9e02bf3bb7f21c5ec799b7123e8eed
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/css.py
@@ -0,0 +1,421 @@
+"""
+Utilities for interpreting CSS from Stylers for formatting non-HTML outputs.
+"""
+from __future__ import annotations
+
+import re
+from typing import (
+ TYPE_CHECKING,
+ Callable,
+)
+import warnings
+
+from pandas.errors import CSSWarning
+from pandas.util._exceptions import find_stack_level
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Generator,
+ Iterable,
+ Iterator,
+ )
+
+
+def _side_expander(prop_fmt: str) -> Callable:
+ """
+    Wrapper to expand a shorthand property into top, right, bottom, left properties
+
+    Parameters
+    ----------
+    prop_fmt : str
+        Format string for the expanded properties, e.g. "margin-{:s}"
+
+    Returns
+    -------
+    function: Function to call when a shorthand declaration, e.g. 'margin: {value}',
+    is encountered
+ """
+
+ def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
+ """
+ Expand shorthand property into side-specific property (top, right, bottom, left)
+
+ Parameters
+ ----------
+        prop : str
+            CSS property name
+        value : str
+            String token for property
+
+ Yields
+ ------
+ Tuple (str, str): Expanded property, value
+ """
+ tokens = value.split()
+ try:
+ mapping = self.SIDE_SHORTHANDS[len(tokens)]
+ except KeyError:
+ warnings.warn(
+ f'Could not expand "{prop}: {value}"',
+ CSSWarning,
+ stacklevel=find_stack_level(),
+ )
+ return
+ for key, idx in zip(self.SIDES, mapping):
+ yield prop_fmt.format(key), tokens[idx]
+
+ return expand
+
+
+def _border_expander(side: str = "") -> Callable:
+ """
+ Wrapper to expand 'border' property into border color, style, and width properties
+
+ Parameters
+ ----------
+ side : str
+ The border side to expand into properties
+
+ Returns
+ -------
+    function: Function to call when a 'border(-{side}): {value}' string is encountered
+ """
+ if side != "":
+ side = f"-{side}"
+
+ def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
+ """
+ Expand border into color, style, and width tuples
+
+ Parameters
+ ----------
+ prop : str
+ CSS property name passed to styler
+ value : str
+ Value passed to styler for property
+
+ Yields
+ ------
+ Tuple (str, str): Expanded property, value
+ """
+ tokens = value.split()
+ if len(tokens) == 0 or len(tokens) > 3:
+ warnings.warn(
+                f'Wrong number of tokens provided to "{prop}" (expected 1-3)',
+ CSSWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ # TODO: Can we use current color as initial value to comply with CSS standards?
+ border_declarations = {
+ f"border{side}-color": "black",
+ f"border{side}-style": "none",
+ f"border{side}-width": "medium",
+ }
+ for token in tokens:
+ if token.lower() in self.BORDER_STYLES:
+ border_declarations[f"border{side}-style"] = token
+ elif any(ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS):
+ border_declarations[f"border{side}-width"] = token
+ else:
+ border_declarations[f"border{side}-color"] = token
+ # TODO: Warn user if item entered more than once (e.g. "border: red green")
+
+ # Per CSS, "border" will reset previous "border-*" definitions
+ yield from self.atomize(border_declarations.items())
+
+ return expand
+
+
+class CSSResolver:
+ """
+ A callable for parsing and resolving CSS to atomic properties.
+ """
+
+ UNIT_RATIOS = {
+ "pt": ("pt", 1),
+ "em": ("em", 1),
+ "rem": ("pt", 12),
+ "ex": ("em", 0.5),
+ # 'ch':
+ "px": ("pt", 0.75),
+ "pc": ("pt", 12),
+ "in": ("pt", 72),
+ "cm": ("in", 1 / 2.54),
+ "mm": ("in", 1 / 25.4),
+ "q": ("mm", 0.25),
+ "!!default": ("em", 0),
+ }
+
+ FONT_SIZE_RATIOS = UNIT_RATIOS.copy()
+ FONT_SIZE_RATIOS.update(
+ {
+ "%": ("em", 0.01),
+ "xx-small": ("rem", 0.5),
+ "x-small": ("rem", 0.625),
+ "small": ("rem", 0.8),
+ "medium": ("rem", 1),
+ "large": ("rem", 1.125),
+ "x-large": ("rem", 1.5),
+ "xx-large": ("rem", 2),
+ "smaller": ("em", 1 / 1.2),
+ "larger": ("em", 1.2),
+ "!!default": ("em", 1),
+ }
+ )
+
+ MARGIN_RATIOS = UNIT_RATIOS.copy()
+ MARGIN_RATIOS.update({"none": ("pt", 0)})
+
+ BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()
+ BORDER_WIDTH_RATIOS.update(
+ {
+ "none": ("pt", 0),
+ "thick": ("px", 4),
+ "medium": ("px", 2),
+ "thin": ("px", 1),
+ # Default: medium only if solid
+ }
+ )
+
+ BORDER_STYLES = [
+ "none",
+ "hidden",
+ "dotted",
+ "dashed",
+ "solid",
+ "double",
+ "groove",
+ "ridge",
+ "inset",
+ "outset",
+ "mediumdashdot",
+ "dashdotdot",
+ "hair",
+ "mediumdashdotdot",
+ "dashdot",
+ "slantdashdot",
+ "mediumdashed",
+ ]
+
+ SIDE_SHORTHANDS = {
+ 1: [0, 0, 0, 0],
+ 2: [0, 1, 0, 1],
+ 3: [0, 1, 2, 1],
+ 4: [0, 1, 2, 3],
+ }
+
+ SIDES = ("top", "right", "bottom", "left")
+
+ CSS_EXPANSIONS = {
+ **{
+ (f"border-{prop}" if prop else "border"): _border_expander(prop)
+ for prop in ["", "top", "right", "bottom", "left"]
+ },
+ **{
+ f"border-{prop}": _side_expander(f"border-{{:s}}-{prop}")
+ for prop in ["color", "style", "width"]
+ },
+ "margin": _side_expander("margin-{:s}"),
+ "padding": _side_expander("padding-{:s}"),
+ }
+
+ def __call__(
+ self,
+ declarations: str | Iterable[tuple[str, str]],
+ inherited: dict[str, str] | None = None,
+ ) -> dict[str, str]:
+ """
+        Resolve the given declarations to atomic properties.
+
+ Parameters
+ ----------
+        declarations : str | Iterable[tuple[str, str]]
+            A CSS string or set of CSS declaration tuples
+            e.g. "font-weight: bold; background: blue" or
+            {("font-weight", "bold"), ("background", "blue")}
+        inherited : dict, optional
+            Atomic properties indicating the inherited style context in which
+            ``declarations`` is to be resolved. ``inherited`` should already
+            be resolved, i.e. valid output of this method.
+
+ Returns
+ -------
+ dict
+ Atomic CSS 2.2 properties.
+
+ Examples
+ --------
+ >>> resolve = CSSResolver()
+ >>> inherited = {'font-family': 'serif', 'font-weight': 'bold'}
+ >>> out = resolve('''
+ ... border-color: BLUE RED;
+ ... font-size: 1em;
+ ... font-size: 2em;
+ ... font-weight: normal;
+ ... font-weight: inherit;
+ ... ''', inherited)
+ >>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE
+ [('border-bottom-color', 'blue'),
+ ('border-left-color', 'red'),
+ ('border-right-color', 'red'),
+ ('border-top-color', 'blue'),
+ ('font-family', 'serif'),
+ ('font-size', '24pt'),
+ ('font-weight', 'bold')]
+ """
+ if isinstance(declarations, str):
+ declarations = self.parse(declarations)
+ props = dict(self.atomize(declarations))
+ if inherited is None:
+ inherited = {}
+
+ props = self._update_initial(props, inherited)
+ props = self._update_font_size(props, inherited)
+ return self._update_other_units(props)
+
+ def _update_initial(
+ self,
+ props: dict[str, str],
+ inherited: dict[str, str],
+ ) -> dict[str, str]:
+ # 1. resolve inherited, initial
+ for prop, val in inherited.items():
+ if prop not in props:
+ props[prop] = val
+
+ new_props = props.copy()
+ for prop, val in props.items():
+ if val == "inherit":
+ val = inherited.get(prop, "initial")
+
+ if val in ("initial", None):
+ # we do not define a complete initial stylesheet
+ del new_props[prop]
+ else:
+ new_props[prop] = val
+ return new_props
+
+ def _update_font_size(
+ self,
+ props: dict[str, str],
+ inherited: dict[str, str],
+ ) -> dict[str, str]:
+ # 2. resolve relative font size
+ if props.get("font-size"):
+ props["font-size"] = self.size_to_pt(
+ props["font-size"],
+ self._get_font_size(inherited),
+ conversions=self.FONT_SIZE_RATIOS,
+ )
+ return props
+
+ def _get_font_size(self, props: dict[str, str]) -> float | None:
+ if props.get("font-size"):
+ font_size_string = props["font-size"]
+ return self._get_float_font_size_from_pt(font_size_string)
+ return None
+
+ def _get_float_font_size_from_pt(self, font_size_string: str) -> float:
+ assert font_size_string.endswith("pt")
+ return float(font_size_string.rstrip("pt"))
+
+ def _update_other_units(self, props: dict[str, str]) -> dict[str, str]:
+ font_size = self._get_font_size(props)
+ # 3. TODO: resolve other font-relative units
+ for side in self.SIDES:
+ prop = f"border-{side}-width"
+ if prop in props:
+ props[prop] = self.size_to_pt(
+ props[prop],
+ em_pt=font_size,
+ conversions=self.BORDER_WIDTH_RATIOS,
+ )
+
+ for prop in [f"margin-{side}", f"padding-{side}"]:
+ if prop in props:
+ # TODO: support %
+ props[prop] = self.size_to_pt(
+ props[prop],
+ em_pt=font_size,
+ conversions=self.MARGIN_RATIOS,
+ )
+ return props
+
+ def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS) -> str:
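+        """
+        Convert a CSS size string, e.g. "16px" or "1.5em", to points ("pt").
+
+        ``em_pt`` is the font size in points used to resolve "em" units;
+        unhandled sizes warn and resolve via the "!!default" entry.
+
+        Examples
+        --------
+        >>> CSSResolver().size_to_pt("16px")
+        '12pt'
+        """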
+ def _error():
+ warnings.warn(
+ f"Unhandled size: {repr(in_val)}",
+ CSSWarning,
+ stacklevel=find_stack_level(),
+ )
+ return self.size_to_pt("1!!default", conversions=conversions)
+
+ match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val)
+ if match is None:
+ return _error()
+
+ val, unit = match.groups()
+ if val == "":
+ # hack for 'large' etc.
+ val = 1
+ else:
+ try:
+ val = float(val)
+ except ValueError:
+ return _error()
+
+ while unit != "pt":
+ if unit == "em":
+ if em_pt is None:
+ unit = "rem"
+ else:
+ val *= em_pt
+ unit = "pt"
+ continue
+
+ try:
+ unit, mul = conversions[unit]
+ except KeyError:
+ return _error()
+ val *= mul
+
+ val = round(val, 5)
+ if int(val) == val:
+ size_fmt = f"{int(val):d}pt"
+ else:
+ size_fmt = f"{val:f}pt"
+ return size_fmt
+
+ def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]:
+ for prop, value in declarations:
+ prop = prop.lower()
+ value = value.lower()
+ if prop in self.CSS_EXPANSIONS:
+ expand = self.CSS_EXPANSIONS[prop]
+ yield from expand(self, prop, value)
+ else:
+ yield prop, value
+
+ def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]:
+ """
+ Generates (prop, value) pairs from declarations.
+
+ In a future version may generate parsed tokens from tinycss/tinycss2
+
+ Parameters
+ ----------
+ declarations_str : str
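+
+        Examples
+        --------
+        >>> list(CSSResolver().parse("color: red; width: 10px"))
+        [('color', 'red'), ('width', '10px')]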
+ """
+ for decl in declarations_str.split(";"):
+ if not decl.strip():
+ continue
+ prop, sep, val = decl.partition(":")
+ prop = prop.strip().lower()
+ # TODO: don't lowercase case sensitive parts of values (strings)
+ val = val.strip().lower()
+ if sep:
+ yield prop, val
+ else:
+ warnings.warn(
+ f"Ill-formatted attribute: expected a colon in {repr(decl)}",
+ CSSWarning,
+ stacklevel=find_stack_level(),
+ )
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/csvs.py b/venv/lib/python3.10/site-packages/pandas/io/formats/csvs.py
new file mode 100644
index 0000000000000000000000000000000000000000..50503e862ef433901f40715987c2105f6f16263a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/csvs.py
@@ -0,0 +1,330 @@
+"""
+Module for formatting output data into CSV files.
+"""
+
+from __future__ import annotations
+
+from collections.abc import (
+ Hashable,
+ Iterable,
+ Iterator,
+ Sequence,
+)
+import csv as csvlib
+import os
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ cast,
+)
+
+import numpy as np
+
+from pandas._libs import writers as libwriters
+from pandas._typing import SequenceNotStr
+from pandas.util._decorators import cache_readonly
+
+from pandas.core.dtypes.generic import (
+ ABCDatetimeIndex,
+ ABCIndex,
+ ABCMultiIndex,
+ ABCPeriodIndex,
+)
+from pandas.core.dtypes.missing import notna
+
+from pandas.core.indexes.api import Index
+
+from pandas.io.common import get_handle
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ CompressionOptions,
+ FilePath,
+ FloatFormatType,
+ IndexLabel,
+ StorageOptions,
+ WriteBuffer,
+ npt,
+ )
+
+ from pandas.io.formats.format import DataFrameFormatter
+
+
+_DEFAULT_CHUNKSIZE_CELLS = 100_000
+
+
+class CSVFormatter:
+ cols: npt.NDArray[np.object_]
+
+ def __init__(
+ self,
+ formatter: DataFrameFormatter,
+ path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "",
+ sep: str = ",",
+ cols: Sequence[Hashable] | None = None,
+ index_label: IndexLabel | None = None,
+ mode: str = "w",
+ encoding: str | None = None,
+ errors: str = "strict",
+ compression: CompressionOptions = "infer",
+ quoting: int | None = None,
+ lineterminator: str | None = "\n",
+ chunksize: int | None = None,
+ quotechar: str | None = '"',
+ date_format: str | None = None,
+ doublequote: bool = True,
+ escapechar: str | None = None,
+ storage_options: StorageOptions | None = None,
+ ) -> None:
+ self.fmt = formatter
+
+ self.obj = self.fmt.frame
+
+ self.filepath_or_buffer = path_or_buf
+ self.encoding = encoding
+ self.compression: CompressionOptions = compression
+ self.mode = mode
+ self.storage_options = storage_options
+
+ self.sep = sep
+ self.index_label = self._initialize_index_label(index_label)
+ self.errors = errors
+ self.quoting = quoting or csvlib.QUOTE_MINIMAL
+ self.quotechar = self._initialize_quotechar(quotechar)
+ self.doublequote = doublequote
+ self.escapechar = escapechar
+ self.lineterminator = lineterminator or os.linesep
+ self.date_format = date_format
+ self.cols = self._initialize_columns(cols)
+ self.chunksize = self._initialize_chunksize(chunksize)
+
+ @property
+ def na_rep(self) -> str:
+ return self.fmt.na_rep
+
+ @property
+ def float_format(self) -> FloatFormatType | None:
+ return self.fmt.float_format
+
+ @property
+ def decimal(self) -> str:
+ return self.fmt.decimal
+
+ @property
+ def header(self) -> bool | SequenceNotStr[str]:
+ return self.fmt.header
+
+ @property
+ def index(self) -> bool:
+ return self.fmt.index
+
+ def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel:
+ if index_label is not False:
+ if index_label is None:
+ return self._get_index_label_from_obj()
+ elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)):
+ # given a string for a DF with Index
+ return [index_label]
+ return index_label
+
+ def _get_index_label_from_obj(self) -> Sequence[Hashable]:
+ if isinstance(self.obj.index, ABCMultiIndex):
+ return self._get_index_label_multiindex()
+ else:
+ return self._get_index_label_flat()
+
+ def _get_index_label_multiindex(self) -> Sequence[Hashable]:
+ return [name or "" for name in self.obj.index.names]
+
+ def _get_index_label_flat(self) -> Sequence[Hashable]:
+ index_label = self.obj.index.name
+ return [""] if index_label is None else [index_label]
+
+ def _initialize_quotechar(self, quotechar: str | None) -> str | None:
+ if self.quoting != csvlib.QUOTE_NONE:
+ # prevents crash in _csv
+ return quotechar
+ return None
+
+ @property
+ def has_mi_columns(self) -> bool:
+ return bool(isinstance(self.obj.columns, ABCMultiIndex))
+
+ def _initialize_columns(
+ self, cols: Iterable[Hashable] | None
+ ) -> npt.NDArray[np.object_]:
+ # validate mi options
+ if self.has_mi_columns:
+ if cols is not None:
+ msg = "cannot specify cols with a MultiIndex on the columns"
+ raise TypeError(msg)
+
+ if cols is not None:
+ if isinstance(cols, ABCIndex):
+ cols = cols._get_values_for_csv(**self._number_format)
+ else:
+ cols = list(cols)
+ self.obj = self.obj.loc[:, cols]
+
+ # update columns to include possible multiplicity of dupes
+ # and make sure cols is just a list of labels
+ new_cols = self.obj.columns
+ return new_cols._get_values_for_csv(**self._number_format)
+
+ def _initialize_chunksize(self, chunksize: int | None) -> int:
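+        # Aim for roughly _DEFAULT_CHUNKSIZE_CELLS cells per chunk: e.g. a
+        # frame with 50 columns defaults to 100_000 // 50 == 2_000 rows.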
+ if chunksize is None:
+ return (_DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1)) or 1
+ return int(chunksize)
+
+ @property
+ def _number_format(self) -> dict[str, Any]:
+ """Dictionary used for storing number formatting settings."""
+ return {
+ "na_rep": self.na_rep,
+ "float_format": self.float_format,
+ "date_format": self.date_format,
+ "quoting": self.quoting,
+ "decimal": self.decimal,
+ }
+
+ @cache_readonly
+ def data_index(self) -> Index:
+ data_index = self.obj.index
+ if (
+ isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex))
+ and self.date_format is not None
+ ):
+ data_index = Index(
+ [x.strftime(self.date_format) if notna(x) else "" for x in data_index]
+ )
+ elif isinstance(data_index, ABCMultiIndex):
+ data_index = data_index.remove_unused_levels()
+ return data_index
+
+ @property
+ def nlevels(self) -> int:
+ if self.index:
+ return getattr(self.data_index, "nlevels", 1)
+ else:
+ return 0
+
+ @property
+ def _has_aliases(self) -> bool:
+ return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
+
+ @property
+ def _need_to_save_header(self) -> bool:
+ return bool(self._has_aliases or self.header)
+
+ @property
+ def write_cols(self) -> SequenceNotStr[Hashable]:
+ if self._has_aliases:
+ assert not isinstance(self.header, bool)
+ if len(self.header) != len(self.cols):
+ raise ValueError(
+ f"Writing {len(self.cols)} cols but got {len(self.header)} aliases"
+ )
+ return self.header
+ else:
+ # self.cols is an ndarray derived from Index._get_values_for_csv,
+ # so its entries are strings, i.e. hashable
+ return cast(SequenceNotStr[Hashable], self.cols)
+
+ @property
+ def encoded_labels(self) -> list[Hashable]:
+ encoded_labels: list[Hashable] = []
+
+ if self.index and self.index_label:
+ assert isinstance(self.index_label, Sequence)
+ encoded_labels = list(self.index_label)
+
+ if not self.has_mi_columns or self._has_aliases:
+ encoded_labels += list(self.write_cols)
+
+ return encoded_labels
+
+ def save(self) -> None:
+ """
+ Create the writer & save.
+ """
+ # apply compression and byte/text conversion
+ with get_handle(
+ self.filepath_or_buffer,
+ self.mode,
+ encoding=self.encoding,
+ errors=self.errors,
+ compression=self.compression,
+ storage_options=self.storage_options,
+ ) as handles:
+ # Note: self.encoding is irrelevant here
+ self.writer = csvlib.writer(
+ handles.handle,
+ lineterminator=self.lineterminator,
+ delimiter=self.sep,
+ quoting=self.quoting,
+ doublequote=self.doublequote,
+ escapechar=self.escapechar,
+ quotechar=self.quotechar,
+ )
+
+ self._save()
+
+ def _save(self) -> None:
+ if self._need_to_save_header:
+ self._save_header()
+ self._save_body()
+
+ def _save_header(self) -> None:
+ if not self.has_mi_columns or self._has_aliases:
+ self.writer.writerow(self.encoded_labels)
+ else:
+ for row in self._generate_multiindex_header_rows():
+ self.writer.writerow(row)
+
+ def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]:
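+        # Illustrative: one header row is yielded per column level (each
+        # prefixed with that level's name when the index is written), plus a
+        # trailing row of index labels when those are non-empty.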
+ columns = self.obj.columns
+ for i in range(columns.nlevels):
+ # we need at least 1 index column to write our col names
+ col_line = []
+ if self.index:
+ # name is the first column
+ col_line.append(columns.names[i])
+
+ if isinstance(self.index_label, list) and len(self.index_label) > 1:
+ col_line.extend([""] * (len(self.index_label) - 1))
+
+ col_line.extend(columns._get_level_values(i))
+ yield col_line
+
+ # Write out the index line if it's not empty.
+ # Otherwise, we will print out an extraneous
+ # blank line between the mi and the data rows.
+ if self.encoded_labels and set(self.encoded_labels) != {""}:
+ yield self.encoded_labels + [""] * len(columns)
+
+ def _save_body(self) -> None:
+ nrows = len(self.data_index)
+ chunks = (nrows // self.chunksize) + 1
+ for i in range(chunks):
+ start_i = i * self.chunksize
+ end_i = min(start_i + self.chunksize, nrows)
+ if start_i >= end_i:
+ break
+ self._save_chunk(start_i, end_i)
+
+ def _save_chunk(self, start_i: int, end_i: int) -> None:
+ # create the data for a chunk
+ slicer = slice(start_i, end_i)
+ df = self.obj.iloc[slicer]
+
+ res = df._get_values_for_csv(**self._number_format)
+ data = list(res._iter_column_arrays())
+
+ ix = self.data_index[slicer]._get_values_for_csv(**self._number_format)
+ libwriters.write_csv_rows(
+ data,
+ ix,
+ self.nlevels,
+ self.cols,
+ self.writer,
+ )
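+
+
+# Illustrative sketch (not part of pandas): CSVFormatter is normally driven
+# by DataFrame.to_csv, but can be exercised directly:
+#
+#   >>> import pandas as pd
+#   >>> from pandas.io.formats.format import DataFrameFormatter
+#   >>> fmt = DataFrameFormatter(pd.DataFrame({"a": [1, 2]}))
+#   >>> CSVFormatter(fmt, "out.csv").save()  # writes header, index and rows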
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/excel.py b/venv/lib/python3.10/site-packages/pandas/io/formats/excel.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fd23cd7d918ad0efddb1088d79fd78f6079cca7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/excel.py
@@ -0,0 +1,962 @@
+"""
+Utilities for conversion to writer-agnostic Excel representation.
+"""
+from __future__ import annotations
+
+from collections.abc import (
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+)
+import functools
+import itertools
+import re
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ cast,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs.lib import is_list_like
+from pandas.util._decorators import doc
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes import missing
+from pandas.core.dtypes.common import (
+ is_float,
+ is_scalar,
+)
+
+from pandas import (
+ DataFrame,
+ Index,
+ MultiIndex,
+ PeriodIndex,
+)
+import pandas.core.common as com
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.formats._color_data import CSS4_COLORS
+from pandas.io.formats.css import (
+ CSSResolver,
+ CSSWarning,
+)
+from pandas.io.formats.format import get_level_lengths
+from pandas.io.formats.printing import pprint_thing
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ FilePath,
+ IndexLabel,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+
+ from pandas import ExcelWriter
+
+
+class ExcelCell:
+ __fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")
+ __slots__ = __fields__
+
+ def __init__(
+ self,
+ row: int,
+ col: int,
+ val,
+ style=None,
+ mergestart: int | None = None,
+ mergeend: int | None = None,
+ ) -> None:
+ self.row = row
+ self.col = col
+ self.val = val
+ self.style = style
+ self.mergestart = mergestart
+ self.mergeend = mergeend
+
+
+class CssExcelCell(ExcelCell):
+ def __init__(
+ self,
+ row: int,
+ col: int,
+ val,
+ style: dict | None,
+ css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None,
+ css_row: int,
+ css_col: int,
+ css_converter: Callable | None,
+ **kwargs,
+ ) -> None:
+ if css_styles and css_converter:
+ # Use dict to get only one (case-insensitive) declaration per property
+ declaration_dict = {
+ prop.lower(): val for prop, val in css_styles[css_row, css_col]
+ }
+ # Convert to frozenset for order-invariant caching
+ unique_declarations = frozenset(declaration_dict.items())
+ style = css_converter(unique_declarations)
+
+ super().__init__(row=row, col=col, val=val, style=style, **kwargs)
+
+
+class CSSToExcelConverter:
+ """
+ A callable for converting CSS declarations to ExcelWriter styles
+
+ Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
+ focusing on font styling, backgrounds, borders and alignment.
+
+ Operates by first computing CSS styles in a fairly generic
+ way (see :meth:`compute_css`) then determining Excel style
+ properties from CSS properties (see :meth:`build_xlstyle`).
+
+ Parameters
+ ----------
+ inherited : str, optional
+ CSS declarations understood to be the containing scope for the
+ CSS processed by :meth:`__call__`.
+ """
+
+ NAMED_COLORS = CSS4_COLORS
+
+ VERTICAL_MAP = {
+ "top": "top",
+ "text-top": "top",
+ "middle": "center",
+ "baseline": "bottom",
+ "bottom": "bottom",
+ "text-bottom": "bottom",
+ # OpenXML also has 'justify', 'distributed'
+ }
+
+ BOLD_MAP = {
+ "bold": True,
+ "bolder": True,
+ "600": True,
+ "700": True,
+ "800": True,
+ "900": True,
+ "normal": False,
+ "lighter": False,
+ "100": False,
+ "200": False,
+ "300": False,
+ "400": False,
+ "500": False,
+ }
+
+ ITALIC_MAP = {
+ "normal": False,
+ "italic": True,
+ "oblique": True,
+ }
+
+ FAMILY_MAP = {
+ "serif": 1, # roman
+ "sans-serif": 2, # swiss
+ "cursive": 4, # script
+ "fantasy": 5, # decorative
+ }
+
+ BORDER_STYLE_MAP = {
+ style.lower(): style
+ for style in [
+ "dashed",
+ "mediumDashDot",
+ "dashDotDot",
+ "hair",
+ "dotted",
+ "mediumDashDotDot",
+ "double",
+ "dashDot",
+ "slantDashDot",
+ "mediumDashed",
+ ]
+ }
+
+ # NB: Most of the methods here could be classmethods, as only __init__
+ # and __call__ make use of instance attributes. We leave them as
+ # instancemethods so that users can easily experiment with extensions
+ # without monkey-patching.
+ inherited: dict[str, str] | None
+
+ def __init__(self, inherited: str | None = None) -> None:
+ if inherited is not None:
+ self.inherited = self.compute_css(inherited)
+ else:
+ self.inherited = None
+        # We should avoid caching on the __call__ method itself:
+        # otherwise, once __call__ has been called, garbage collection
+        # no longer deletes the instance.
+ self._call_cached = functools.cache(self._call_uncached)
+
+ compute_css = CSSResolver()
+
+ def __call__(
+ self, declarations: str | frozenset[tuple[str, str]]
+ ) -> dict[str, dict[str, str]]:
+ """
+ Convert CSS declarations to ExcelWriter style.
+
+ Parameters
+ ----------
+ declarations : str | frozenset[tuple[str, str]]
+ CSS string or set of CSS declaration tuples.
+ e.g. "font-weight: bold; background: blue" or
+ {("font-weight", "bold"), ("background", "blue")}
+
+ Returns
+ -------
+ xlstyle : dict
+ A style as interpreted by ExcelWriter when found in
+ ExcelCell.style.
+ """
+ return self._call_cached(declarations)
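+
+    # Illustrative (not part of pandas): with no inherited styles,
+    #   >>> CSSToExcelConverter()("font-weight: bold; color: #ff0000")
+    #   {'font': {'bold': True, 'color': 'FF0000'}}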
+
+ def _call_uncached(
+ self, declarations: str | frozenset[tuple[str, str]]
+ ) -> dict[str, dict[str, str]]:
+ properties = self.compute_css(declarations, self.inherited)
+ return self.build_xlstyle(properties)
+
+ def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]:
+ out = {
+ "alignment": self.build_alignment(props),
+ "border": self.build_border(props),
+ "fill": self.build_fill(props),
+ "font": self.build_font(props),
+ "number_format": self.build_number_format(props),
+ }
+
+ # TODO: handle cell width and height: needs support in pandas.io.excel
+
+ def remove_none(d: dict[str, str | None]) -> None:
+ """Remove key where value is None, through nested dicts"""
+ for k, v in list(d.items()):
+ if v is None:
+ del d[k]
+ elif isinstance(v, dict):
+ remove_none(v)
+ if not v:
+ del d[k]
+
+ remove_none(out)
+ return out
+
+ def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]:
+ # TODO: text-indent, padding-left -> alignment.indent
+ return {
+ "horizontal": props.get("text-align"),
+ "vertical": self._get_vertical_alignment(props),
+ "wrap_text": self._get_is_wrap_text(props),
+ }
+
+ def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None:
+ vertical_align = props.get("vertical-align")
+ if vertical_align:
+ return self.VERTICAL_MAP.get(vertical_align)
+ return None
+
+ def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None:
+ if props.get("white-space") is None:
+ return None
+ return bool(props["white-space"] not in ("nowrap", "pre", "pre-line"))
+
+ def build_border(
+ self, props: Mapping[str, str]
+ ) -> dict[str, dict[str, str | None]]:
+ return {
+ side: {
+ "style": self._border_style(
+ props.get(f"border-{side}-style"),
+ props.get(f"border-{side}-width"),
+ self.color_to_excel(props.get(f"border-{side}-color")),
+ ),
+ "color": self.color_to_excel(props.get(f"border-{side}-color")),
+ }
+ for side in ["top", "right", "bottom", "left"]
+ }
+
+ def _border_style(self, style: str | None, width: str | None, color: str | None):
+ # convert styles and widths to openxml, one of:
+ # 'dashDot'
+ # 'dashDotDot'
+ # 'dashed'
+ # 'dotted'
+ # 'double'
+ # 'hair'
+ # 'medium'
+ # 'mediumDashDot'
+ # 'mediumDashDotDot'
+ # 'mediumDashed'
+ # 'slantDashDot'
+ # 'thick'
+ # 'thin'
+ if width is None and style is None and color is None:
+            # Returning None removes "border" from the style dictionary
+ return None
+
+ if width is None and style is None:
+ # Return "none" will keep "border" in style dictionary
+ return "none"
+
+ if style in ("none", "hidden"):
+ return "none"
+
+ width_name = self._get_width_name(width)
+ if width_name is None:
+ return "none"
+
+ if style in (None, "groove", "ridge", "inset", "outset", "solid"):
+ # not handled
+ return width_name
+
+ if style == "double":
+ return "double"
+ if style == "dotted":
+ if width_name in ("hair", "thin"):
+ return "dotted"
+ return "mediumDashDotDot"
+ if style == "dashed":
+ if width_name in ("hair", "thin"):
+ return "dashed"
+ return "mediumDashed"
+ elif style in self.BORDER_STYLE_MAP:
+ # Excel-specific styles
+ return self.BORDER_STYLE_MAP[style]
+ else:
+ warnings.warn(
+ f"Unhandled border style format: {repr(style)}",
+ CSSWarning,
+ stacklevel=find_stack_level(),
+ )
+ return "none"
+
+ def _get_width_name(self, width_input: str | None) -> str | None:
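+        # Map a pt width onto Excel's named widths: e.g. CSS "1px" arrives
+        # here as "0.75pt" -> "thin"; the 2pt default -> "medium"; anything
+        # >= 2.8pt -> "thick".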
+ width = self._width_to_float(width_input)
+ if width < 1e-5:
+ return None
+ elif width < 1.3:
+ return "thin"
+ elif width < 2.8:
+ return "medium"
+ return "thick"
+
+ def _width_to_float(self, width: str | None) -> float:
+ if width is None:
+ width = "2pt"
+ return self._pt_to_float(width)
+
+ def _pt_to_float(self, pt_string: str) -> float:
+ assert pt_string.endswith("pt")
+ return float(pt_string.rstrip("pt"))
+
+ def build_fill(self, props: Mapping[str, str]):
+ # TODO: perhaps allow for special properties
+ # -excel-pattern-bgcolor and -excel-pattern-type
+ fill_color = props.get("background-color")
+ if fill_color not in (None, "transparent", "none"):
+ return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"}
+
+ def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]:
+ fc = props.get("number-format")
+ fc = fc.replace("§", ";") if isinstance(fc, str) else fc
+ return {"format_code": fc}
+
+ def build_font(
+ self, props: Mapping[str, str]
+ ) -> dict[str, bool | float | str | None]:
+ font_names = self._get_font_names(props)
+ decoration = self._get_decoration(props)
+ return {
+ "name": font_names[0] if font_names else None,
+ "family": self._select_font_family(font_names),
+ "size": self._get_font_size(props),
+ "bold": self._get_is_bold(props),
+ "italic": self._get_is_italic(props),
+ "underline": ("single" if "underline" in decoration else None),
+ "strike": ("line-through" in decoration) or None,
+ "color": self.color_to_excel(props.get("color")),
+ # shadow if nonzero digit before shadow color
+ "shadow": self._get_shadow(props),
+ }
+
+ def _get_is_bold(self, props: Mapping[str, str]) -> bool | None:
+ weight = props.get("font-weight")
+ if weight:
+ return self.BOLD_MAP.get(weight)
+ return None
+
+ def _get_is_italic(self, props: Mapping[str, str]) -> bool | None:
+ font_style = props.get("font-style")
+ if font_style:
+ return self.ITALIC_MAP.get(font_style)
+ return None
+
+ def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]:
+ decoration = props.get("text-decoration")
+ if decoration is not None:
+ return decoration.split()
+ else:
+ return ()
+
+ def _get_underline(self, decoration: Sequence[str]) -> str | None:
+ if "underline" in decoration:
+ return "single"
+ return None
+
+ def _get_shadow(self, props: Mapping[str, str]) -> bool | None:
+ if "text-shadow" in props:
+ return bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))
+ return None
+
+ def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]:
+ font_names_tmp = re.findall(
+ r"""(?x)
+ (
+ "(?:[^"]|\\")+"
+ |
+ '(?:[^']|\\')+'
+ |
+ [^'",]+
+ )(?=,|\s*$)
+ """,
+ props.get("font-family", ""),
+ )
+
+ font_names = []
+ for name in font_names_tmp:
+ if name[:1] == '"':
+ name = name[1:-1].replace('\\"', '"')
+ elif name[:1] == "'":
+ name = name[1:-1].replace("\\'", "'")
+ else:
+ name = name.strip()
+ if name:
+ font_names.append(name)
+ return font_names
+
+ def _get_font_size(self, props: Mapping[str, str]) -> float | None:
+ size = props.get("font-size")
+ if size is None:
+ return size
+ return self._pt_to_float(size)
+
+ def _select_font_family(self, font_names: Sequence[str]) -> int | None:
+ family = None
+ for name in font_names:
+ family = self.FAMILY_MAP.get(name)
+ if family:
+ break
+
+ return family
+
+ def color_to_excel(self, val: str | None) -> str | None:
+ if val is None:
+ return None
+
+ if self._is_hex_color(val):
+ return self._convert_hex_to_excel(val)
+
+ try:
+ return self.NAMED_COLORS[val]
+ except KeyError:
+ warnings.warn(
+ f"Unhandled color format: {repr(val)}",
+ CSSWarning,
+ stacklevel=find_stack_level(),
+ )
+ return None
+
+ def _is_hex_color(self, color_string: str) -> bool:
+ return bool(color_string.startswith("#"))
+
+ def _convert_hex_to_excel(self, color_string: str) -> str:
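+        # e.g. shorthand "#f0a" expands to "FF00AA"; full "#1a2b3c" -> "1A2B3C".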
+ code = color_string.lstrip("#")
+ if self._is_shorthand_color(color_string):
+ return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper()
+ else:
+ return code.upper()
+
+ def _is_shorthand_color(self, color_string: str) -> bool:
+ """Check if color code is shorthand.
+
+ #FFF is a shorthand as opposed to full #FFFFFF.
+ """
+ code = color_string.lstrip("#")
+ if len(code) == 3:
+ return True
+ elif len(code) == 6:
+ return False
+ else:
+ raise ValueError(f"Unexpected color {color_string}")
+
+
+class ExcelFormatter:
+ """
+ Class for formatting a DataFrame to a list of ExcelCells,
+
+ Parameters
+ ----------
+ df : DataFrame or Styler
+    na_rep : str, default ''
+        Missing value representation.
+ float_format : str, default None
+ Format string for floating point numbers
+ cols : sequence, optional
+ Columns to write
+ header : bool or sequence of str, default True
+ Write out column names. If a list of string is given it is
+ assumed to be aliases for the column names
+ index : bool, default True
+        Output row names (index).
+ index_label : str or sequence, default None
+ Column label for index column(s) if desired. If None is given, and
+ `header` and `index` are True, then the index names are used. A
+ sequence should be given if the DataFrame uses MultiIndex.
+ merge_cells : bool, default False
+ Format MultiIndex and Hierarchical Rows as merged cells.
+ inf_rep : str, default `'inf'`
+        Representation for np.inf values, which aren't representable in Excel.
+        A `'-'` sign will be added in front of -inf.
+ style_converter : callable, optional
+ This translates Styler styles (CSS) into ExcelWriter styles.
+ Defaults to ``CSSToExcelConverter()``.
+    It should take a string of CSS declarations and return an ExcelWriter
+    style dict.
+ This is only called for body cells.
+ """
+
+ max_rows = 2**20
+ max_cols = 2**14
+
+ def __init__(
+ self,
+ df,
+ na_rep: str = "",
+ float_format: str | None = None,
+ cols: Sequence[Hashable] | None = None,
+ header: Sequence[Hashable] | bool = True,
+ index: bool = True,
+ index_label: IndexLabel | None = None,
+ merge_cells: bool = False,
+ inf_rep: str = "inf",
+ style_converter: Callable | None = None,
+ ) -> None:
+ self.rowcounter = 0
+ self.na_rep = na_rep
+ if not isinstance(df, DataFrame):
+ self.styler = df
+ self.styler._compute() # calculate applied styles
+ df = df.data
+ if style_converter is None:
+ style_converter = CSSToExcelConverter()
+ self.style_converter: Callable | None = style_converter
+ else:
+ self.styler = None
+ self.style_converter = None
+ self.df = df
+ if cols is not None:
+ # all missing, raise
+ if not len(Index(cols).intersection(df.columns)):
+                raise KeyError("passed columns are not ALL present in dataframe")
+
+ if len(Index(cols).intersection(df.columns)) != len(set(cols)):
+ # Deprecated in GH#17295, enforced in 1.0.0
+ raise KeyError("Not all names specified in 'columns' are found")
+
+ self.df = df.reindex(columns=cols)
+
+ self.columns = self.df.columns
+ self.float_format = float_format
+ self.index = index
+ self.index_label = index_label
+ self.header = header
+ self.merge_cells = merge_cells
+ self.inf_rep = inf_rep
+
+ @property
+ def header_style(self) -> dict[str, dict[str, str | bool]]:
+ return {
+ "font": {"bold": True},
+ "borders": {
+ "top": "thin",
+ "right": "thin",
+ "bottom": "thin",
+ "left": "thin",
+ },
+ "alignment": {"horizontal": "center", "vertical": "top"},
+ }
+
+ def _format_value(self, val):
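+        # e.g. NaN -> self.na_rep; np.inf -> self.inf_rep and -np.inf ->
+        # f"-{self.inf_rep}"; with float_format="%.2f", 3.14159 -> 3.14.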
+ if is_scalar(val) and missing.isna(val):
+ val = self.na_rep
+ elif is_float(val):
+ if missing.isposinf_scalar(val):
+ val = self.inf_rep
+ elif missing.isneginf_scalar(val):
+ val = f"-{self.inf_rep}"
+ elif self.float_format is not None:
+ val = float(self.float_format % val)
+ if getattr(val, "tzinfo", None) is not None:
+ raise ValueError(
+ "Excel does not support datetimes with "
+ "timezones. Please ensure that datetimes "
+ "are timezone unaware before writing to Excel."
+ )
+ return val
+
+ def _format_header_mi(self) -> Iterable[ExcelCell]:
+ if self.columns.nlevels > 1:
+ if not self.index:
+ raise NotImplementedError(
+ "Writing to Excel with MultiIndex columns and no "
+ "index ('index'=False) is not yet implemented."
+ )
+
+ if not (self._has_aliases or self.header):
+ return
+
+ columns = self.columns
+ level_strs = columns._format_multi(
+ sparsify=self.merge_cells, include_names=False
+ )
+ level_lengths = get_level_lengths(level_strs)
+ coloffset = 0
+ lnum = 0
+
+ if self.index and isinstance(self.df.index, MultiIndex):
+ coloffset = len(self.df.index[0]) - 1
+
+ if self.merge_cells:
+            # Format multi-index as merged cells.
+ for lnum, name in enumerate(columns.names):
+ yield ExcelCell(
+ row=lnum,
+ col=coloffset,
+ val=name,
+ style=self.header_style,
+ )
+
+ for lnum, (spans, levels, level_codes) in enumerate(
+ zip(level_lengths, columns.levels, columns.codes)
+ ):
+ values = levels.take(level_codes)
+ for i, span_val in spans.items():
+ mergestart, mergeend = None, None
+ if span_val > 1:
+ mergestart, mergeend = lnum, coloffset + i + span_val
+ yield CssExcelCell(
+ row=lnum,
+ col=coloffset + i + 1,
+ val=values[i],
+ style=self.header_style,
+ css_styles=getattr(self.styler, "ctx_columns", None),
+ css_row=lnum,
+ css_col=i,
+ css_converter=self.style_converter,
+ mergestart=mergestart,
+ mergeend=mergeend,
+ )
+ else:
+ # Format in legacy format with dots to indicate levels.
+ for i, values in enumerate(zip(*level_strs)):
+ v = ".".join(map(pprint_thing, values))
+ yield CssExcelCell(
+ row=lnum,
+ col=coloffset + i + 1,
+ val=v,
+ style=self.header_style,
+ css_styles=getattr(self.styler, "ctx_columns", None),
+ css_row=lnum,
+ css_col=i,
+ css_converter=self.style_converter,
+ )
+
+ self.rowcounter = lnum
+
+ def _format_header_regular(self) -> Iterable[ExcelCell]:
+ if self._has_aliases or self.header:
+ coloffset = 0
+
+ if self.index:
+ coloffset = 1
+ if isinstance(self.df.index, MultiIndex):
+ coloffset = len(self.df.index.names)
+
+ colnames = self.columns
+ if self._has_aliases:
+ self.header = cast(Sequence, self.header)
+ if len(self.header) != len(self.columns):
+ raise ValueError(
+ f"Writing {len(self.columns)} cols "
+ f"but got {len(self.header)} aliases"
+ )
+ colnames = self.header
+
+ for colindex, colname in enumerate(colnames):
+ yield CssExcelCell(
+ row=self.rowcounter,
+ col=colindex + coloffset,
+ val=colname,
+ style=self.header_style,
+ css_styles=getattr(self.styler, "ctx_columns", None),
+ css_row=0,
+ css_col=colindex,
+ css_converter=self.style_converter,
+ )
+
+ def _format_header(self) -> Iterable[ExcelCell]:
+ gen: Iterable[ExcelCell]
+
+ if isinstance(self.columns, MultiIndex):
+ gen = self._format_header_mi()
+ else:
+ gen = self._format_header_regular()
+
+ gen2: Iterable[ExcelCell] = ()
+
+ if self.df.index.names:
+ row = [x if x is not None else "" for x in self.df.index.names] + [
+ ""
+ ] * len(self.columns)
+            if all(x != "" for x in row):
+ gen2 = (
+ ExcelCell(self.rowcounter, colindex, val, self.header_style)
+ for colindex, val in enumerate(row)
+ )
+ self.rowcounter += 1
+ return itertools.chain(gen, gen2)
+
+ def _format_body(self) -> Iterable[ExcelCell]:
+ if isinstance(self.df.index, MultiIndex):
+ return self._format_hierarchical_rows()
+ else:
+ return self._format_regular_rows()
+
+ def _format_regular_rows(self) -> Iterable[ExcelCell]:
+ if self._has_aliases or self.header:
+ self.rowcounter += 1
+
+ # output index and index_label?
+ if self.index:
+ # check aliases
+ # if list only take first as this is not a MultiIndex
+ if self.index_label and isinstance(
+ self.index_label, (list, tuple, np.ndarray, Index)
+ ):
+ index_label = self.index_label[0]
+ # if string good to go
+ elif self.index_label and isinstance(self.index_label, str):
+ index_label = self.index_label
+ else:
+ index_label = self.df.index.names[0]
+
+ if isinstance(self.columns, MultiIndex):
+ self.rowcounter += 1
+
+ if index_label and self.header is not False:
+ yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style)
+
+ # write index_values
+ index_values = self.df.index
+ if isinstance(self.df.index, PeriodIndex):
+ index_values = self.df.index.to_timestamp()
+
+ for idx, idxval in enumerate(index_values):
+ yield CssExcelCell(
+ row=self.rowcounter + idx,
+ col=0,
+ val=idxval,
+ style=self.header_style,
+ css_styles=getattr(self.styler, "ctx_index", None),
+ css_row=idx,
+ css_col=0,
+ css_converter=self.style_converter,
+ )
+ coloffset = 1
+ else:
+ coloffset = 0
+
+ yield from self._generate_body(coloffset)
+
+ def _format_hierarchical_rows(self) -> Iterable[ExcelCell]:
+ if self._has_aliases or self.header:
+ self.rowcounter += 1
+
+ gcolidx = 0
+
+ if self.index:
+ index_labels = self.df.index.names
+ # check for aliases
+ if self.index_label and isinstance(
+ self.index_label, (list, tuple, np.ndarray, Index)
+ ):
+ index_labels = self.index_label
+
+            # MultiIndex columns require an extra row with index names
+            # (blank if None) for an unambiguous round-trip, unless we are
+            # not merging, in which case the names all go on one row.
+            # See GH#11328.
+ if isinstance(self.columns, MultiIndex) and self.merge_cells:
+ self.rowcounter += 1
+
+ # if index labels are not empty go ahead and dump
+ if com.any_not_none(*index_labels) and self.header is not False:
+ for cidx, name in enumerate(index_labels):
+ yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style)
+
+ if self.merge_cells:
+ # Format hierarchical rows as merged cells.
+ level_strs = self.df.index._format_multi(
+ sparsify=True, include_names=False
+ )
+ level_lengths = get_level_lengths(level_strs)
+
+ for spans, levels, level_codes in zip(
+ level_lengths, self.df.index.levels, self.df.index.codes
+ ):
+ values = levels.take(
+ level_codes,
+ allow_fill=levels._can_hold_na,
+ fill_value=levels._na_value,
+ )
+
+ for i, span_val in spans.items():
+ mergestart, mergeend = None, None
+ if span_val > 1:
+ mergestart = self.rowcounter + i + span_val - 1
+ mergeend = gcolidx
+ yield CssExcelCell(
+ row=self.rowcounter + i,
+ col=gcolidx,
+ val=values[i],
+ style=self.header_style,
+ css_styles=getattr(self.styler, "ctx_index", None),
+ css_row=i,
+ css_col=gcolidx,
+ css_converter=self.style_converter,
+ mergestart=mergestart,
+ mergeend=mergeend,
+ )
+ gcolidx += 1
+
+ else:
+ # Format hierarchical rows with non-merged values.
+ for indexcolvals in zip(*self.df.index):
+ for idx, indexcolval in enumerate(indexcolvals):
+ yield CssExcelCell(
+ row=self.rowcounter + idx,
+ col=gcolidx,
+ val=indexcolval,
+ style=self.header_style,
+ css_styles=getattr(self.styler, "ctx_index", None),
+ css_row=idx,
+ css_col=gcolidx,
+ css_converter=self.style_converter,
+ )
+ gcolidx += 1
+
+ yield from self._generate_body(gcolidx)
+
+ @property
+ def _has_aliases(self) -> bool:
+ """Whether the aliases for column names are present."""
+ return is_list_like(self.header)
+
+ def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:
+ # Write the body of the frame data series by series.
+ for colidx in range(len(self.columns)):
+ series = self.df.iloc[:, colidx]
+ for i, val in enumerate(series):
+ yield CssExcelCell(
+ row=self.rowcounter + i,
+ col=colidx + coloffset,
+ val=val,
+ style=None,
+ css_styles=getattr(self.styler, "ctx", None),
+ css_row=i,
+ css_col=colidx,
+ css_converter=self.style_converter,
+ )
+
+ def get_formatted_cells(self) -> Iterable[ExcelCell]:
+ for cell in itertools.chain(self._format_header(), self._format_body()):
+ cell.val = self._format_value(cell.val)
+ yield cell
+
+ @doc(storage_options=_shared_docs["storage_options"])
+ def write(
+ self,
+ writer: FilePath | WriteExcelBuffer | ExcelWriter,
+ sheet_name: str = "Sheet1",
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ engine: str | None = None,
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ writer : path-like, file-like, or ExcelWriter object
+ File path or existing ExcelWriter
+ sheet_name : str, default 'Sheet1'
+ Name of sheet which will contain DataFrame
+        startrow : int, default 0
+            Upper left cell row to dump data frame.
+        startcol : int, default 0
+            Upper left cell column to dump data frame.
+        freeze_panes : tuple of int (length 2), default None
+            Specifies the one-based bottommost row and rightmost column that
+            are to be frozen.
+ engine : string, default None
+ write engine to use if writer is a path - you can also set this
+ via the options ``io.excel.xlsx.writer``,
+ or ``io.excel.xlsm.writer``.
+
+ {storage_options}
+
+        engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ from pandas.io.excel import ExcelWriter
+
+ num_rows, num_cols = self.df.shape
+ if num_rows > self.max_rows or num_cols > self.max_cols:
+ raise ValueError(
+ f"This sheet is too large! Your sheet size is: {num_rows}, {num_cols} "
+ f"Max sheet size is: {self.max_rows}, {self.max_cols}"
+ )
+
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
+ formatted_cells = self.get_formatted_cells()
+ if isinstance(writer, ExcelWriter):
+ need_save = False
+ else:
+ writer = ExcelWriter(
+ writer,
+ engine=engine,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+ need_save = True
+
+ try:
+ writer._write_cells(
+ formatted_cells,
+ sheet_name,
+ startrow=startrow,
+ startcol=startcol,
+ freeze_panes=freeze_panes,
+ )
+ finally:
+ # make sure to close opened file handles
+ if need_save:
+ writer.close()
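+
+
+# Illustrative sketch (not part of pandas): ExcelFormatter is normally
+# invoked via DataFrame.to_excel; a direct call, assuming an engine such as
+# openpyxl is installed:
+#
+#   >>> import pandas as pd
+#   >>> df = pd.DataFrame({"a": [1.5, float("inf")]})
+#   >>> ExcelFormatter(df, inf_rep="INF").write("out.xlsx", sheet_name="Sheet1")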
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/format.py b/venv/lib/python3.10/site-packages/pandas/io/formats/format.py
new file mode 100644
index 0000000000000000000000000000000000000000..00c7526edfa4894fab655cb5bbfdf2aa93c4e96d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/format.py
@@ -0,0 +1,2058 @@
+"""
+Internal module for formatting output data in csv, html, xml,
+and latex files. This module also applies to display formatting.
+"""
+from __future__ import annotations
+
+from collections.abc import (
+ Generator,
+ Hashable,
+ Mapping,
+ Sequence,
+)
+from contextlib import contextmanager
+from csv import QUOTE_NONE
+from decimal import Decimal
+from functools import partial
+from io import StringIO
+import math
+import re
+from shutil import get_terminal_size
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Final,
+ cast,
+)
+
+import numpy as np
+
+from pandas._config.config import (
+ get_option,
+ set_option,
+)
+
+from pandas._libs import lib
+from pandas._libs.missing import NA
+from pandas._libs.tslibs import (
+ NaT,
+ Timedelta,
+ Timestamp,
+)
+from pandas._libs.tslibs.nattype import NaTType
+
+from pandas.core.dtypes.common import (
+ is_complex_dtype,
+ is_float,
+ is_integer,
+ is_list_like,
+ is_numeric_dtype,
+ is_scalar,
+)
+from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
+ DatetimeTZDtype,
+ ExtensionDtype,
+)
+from pandas.core.dtypes.missing import (
+ isna,
+ notna,
+)
+
+from pandas.core.arrays import (
+ Categorical,
+ DatetimeArray,
+ ExtensionArray,
+ TimedeltaArray,
+)
+from pandas.core.arrays.string_ import StringDtype
+from pandas.core.base import PandasObject
+import pandas.core.common as com
+from pandas.core.indexes.api import (
+ Index,
+ MultiIndex,
+ PeriodIndex,
+ ensure_index,
+)
+from pandas.core.indexes.datetimes import DatetimeIndex
+from pandas.core.indexes.timedeltas import TimedeltaIndex
+from pandas.core.reshape.concat import concat
+
+from pandas.io.common import (
+ check_parent_directory,
+ stringify_path,
+)
+from pandas.io.formats import printing
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ ArrayLike,
+ Axes,
+ ColspaceArgType,
+ ColspaceType,
+ CompressionOptions,
+ FilePath,
+ FloatFormatType,
+ FormattersType,
+ IndexLabel,
+ SequenceNotStr,
+ StorageOptions,
+ WriteBuffer,
+ )
+
+ from pandas import (
+ DataFrame,
+ Series,
+ )
+
+
+common_docstring: Final = """
+ Parameters
+ ----------
+ buf : str, Path or StringIO-like, optional, default None
+ Buffer to write to. If None, the output is returned as a string.
+ columns : array-like, optional, default None
+ The subset of columns to write. Writes all columns by default.
+ col_space : %(col_space_type)s, optional
+ %(col_space)s.
+ header : %(header_type)s, optional
+ %(header)s.
+ index : bool, optional, default True
+ Whether to print index (row) labels.
+ na_rep : str, optional, default 'NaN'
+ String representation of ``NaN`` to use.
+ formatters : list, tuple or dict of one-param. functions, optional
+ Formatter functions to apply to columns' elements by position or
+ name.
+ The result of each function must be a unicode string.
+ List/tuple must be of length equal to the number of columns.
+ float_format : one-parameter function, optional, default None
+ Formatter function to apply to columns' elements if they are
+ floats. This function must return a unicode string and will be
+ applied only to the non-``NaN`` elements, with ``NaN`` being
+ handled by ``na_rep``.
+ sparsify : bool, optional, default True
+ Set to False for a DataFrame with a hierarchical index to print
+ every multiindex key at each row.
+ index_names : bool, optional, default True
+ Prints the names of the indexes.
+ justify : str, default None
+ How to justify the column labels. If None uses the option from
+ the print configuration (controlled by set_option), 'right' out
+ of the box. Valid values are
+
+ * left
+ * right
+ * center
+ * justify
+ * justify-all
+ * start
+ * end
+ * inherit
+ * match-parent
+ * initial
+ * unset.
+ max_rows : int, optional
+ Maximum number of rows to display in the console.
+ max_cols : int, optional
+ Maximum number of columns to display in the console.
+ show_dimensions : bool, default False
+ Display DataFrame dimensions (number of rows by number of columns).
+ decimal : str, default '.'
+ Character recognized as decimal separator, e.g. ',' in Europe.
+ """
+
+VALID_JUSTIFY_PARAMETERS = (
+ "left",
+ "right",
+ "center",
+ "justify",
+ "justify-all",
+ "start",
+ "end",
+ "inherit",
+ "match-parent",
+ "initial",
+ "unset",
+)
+
+return_docstring: Final = """
+ Returns
+ -------
+ str or None
+ If buf is None, returns the result as a string. Otherwise returns
+ None.
+ """
+
+
+class SeriesFormatter:
+ """
+ Implement the main logic of Series.to_string, which underlies
+ Series.__repr__.
+ """
+
+ def __init__(
+ self,
+ series: Series,
+ *,
+ length: bool | str = True,
+ header: bool = True,
+ index: bool = True,
+ na_rep: str = "NaN",
+ name: bool = False,
+ float_format: str | None = None,
+ dtype: bool = True,
+ max_rows: int | None = None,
+ min_rows: int | None = None,
+ ) -> None:
+ self.series = series
+ self.buf = StringIO()
+ self.name = name
+ self.na_rep = na_rep
+ self.header = header
+ self.length = length
+ self.index = index
+ self.max_rows = max_rows
+ self.min_rows = min_rows
+
+ if float_format is None:
+ float_format = get_option("display.float_format")
+ self.float_format = float_format
+ self.dtype = dtype
+ self.adj = printing.get_adjustment()
+
+ self._chk_truncate()
+
+ def _chk_truncate(self) -> None:
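+        # Illustrative: with max_rows=6 and min_rows=4 on a 10-row Series,
+        # max_rows is lowered to 4 and the kept data is iloc[:2] + iloc[-2:].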
+ self.tr_row_num: int | None
+
+ min_rows = self.min_rows
+ max_rows = self.max_rows
+        # Truncation is determined by max_rows; the number of rows actually
+        # shown may then be reduced further by min_rows below.
+ is_truncated_vertically = max_rows and (len(self.series) > max_rows)
+ series = self.series
+ if is_truncated_vertically:
+ max_rows = cast(int, max_rows)
+ if min_rows:
+ # if min_rows is set (not None or 0), set max_rows to minimum
+ # of both
+ max_rows = min(min_rows, max_rows)
+ if max_rows == 1:
+ row_num = max_rows
+ series = series.iloc[:max_rows]
+ else:
+ row_num = max_rows // 2
+ series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
+ self.tr_row_num = row_num
+ else:
+ self.tr_row_num = None
+ self.tr_series = series
+ self.is_truncated_vertically = is_truncated_vertically
+
+ def _get_footer(self) -> str:
+ name = self.series.name
+ footer = ""
+
+ index = self.series.index
+ if (
+ isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex))
+ and index.freq is not None
+ ):
+ footer += f"Freq: {index.freqstr}"
+
+ if self.name is not False and name is not None:
+ if footer:
+ footer += ", "
+
+ series_name = printing.pprint_thing(name, escape_chars=("\t", "\r", "\n"))
+ footer += f"Name: {series_name}"
+
+ if self.length is True or (
+ self.length == "truncate" and self.is_truncated_vertically
+ ):
+ if footer:
+ footer += ", "
+ footer += f"Length: {len(self.series)}"
+
+ if self.dtype is not False and self.dtype is not None:
+ dtype_name = getattr(self.tr_series.dtype, "name", None)
+ if dtype_name:
+ if footer:
+ footer += ", "
+ footer += f"dtype: {printing.pprint_thing(dtype_name)}"
+
+ # level infos are added to the end and in a new line, like it is done
+ # for Categoricals
+ if isinstance(self.tr_series.dtype, CategoricalDtype):
+ level_info = self.tr_series._values._get_repr_footer()
+ if footer:
+ footer += "\n"
+ footer += level_info
+
+ return str(footer)
+
+ def _get_formatted_values(self) -> list[str]:
+ return format_array(
+ self.tr_series._values,
+ None,
+ float_format=self.float_format,
+ na_rep=self.na_rep,
+ leading_space=self.index,
+ )
+
+ def to_string(self) -> str:
+ series = self.tr_series
+ footer = self._get_footer()
+
+ if len(series) == 0:
+ return f"{type(self.series).__name__}([], {footer})"
+
+ index = series.index
+ have_header = _has_names(index)
+ if isinstance(index, MultiIndex):
+ fmt_index = index._format_multi(include_names=True, sparsify=None)
+ adj = printing.get_adjustment()
+ fmt_index = adj.adjoin(2, *fmt_index).split("\n")
+ else:
+ fmt_index = index._format_flat(include_name=True)
+ fmt_values = self._get_formatted_values()
+
+ if self.is_truncated_vertically:
+ n_header_rows = 0
+ row_num = self.tr_row_num
+ row_num = cast(int, row_num)
+ width = self.adj.len(fmt_values[row_num - 1])
+ if width > 3:
+ dot_str = "..."
+ else:
+ dot_str = ".."
+ # Series uses mode=center because it has single value columns
+ # DataFrame uses mode=left
+ dot_str = self.adj.justify([dot_str], width, mode="center")[0]
+ fmt_values.insert(row_num + n_header_rows, dot_str)
+ fmt_index.insert(row_num + 1, "")
+
+ if self.index:
+ result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
+ else:
+ result = self.adj.adjoin(3, fmt_values)
+
+ if self.header and have_header:
+ result = fmt_index[0] + "\n" + result
+
+ if footer:
+ result += "\n" + footer
+
+ return str("".join(result))
+
+
+def get_dataframe_repr_params() -> dict[str, Any]:
+ """Get the parameters used to repr(dataFrame) calls using DataFrame.to_string.
+
+ Supplying these parameters to DataFrame.to_string is equivalent to calling
+ ``repr(DataFrame)``. This is useful if you want to adjust the repr output.
+
+ .. versionadded:: 1.4.0
+
+    Examples
+    --------
+ >>> import pandas as pd
+ >>>
+ >>> df = pd.DataFrame([[1, 2], [3, 4]])
+ >>> repr_params = pd.io.formats.format.get_dataframe_repr_params()
+ >>> repr(df) == df.to_string(**repr_params)
+ True
+ """
+ from pandas.io.formats import console
+
+ if get_option("display.expand_frame_repr"):
+ line_width, _ = console.get_console_size()
+ else:
+ line_width = None
+ return {
+ "max_rows": get_option("display.max_rows"),
+ "min_rows": get_option("display.min_rows"),
+ "max_cols": get_option("display.max_columns"),
+ "max_colwidth": get_option("display.max_colwidth"),
+ "show_dimensions": get_option("display.show_dimensions"),
+ "line_width": line_width,
+ }
+
+
+def get_series_repr_params() -> dict[str, Any]:
+ """Get the parameters used to repr(Series) calls using Series.to_string.
+
+ Supplying these parameters to Series.to_string is equivalent to calling
+ ``repr(series)``. This is useful if you want to adjust the series repr output.
+
+ .. versionadded:: 1.4.0
+
+    Examples
+    --------
+ >>> import pandas as pd
+ >>>
+ >>> ser = pd.Series([1, 2, 3, 4])
+ >>> repr_params = pd.io.formats.format.get_series_repr_params()
+ >>> repr(ser) == ser.to_string(**repr_params)
+ True
+ """
+ width, height = get_terminal_size()
+ max_rows_opt = get_option("display.max_rows")
+ max_rows = height if max_rows_opt == 0 else max_rows_opt
+ min_rows = height if max_rows_opt == 0 else get_option("display.min_rows")
+
+ return {
+ "name": True,
+ "dtype": True,
+ "min_rows": min_rows,
+ "max_rows": max_rows,
+ "length": get_option("display.show_dimensions"),
+ }
+
+
+class DataFrameFormatter:
+ """
+ Class for processing dataframe formatting options and data.
+
+ Used by DataFrame.to_string, which backs DataFrame.__repr__.
+ """
+
+ __doc__ = __doc__ if __doc__ else ""
+ __doc__ += common_docstring + return_docstring
+
+ def __init__(
+ self,
+ frame: DataFrame,
+ columns: Axes | None = None,
+ col_space: ColspaceArgType | None = None,
+ header: bool | SequenceNotStr[str] = True,
+ index: bool = True,
+ na_rep: str = "NaN",
+ formatters: FormattersType | None = None,
+ justify: str | None = None,
+ float_format: FloatFormatType | None = None,
+ sparsify: bool | None = None,
+ index_names: bool = True,
+ max_rows: int | None = None,
+ min_rows: int | None = None,
+ max_cols: int | None = None,
+ show_dimensions: bool | str = False,
+ decimal: str = ".",
+ bold_rows: bool = False,
+ escape: bool = True,
+ ) -> None:
+ self.frame = frame
+ self.columns = self._initialize_columns(columns)
+ self.col_space = self._initialize_colspace(col_space)
+ self.header = header
+ self.index = index
+ self.na_rep = na_rep
+ self.formatters = self._initialize_formatters(formatters)
+ self.justify = self._initialize_justify(justify)
+ self.float_format = float_format
+ self.sparsify = self._initialize_sparsify(sparsify)
+ self.show_index_names = index_names
+ self.decimal = decimal
+ self.bold_rows = bold_rows
+ self.escape = escape
+ self.max_rows = max_rows
+ self.min_rows = min_rows
+ self.max_cols = max_cols
+ self.show_dimensions = show_dimensions
+
+ self.max_cols_fitted = self._calc_max_cols_fitted()
+ self.max_rows_fitted = self._calc_max_rows_fitted()
+
+ self.tr_frame = self.frame
+ self.truncate()
+ self.adj = printing.get_adjustment()
+
+ def get_strcols(self) -> list[list[str]]:
+ """
+ Render a DataFrame to a list of columns (as lists of strings).
+ """
+ strcols = self._get_strcols_without_index()
+
+ if self.index:
+ str_index = self._get_formatted_index(self.tr_frame)
+ strcols.insert(0, str_index)
+
+ return strcols
+
+ @property
+ def should_show_dimensions(self) -> bool:
+ return self.show_dimensions is True or (
+ self.show_dimensions == "truncate" and self.is_truncated
+ )
+
+ @property
+ def is_truncated(self) -> bool:
+ return bool(self.is_truncated_horizontally or self.is_truncated_vertically)
+
+ @property
+ def is_truncated_horizontally(self) -> bool:
+ return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted))
+
+ @property
+ def is_truncated_vertically(self) -> bool:
+ return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted))
+
+ @property
+ def dimensions_info(self) -> str:
+ return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]"
+
+ @property
+ def has_index_names(self) -> bool:
+ return _has_names(self.frame.index)
+
+ @property
+ def has_column_names(self) -> bool:
+ return _has_names(self.frame.columns)
+
+ @property
+ def show_row_idx_names(self) -> bool:
+ return all((self.has_index_names, self.index, self.show_index_names))
+
+ @property
+ def show_col_idx_names(self) -> bool:
+ return all((self.has_column_names, self.show_index_names, self.header))
+
+ @property
+ def max_rows_displayed(self) -> int:
+ return min(self.max_rows or len(self.frame), len(self.frame))
+
+ def _initialize_sparsify(self, sparsify: bool | None) -> bool:
+ if sparsify is None:
+ return get_option("display.multi_sparse")
+ return sparsify
+
+ def _initialize_formatters(
+ self, formatters: FormattersType | None
+ ) -> FormattersType:
+ if formatters is None:
+ return {}
+ elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict):
+ return formatters
+ else:
+ raise ValueError(
+ f"Formatters length({len(formatters)}) should match "
+ f"DataFrame number of columns({len(self.frame.columns)})"
+ )
+
+ def _initialize_justify(self, justify: str | None) -> str:
+ if justify is None:
+ return get_option("display.colheader_justify")
+ else:
+ return justify
+
+ def _initialize_columns(self, columns: Axes | None) -> Index:
+ if columns is not None:
+ cols = ensure_index(columns)
+ self.frame = self.frame[cols]
+ return cols
+ else:
+ return self.frame.columns
+
+ def _initialize_colspace(self, col_space: ColspaceArgType | None) -> ColspaceType:
+ result: ColspaceType
+
+ if col_space is None:
+ result = {}
+ elif isinstance(col_space, (int, str)):
+ result = {"": col_space}
+ result.update({column: col_space for column in self.frame.columns})
+ elif isinstance(col_space, Mapping):
+ for column in col_space.keys():
+ if column not in self.frame.columns and column != "":
+ raise ValueError(
+ f"Col_space is defined for an unknown column: {column}"
+ )
+ result = col_space
+ else:
+ if len(self.frame.columns) != len(col_space):
+ raise ValueError(
+ f"Col_space length({len(col_space)}) should match "
+ f"DataFrame number of columns({len(self.frame.columns)})"
+ )
+ result = dict(zip(self.frame.columns, col_space))
+ return result
+
+ def _calc_max_cols_fitted(self) -> int | None:
+ """Number of columns fitting the screen."""
+ if not self._is_in_terminal():
+ return self.max_cols
+
+ width, _ = get_terminal_size()
+ if self._is_screen_narrow(width):
+ return width
+ else:
+ return self.max_cols
+
+ def _calc_max_rows_fitted(self) -> int | None:
+ """Number of rows with data fitting the screen."""
+ max_rows: int | None
+
+ if self._is_in_terminal():
+ _, height = get_terminal_size()
+ if self.max_rows == 0:
+ # rows available to fill with actual data
+ return height - self._get_number_of_auxiliary_rows()
+
+ if self._is_screen_short(height):
+ max_rows = height
+ else:
+ max_rows = self.max_rows
+ else:
+ max_rows = self.max_rows
+
+ return self._adjust_max_rows(max_rows)
+
+ def _adjust_max_rows(self, max_rows: int | None) -> int | None:
+ """Adjust max_rows using display logic.
+
+ See description here:
+ https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
+
+ GH #37359
+ """
+ if max_rows:
+ if (len(self.frame) > max_rows) and self.min_rows:
+ # if truncated, set max_rows showed to min_rows
+ max_rows = min(self.min_rows, max_rows)
+ return max_rows
+
+ def _is_in_terminal(self) -> bool:
+ """Check if the output is to be shown in terminal."""
+ return bool(self.max_cols == 0 or self.max_rows == 0)
+
+ def _is_screen_narrow(self, max_width) -> bool:
+ return bool(self.max_cols == 0 and len(self.frame.columns) > max_width)
+
+ def _is_screen_short(self, max_height) -> bool:
+ return bool(self.max_rows == 0 and len(self.frame) > max_height)
+
+ def _get_number_of_auxiliary_rows(self) -> int:
+ """Get number of rows occupied by prompt, dots and dimension info."""
+ dot_row = 1
+ prompt_row = 1
+ num_rows = dot_row + prompt_row
+
+ if self.show_dimensions:
+ num_rows += len(self.dimensions_info.splitlines())
+
+ if self.header:
+ num_rows += 1
+
+ return num_rows
+
+ def truncate(self) -> None:
+ """
+ Check whether the frame should be truncated. If so, slice the frame up.
+ """
+ if self.is_truncated_horizontally:
+ self._truncate_horizontally()
+
+ if self.is_truncated_vertically:
+ self._truncate_vertically()
+
+ def _truncate_horizontally(self) -> None:
+ """Remove columns, which are not to be displayed and adjust formatters.
+
+ Attributes affected:
+ - tr_frame
+ - formatters
+ - tr_col_num
+ """
+ assert self.max_cols_fitted is not None
+ col_num = self.max_cols_fitted // 2
+ if col_num >= 1:
+ left = self.tr_frame.iloc[:, :col_num]
+ right = self.tr_frame.iloc[:, -col_num:]
+ self.tr_frame = concat((left, right), axis=1)
+
+ # truncate formatter
+ if isinstance(self.formatters, (list, tuple)):
+ self.formatters = [
+ *self.formatters[:col_num],
+ *self.formatters[-col_num:],
+ ]
+ else:
+ col_num = cast(int, self.max_cols)
+ self.tr_frame = self.tr_frame.iloc[:, :col_num]
+ self.tr_col_num = col_num
+
+ def _truncate_vertically(self) -> None:
+ """Remove rows, which are not to be displayed.
+
+ Attributes affected:
+ - tr_frame
+ - tr_row_num
+ """
+ assert self.max_rows_fitted is not None
+ row_num = self.max_rows_fitted // 2
+ if row_num >= 1:
+ _len = len(self.tr_frame)
+ _slice = np.hstack([np.arange(row_num), np.arange(_len - row_num, _len)])
+ self.tr_frame = self.tr_frame.iloc[_slice]
+ else:
+ row_num = cast(int, self.max_rows)
+ self.tr_frame = self.tr_frame.iloc[:row_num, :]
+ self.tr_row_num = row_num
+
+ def _get_strcols_without_index(self) -> list[list[str]]:
+ strcols: list[list[str]] = []
+
+ if not is_list_like(self.header) and not self.header:
+ for i, c in enumerate(self.tr_frame):
+ fmt_values = self.format_col(i)
+ fmt_values = _make_fixed_width(
+ strings=fmt_values,
+ justify=self.justify,
+ minimum=int(self.col_space.get(c, 0)),
+ adj=self.adj,
+ )
+ strcols.append(fmt_values)
+ return strcols
+
+ if is_list_like(self.header):
+ # cast here since can't be bool if is_list_like
+ self.header = cast(list[str], self.header)
+ if len(self.header) != len(self.columns):
+ raise ValueError(
+ f"Writing {len(self.columns)} cols "
+ f"but got {len(self.header)} aliases"
+ )
+ str_columns = [[label] for label in self.header]
+ else:
+ str_columns = self._get_formatted_column_labels(self.tr_frame)
+
+ if self.show_row_idx_names:
+ for x in str_columns:
+ x.append("")
+
+ for i, c in enumerate(self.tr_frame):
+ cheader = str_columns[i]
+ header_colwidth = max(
+ int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader)
+ )
+ fmt_values = self.format_col(i)
+ fmt_values = _make_fixed_width(
+ fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
+ )
+
+ max_len = max(*(self.adj.len(x) for x in fmt_values), header_colwidth)
+ cheader = self.adj.justify(cheader, max_len, mode=self.justify)
+ strcols.append(cheader + fmt_values)
+
+ return strcols
+
+ def format_col(self, i: int) -> list[str]:
+ frame = self.tr_frame
+ formatter = self._get_formatter(i)
+ return format_array(
+ frame.iloc[:, i]._values,
+ formatter,
+ float_format=self.float_format,
+ na_rep=self.na_rep,
+ space=self.col_space.get(frame.columns[i]),
+ decimal=self.decimal,
+ leading_space=self.index,
+ )
+
+ def _get_formatter(self, i: str | int) -> Callable | None:
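+        # Formatters may be keyed by position or by label: an integer that
+        # is not itself a column label is translated to the label at that
+        # position.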
+ if isinstance(self.formatters, (list, tuple)):
+ if is_integer(i):
+ i = cast(int, i)
+ return self.formatters[i]
+ else:
+ return None
+ else:
+ if is_integer(i) and i not in self.columns:
+ i = self.columns[i]
+ return self.formatters.get(i, None)
+
+ def _get_formatted_column_labels(self, frame: DataFrame) -> list[list[str]]:
+ from pandas.core.indexes.multi import sparsify_labels
+
+ columns = frame.columns
+
+ if isinstance(columns, MultiIndex):
+ fmt_columns = columns._format_multi(sparsify=False, include_names=False)
+ fmt_columns = list(zip(*fmt_columns))
+ dtypes = self.frame.dtypes._values
+
+            # if any level holds floats, leading spaces are not used at all
+ restrict_formatting = any(level.is_floating for level in columns.levels)
+ need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
+
+ def space_format(x, y):
+ if (
+ y not in self.formatters
+ and need_leadsp[x]
+ and not restrict_formatting
+ ):
+ return " " + y
+ return y
+
+ str_columns_tuple = list(
+ zip(*([space_format(x, y) for y in x] for x in fmt_columns))
+ )
+ if self.sparsify and len(str_columns_tuple):
+ str_columns_tuple = sparsify_labels(str_columns_tuple)
+
+ str_columns = [list(x) for x in zip(*str_columns_tuple)]
+ else:
+ fmt_columns = columns._format_flat(include_name=False)
+ dtypes = self.frame.dtypes
+ need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
+ str_columns = [
+ [" " + x if not self._get_formatter(i) and need_leadsp[x] else x]
+ for i, x in enumerate(fmt_columns)
+ ]
+ # self.str_columns = str_columns
+ return str_columns
+
+ def _get_formatted_index(self, frame: DataFrame) -> list[str]:
+ # Note: this is only used by to_string() and to_latex(), not by
+ # to_html(), so it is safe to cast col_space here.
+ col_space = {k: cast(int, v) for k, v in self.col_space.items()}
+ index = frame.index
+ columns = frame.columns
+ fmt = self._get_formatter("__index__")
+
+ if isinstance(index, MultiIndex):
+ fmt_index = index._format_multi(
+ sparsify=self.sparsify,
+ include_names=self.show_row_idx_names,
+ formatter=fmt,
+ )
+ else:
+ fmt_index = [
+ index._format_flat(include_name=self.show_row_idx_names, formatter=fmt)
+ ]
+
+ fmt_index = [
+ tuple(
+ _make_fixed_width(
+ list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj
+ )
+ )
+ for x in fmt_index
+ ]
+
+ adjoined = self.adj.adjoin(1, *fmt_index).split("\n")
+
+ # empty space for columns
+ if self.show_col_idx_names:
+ col_header = [str(x) for x in self._get_column_name_list()]
+ else:
+ col_header = [""] * columns.nlevels
+
+ if self.header:
+ return col_header + adjoined
+ else:
+ return adjoined
+
+ def _get_column_name_list(self) -> list[Hashable]:
+ names: list[Hashable] = []
+ columns = self.frame.columns
+ if isinstance(columns, MultiIndex):
+ names.extend("" if name is None else name for name in columns.names)
+ else:
+ names.append("" if columns.name is None else columns.name)
+ return names
+
+
+class DataFrameRenderer:
+ """Class for creating dataframe output in multiple formats.
+
+ Called in pandas.core.generic.NDFrame:
+ - to_csv
+ - to_latex
+
+ Called in pandas.core.frame.DataFrame:
+ - to_html
+ - to_string
+
+ Parameters
+ ----------
+ fmt : DataFrameFormatter
+ Formatter with the formatting options.
+ """
+
+ def __init__(self, fmt: DataFrameFormatter) -> None:
+ self.fmt = fmt
+
+ def to_html(
+ self,
+ buf: FilePath | WriteBuffer[str] | None = None,
+ encoding: str | None = None,
+ classes: str | list | tuple | None = None,
+ notebook: bool = False,
+ border: int | bool | None = None,
+ table_id: str | None = None,
+ render_links: bool = False,
+ ) -> str | None:
+ """
+ Render a DataFrame to an HTML table.
+
+ Parameters
+ ----------
+ buf : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``write()`` function. If None, the result is
+ returned as a string.
+ encoding : str, default "utf-8"
+ Set character encoding.
+ classes : str or list-like
+ classes to include in the `class` attribute of the opening
+ ``<table>`` tag, in addition to the default "dataframe".
+ notebook : {True, False}, optional, default False
+ Whether the generated HTML is for IPython Notebook.
+ border : int
+ A ``border=border`` attribute is included in the opening
+ ``<table>`` tag. Default ``pd.options.display.html.border``.
+ table_id : str, optional
+ A css id is included in the opening ``<table>`` tag if specified.
+ render_links : bool, default False
+ Convert URLs to HTML links.
+ """
+ from pandas.io.formats.html import (
+ HTMLFormatter,
+ NotebookFormatter,
+ )
+
+ Klass = NotebookFormatter if notebook else HTMLFormatter
+
+ html_formatter = Klass(
+ self.fmt,
+ classes=classes,
+ border=border,
+ table_id=table_id,
+ render_links=render_links,
+ )
+ string = html_formatter.to_string()
+ return save_to_buffer(string, buf=buf, encoding=encoding)
+
+ def to_string(
+ self,
+ buf: FilePath | WriteBuffer[str] | None = None,
+ encoding: str | None = None,
+ line_width: int | None = None,
+ ) -> str | None:
+ """
+ Render a DataFrame to a console-friendly tabular output.
+
+ Parameters
+ ----------
+ buf : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``write()`` function. If None, the result is
+ returned as a string.
+ encoding : str, default "utf-8"
+ Set character encoding.
+ line_width : int, optional
+ Width to wrap a line in characters.
+ """
+ from pandas.io.formats.string import StringFormatter
+
+ string_formatter = StringFormatter(self.fmt, line_width=line_width)
+ string = string_formatter.to_string()
+ return save_to_buffer(string, buf=buf, encoding=encoding)
+
+ def to_csv(
+ self,
+ path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
+ encoding: str | None = None,
+ sep: str = ",",
+ columns: Sequence[Hashable] | None = None,
+ index_label: IndexLabel | None = None,
+ mode: str = "w",
+ compression: CompressionOptions = "infer",
+ quoting: int | None = None,
+ quotechar: str = '"',
+ lineterminator: str | None = None,
+ chunksize: int | None = None,
+ date_format: str | None = None,
+ doublequote: bool = True,
+ escapechar: str | None = None,
+ errors: str = "strict",
+ storage_options: StorageOptions | None = None,
+ ) -> str | None:
+ """
+ Render dataframe as comma-separated file.
+ """
+ from pandas.io.formats.csvs import CSVFormatter
+
+ if path_or_buf is None:
+ created_buffer = True
+ path_or_buf = StringIO()
+ else:
+ created_buffer = False
+
+ csv_formatter = CSVFormatter(
+ path_or_buf=path_or_buf,
+ lineterminator=lineterminator,
+ sep=sep,
+ encoding=encoding,
+ errors=errors,
+ compression=compression,
+ quoting=quoting,
+ cols=columns,
+ index_label=index_label,
+ mode=mode,
+ chunksize=chunksize,
+ quotechar=quotechar,
+ date_format=date_format,
+ doublequote=doublequote,
+ escapechar=escapechar,
+ storage_options=storage_options,
+ formatter=self.fmt,
+ )
+ csv_formatter.save()
+
+ if created_buffer:
+ assert isinstance(path_or_buf, StringIO)
+ content = path_or_buf.getvalue()
+ path_or_buf.close()
+ return content
+
+ return None
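+
+ # Illustrative usage sketch (not part of pandas source): callers build a
+ # DataFrameFormatter, wrap it in a DataFrameRenderer, and pick an output
+ # format; ``df`` below is a hypothetical DataFrame.
+ #
+ # >>> renderer = DataFrameRenderer(DataFrameFormatter(frame=df))
+ # >>> csv_text = renderer.to_csv() # buf is None -> returns the CSV string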
+
+
+def save_to_buffer(
+ string: str,
+ buf: FilePath | WriteBuffer[str] | None = None,
+ encoding: str | None = None,
+) -> str | None:
+ """
+ Perform serialization. Write to buf or return as string if buf is None.
+ """
+ with _get_buffer(buf, encoding=encoding) as fd:
+ fd.write(string)
+ if buf is None:
+ # error: "WriteBuffer[str]" has no attribute "getvalue"
+ return fd.getvalue() # type: ignore[attr-defined]
+ return None
+
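+# Illustrative usage sketch (not part of pandas source): when ``buf`` is None
+# the rendered string is returned; otherwise it is written to the given path
+# or handle and None is returned. The file name below is hypothetical.
+#
+# >>> save_to_buffer("col\n1") # returns the string
+# 'col\n1'
+# >>> save_to_buffer("col\n1", buf="out.txt") # writes a file, returns None
+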
+
+@contextmanager
+def _get_buffer(
+ buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None
+) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]:
+ """
+ Context manager to open, yield and close buffer for filenames or Path-like
+ objects, otherwise yield buf unchanged.
+ """
+ if buf is not None:
+ buf = stringify_path(buf)
+ else:
+ buf = StringIO()
+
+ if encoding is None:
+ encoding = "utf-8"
+ elif not isinstance(buf, str):
+ raise ValueError("buf is not a file name and encoding is specified.")
+
+ if hasattr(buf, "write"):
+ # Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str],
+ # StringIO]", expected type "Union[WriteBuffer[str], StringIO]")
+ yield buf # type: ignore[misc]
+ elif isinstance(buf, str):
+ check_parent_directory(str(buf))
+ with open(buf, "w", encoding=encoding, newline="") as f:
+ # GH#30034 open instead of codecs.open prevents a file leak
+ # if we have an invalid encoding argument.
+ # newline="" is needed to roundtrip correctly on
+ # windows test_to_latex_filename
+ yield f
+ else:
+ raise TypeError("buf is not a file name and it has no write method")
+
+
+# ----------------------------------------------------------------------
+# Array formatters
+
+
+def format_array(
+ values: ArrayLike,
+ formatter: Callable | None,
+ float_format: FloatFormatType | None = None,
+ na_rep: str = "NaN",
+ digits: int | None = None,
+ space: str | int | None = None,
+ justify: str = "right",
+ decimal: str = ".",
+ leading_space: bool | None = True,
+ quoting: int | None = None,
+ fallback_formatter: Callable | None = None,
+) -> list[str]:
+ """
+ Format an array for printing.
+
+ Parameters
+ ----------
+ values : np.ndarray or ExtensionArray
+ formatter
+ float_format
+ na_rep
+ digits
+ space
+ justify
+ decimal
+ leading_space : bool, optional, default True
+ Whether the array should be formatted with a leading space.
+ When an array is used as a column of a Series or DataFrame, we do want
+ the leading space to pad between columns.
+
+ When formatting an Index subclass
+ (e.g. IntervalIndex._get_values_for_csv), we don't want the
+ leading space since it should be left-aligned.
+ fallback_formatter
+
+ Returns
+ -------
+ List[str]
+ """
+ fmt_klass: type[_GenericArrayFormatter]
+ if lib.is_np_dtype(values.dtype, "M"):
+ fmt_klass = _Datetime64Formatter
+ values = cast(DatetimeArray, values)
+ elif isinstance(values.dtype, DatetimeTZDtype):
+ fmt_klass = _Datetime64TZFormatter
+ values = cast(DatetimeArray, values)
+ elif lib.is_np_dtype(values.dtype, "m"):
+ fmt_klass = _Timedelta64Formatter
+ values = cast(TimedeltaArray, values)
+ elif isinstance(values.dtype, ExtensionDtype):
+ fmt_klass = _ExtensionArrayFormatter
+ elif lib.is_np_dtype(values.dtype, "fc"):
+ fmt_klass = FloatArrayFormatter
+ elif lib.is_np_dtype(values.dtype, "iu"):
+ fmt_klass = _IntArrayFormatter
+ else:
+ fmt_klass = _GenericArrayFormatter
+
+ if space is None:
+ space = 12
+
+ if float_format is None:
+ float_format = get_option("display.float_format")
+
+ if digits is None:
+ digits = get_option("display.precision")
+
+ fmt_obj = fmt_klass(
+ values,
+ digits=digits,
+ na_rep=na_rep,
+ float_format=float_format,
+ formatter=formatter,
+ space=space,
+ justify=justify,
+ decimal=decimal,
+ leading_space=leading_space,
+ quoting=quoting,
+ fallback_formatter=fallback_formatter,
+ )
+
+ return fmt_obj.get_result()
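+
+
+# Illustrative sketch (not part of pandas source): with default options a
+# float ndarray routes to FloatArrayFormatter; trailing zeros are trimmed
+# equally and a leading sign column is kept:
+#
+# >>> import numpy as np
+# >>> format_array(np.array([1.0, -2.5, np.nan]), formatter=None)
+# [' 1.0', '-2.5', ' NaN']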
+
+
+class _GenericArrayFormatter:
+ def __init__(
+ self,
+ values: ArrayLike,
+ digits: int = 7,
+ formatter: Callable | None = None,
+ na_rep: str = "NaN",
+ space: str | int = 12,
+ float_format: FloatFormatType | None = None,
+ justify: str = "right",
+ decimal: str = ".",
+ quoting: int | None = None,
+ fixed_width: bool = True,
+ leading_space: bool | None = True,
+ fallback_formatter: Callable | None = None,
+ ) -> None:
+ self.values = values
+ self.digits = digits
+ self.na_rep = na_rep
+ self.space = space
+ self.formatter = formatter
+ self.float_format = float_format
+ self.justify = justify
+ self.decimal = decimal
+ self.quoting = quoting
+ self.fixed_width = fixed_width
+ self.leading_space = leading_space
+ self.fallback_formatter = fallback_formatter
+
+ def get_result(self) -> list[str]:
+ fmt_values = self._format_strings()
+ return _make_fixed_width(fmt_values, self.justify)
+
+ def _format_strings(self) -> list[str]:
+ if self.float_format is None:
+ float_format = get_option("display.float_format")
+ if float_format is None:
+ precision = get_option("display.precision")
+ float_format = lambda x: _trim_zeros_single_float(
+ f"{x: .{precision:d}f}"
+ )
+ else:
+ float_format = self.float_format
+
+ if self.formatter is not None:
+ formatter = self.formatter
+ elif self.fallback_formatter is not None:
+ formatter = self.fallback_formatter
+ else:
+ quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE
+ formatter = partial(
+ printing.pprint_thing,
+ escape_chars=("\t", "\r", "\n"),
+ quote_strings=quote_strings,
+ )
+
+ def _format(x):
+ if self.na_rep is not None and is_scalar(x) and isna(x):
+ if x is None:
+ return "None"
+ elif x is NA:
+ return str(NA)
+ elif lib.is_float(x) and np.isinf(x):
+ # TODO(3.0): this will be unreachable when use_inf_as_na
+ # deprecation is enforced
+ return str(x)
+ elif x is NaT or isinstance(x, (np.datetime64, np.timedelta64)):
+ return "NaT"
+ return self.na_rep
+ elif isinstance(x, PandasObject):
+ return str(x)
+ elif isinstance(x, StringDtype):
+ return repr(x)
+ else:
+ # object dtype
+ return str(formatter(x))
+
+ vals = self.values
+ if not isinstance(vals, np.ndarray):
+ raise TypeError(
+ "ExtensionArray formatting should use _ExtensionArrayFormatter"
+ )
+ inferred = lib.map_infer(vals, is_float)
+ is_float_type = (
+ inferred
+ # vals may have 2 or more dimensions
+ & np.all(notna(vals), axis=tuple(range(1, len(vals.shape))))
+ )
+ leading_space = self.leading_space
+ if leading_space is None:
+ leading_space = is_float_type.any()
+
+ fmt_values = []
+ for i, v in enumerate(vals):
+ if (not is_float_type[i] or self.formatter is not None) and leading_space:
+ fmt_values.append(f" {_format(v)}")
+ elif is_float_type[i]:
+ fmt_values.append(float_format(v))
+ else:
+ if leading_space is False:
+ # False specifically, so that the default is
+ # to include a space if we get here.
+ tpl = "{v}"
+ else:
+ tpl = " {v}"
+ fmt_values.append(tpl.format(v=_format(v)))
+
+ return fmt_values
+
+
+class FloatArrayFormatter(_GenericArrayFormatter):
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+
+ # float_format is expected to be a string
+ # formatter should be used to pass a function
+ if self.float_format is not None and self.formatter is None:
+ # GH21625, GH22270
+ self.fixed_width = False
+ if callable(self.float_format):
+ self.formatter = self.float_format
+ self.float_format = None
+
+ def _value_formatter(
+ self,
+ float_format: FloatFormatType | None = None,
+ threshold: float | None = None,
+ ) -> Callable:
+ """Returns a function to be applied on each value to format it"""
+ # the float_format parameter supersedes self.float_format
+ if float_format is None:
+ float_format = self.float_format
+
+ # we are going to compose different functions, to first convert to
+ # a string, then replace the decimal symbol, and finally chop according
+ # to the threshold
+
+ # when there is no float_format, we use str instead of '%g'
+ # because str(0.0) = '0.0' while '%g' % 0.0 = '0'
+ if float_format:
+
+ def base_formatter(v):
+ assert float_format is not None # for mypy
+ # error: "str" not callable
+ # error: Unexpected keyword argument "value" for "__call__" of
+ # "EngFormatter"
+ return (
+ float_format(value=v) # type: ignore[operator,call-arg]
+ if notna(v)
+ else self.na_rep
+ )
+
+ else:
+
+ def base_formatter(v):
+ return str(v) if notna(v) else self.na_rep
+
+ if self.decimal != ".":
+
+ def decimal_formatter(v):
+ return base_formatter(v).replace(".", self.decimal, 1)
+
+ else:
+ decimal_formatter = base_formatter
+
+ if threshold is None:
+ return decimal_formatter
+
+ def formatter(value):
+ if notna(value):
+ if abs(value) > threshold:
+ return decimal_formatter(value)
+ else:
+ return decimal_formatter(0.0)
+ else:
+ return self.na_rep
+
+ return formatter
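+
+ # Illustrative note (not part of pandas source): with a threshold, values
+ # smaller in magnitude than the threshold are rendered as 0:
+ #
+ # >>> import numpy as np
+ # >>> fmt = FloatArrayFormatter(np.array([1e-12]))._value_formatter(
+ # ... threshold=1e-6
+ # ... )
+ # >>> fmt(1e-12)
+ # '0.0'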
+
+ def get_result_as_array(self) -> np.ndarray:
+ """
+ Returns the float values converted into strings using
+ the parameters given at initialisation, as a numpy array
+ """
+
+ def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str):
+ mask = isna(values)
+ formatted = np.array(
+ [
+ formatter(val) if not m else na_rep
+ for val, m in zip(values.ravel(), mask.ravel())
+ ]
+ ).reshape(values.shape)
+ return formatted
+
+ def format_complex_with_na_rep(
+ values: ArrayLike, formatter: Callable, na_rep: str
+ ):
+ real_values = np.real(values).ravel() # type: ignore[arg-type]
+ imag_values = np.imag(values).ravel() # type: ignore[arg-type]
+ real_mask, imag_mask = isna(real_values), isna(imag_values)
+ formatted_lst = []
+ for val, real_val, imag_val, re_isna, im_isna in zip(
+ values.ravel(),
+ real_values,
+ imag_values,
+ real_mask,
+ imag_mask,
+ ):
+ if not re_isna and not im_isna:
+ formatted_lst.append(formatter(val))
+ elif not re_isna: # xxx+nanj
+ formatted_lst.append(f"{formatter(real_val)}+{na_rep}j")
+ elif not im_isna: # nan[+/-]xxxj
+ # The imaginary part may either start with a "-" or a space
+ imag_formatted = formatter(imag_val).strip()
+ if imag_formatted.startswith("-"):
+ formatted_lst.append(f"{na_rep}{imag_formatted}j")
+ else:
+ formatted_lst.append(f"{na_rep}+{imag_formatted}j")
+ else: # nan+nanj
+ formatted_lst.append(f"{na_rep}+{na_rep}j")
+ return np.array(formatted_lst).reshape(values.shape)
+
+ if self.formatter is not None:
+ return format_with_na_rep(self.values, self.formatter, self.na_rep)
+
+ if self.fixed_width:
+ threshold = get_option("display.chop_threshold")
+ else:
+ threshold = None
+
+ # if we have a fixed_width, we'll need to try different float_format
+ def format_values_with(float_format):
+ formatter = self._value_formatter(float_format, threshold)
+
+ # default formatter leaves a space to the left when formatting
+ # floats, must be consistent for left-justifying NaNs (GH #25061)
+ na_rep = " " + self.na_rep if self.justify == "left" else self.na_rep
+
+ # different formatting strategies for complex and non-complex data
+ # need to distinguish complex and float NaNs (GH #53762)
+ values = self.values
+ is_complex = is_complex_dtype(values)
+
+ # separate the wheat from the chaff
+ if is_complex:
+ values = format_complex_with_na_rep(values, formatter, na_rep)
+ else:
+ values = format_with_na_rep(values, formatter, na_rep)
+
+ if self.fixed_width:
+ if is_complex:
+ result = _trim_zeros_complex(values, self.decimal)
+ else:
+ result = _trim_zeros_float(values, self.decimal)
+ return np.asarray(result, dtype="object")
+
+ return values
+
+ # There is a special default string when we are fixed-width
+ # The default is otherwise to use str instead of a formatting string
+ float_format: FloatFormatType | None
+ if self.float_format is None:
+ if self.fixed_width:
+ if self.leading_space is True:
+ fmt_str = "{value: .{digits:d}f}"
+ else:
+ fmt_str = "{value:.{digits:d}f}"
+ float_format = partial(fmt_str.format, digits=self.digits)
+ else:
+ float_format = self.float_format
+ else:
+ float_format = lambda value: self.float_format % value
+
+ formatted_values = format_values_with(float_format)
+
+ if not self.fixed_width:
+ return formatted_values
+
+ # we need to convert to engineering format if some values are too small
+ # and would appear as 0, or if some values are too big and take too
+ # much space
+
+ if len(formatted_values) > 0:
+ maxlen = max(len(x) for x in formatted_values)
+ too_long = maxlen > self.digits + 6
+ else:
+ too_long = False
+
+ abs_vals = np.abs(self.values)
+ # this is pretty arbitrary for now
+ # large values: more than 8 characters including decimal symbol
+ # and first digit, hence > 1e6
+ has_large_values = (abs_vals > 1e6).any()
+ has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any()
+
+ if has_small_values or (too_long and has_large_values):
+ if self.leading_space is True:
+ fmt_str = "{value: .{digits:d}e}"
+ else:
+ fmt_str = "{value:.{digits:d}e}"
+ float_format = partial(fmt_str.format, digits=self.digits)
+ formatted_values = format_values_with(float_format)
+
+ return formatted_values
+
+ def _format_strings(self) -> list[str]:
+ return list(self.get_result_as_array())
+
+
+class _IntArrayFormatter(_GenericArrayFormatter):
+ def _format_strings(self) -> list[str]:
+ if self.leading_space is False:
+ formatter_str = lambda x: f"{x:d}"
+ else:
+ formatter_str = lambda x: f"{x: d}"
+ formatter = self.formatter or formatter_str
+ fmt_values = [formatter(x) for x in self.values]
+ return fmt_values
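+
+
+# Illustrative note (not part of pandas source): the "{x: d}" format spec
+# reserves a sign column so positive ints align with negative ones:
+#
+# >>> [f"{x: d}" for x in (5, -5)]
+# [' 5', '-5']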
+
+
+class _Datetime64Formatter(_GenericArrayFormatter):
+ values: DatetimeArray
+
+ def __init__(
+ self,
+ values: DatetimeArray,
+ nat_rep: str = "NaT",
+ date_format: None = None,
+ **kwargs,
+ ) -> None:
+ super().__init__(values, **kwargs)
+ self.nat_rep = nat_rep
+ self.date_format = date_format
+
+ def _format_strings(self) -> list[str]:
+ """we by definition have DO NOT have a TZ"""
+ values = self.values
+
+ if self.formatter is not None:
+ return [self.formatter(x) for x in values]
+
+ fmt_values = values._format_native_types(
+ na_rep=self.nat_rep, date_format=self.date_format
+ )
+ return fmt_values.tolist()
+
+
+class _ExtensionArrayFormatter(_GenericArrayFormatter):
+ values: ExtensionArray
+
+ def _format_strings(self) -> list[str]:
+ values = self.values
+
+ formatter = self.formatter
+ fallback_formatter = None
+ if formatter is None:
+ fallback_formatter = values._formatter(boxed=True)
+
+ if isinstance(values, Categorical):
+ # Categorical is special for now, so that we can preserve tzinfo
+ array = values._internal_get_values()
+ else:
+ array = np.asarray(values, dtype=object)
+
+ fmt_values = format_array(
+ array,
+ formatter,
+ float_format=self.float_format,
+ na_rep=self.na_rep,
+ digits=self.digits,
+ space=self.space,
+ justify=self.justify,
+ decimal=self.decimal,
+ leading_space=self.leading_space,
+ quoting=self.quoting,
+ fallback_formatter=fallback_formatter,
+ )
+ return fmt_values
+
+
+def format_percentiles(
+ percentiles: (np.ndarray | Sequence[float]),
+) -> list[str]:
+ """
+ Outputs rounded and formatted percentiles.
+
+ Parameters
+ ----------
+ percentiles : list-like, containing floats from interval [0,1]
+
+ Returns
+ -------
+ formatted : list of strings
+
+ Notes
+ -----
+ Rounding precision is chosen so that: (1) if any two elements of
+ ``percentiles`` differ, they remain different after rounding
+ (2) no entry is *rounded* to 0% or 100%.
+ Any non-integer is always rounded to at least 1 decimal place.
+
+ Examples
+ --------
+ Keeps all entries different after rounding:
+
+ >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
+ ['1.999%', '2.001%', '50%', '66.667%', '99.99%']
+
+ No element is rounded to 0% or 100% (unless already equal to it).
+ Duplicates are allowed:
+
+ >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
+ ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
+ """
+ percentiles = np.asarray(percentiles)
+
+ # It checks for np.nan as well
+ if (
+ not is_numeric_dtype(percentiles)
+ or not np.all(percentiles >= 0)
+ or not np.all(percentiles <= 1)
+ ):
+ raise ValueError("percentiles should all be in the interval [0,1]")
+
+ percentiles = 100 * percentiles
+ prec = get_precision(percentiles)
+ percentiles_round_type = percentiles.round(prec).astype(int)
+
+ int_idx = np.isclose(percentiles_round_type, percentiles)
+
+ if np.all(int_idx):
+ out = percentiles_round_type.astype(str)
+ return [i + "%" for i in out]
+
+ unique_pcts = np.unique(percentiles)
+ prec = get_precision(unique_pcts)
+ out = np.empty_like(percentiles, dtype=object)
+ out[int_idx] = percentiles[int_idx].round().astype(int).astype(str)
+
+ out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
+ return [i + "%" for i in out]
+
+
+def get_precision(array: np.ndarray | Sequence[float]) -> int:
+ to_begin = array[0] if array[0] > 0 else None
+ to_end = 100 - array[-1] if array[-1] < 100 else None
+ diff = np.ediff1d(array, to_begin=to_begin, to_end=to_end)
+ diff = abs(diff)
+ prec = -np.floor(np.log10(np.min(diff))).astype(int)
+ prec = max(1, prec)
+ return prec
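+
+
+# Illustrative sketch (not part of pandas source): the precision follows the
+# smallest gap between neighboring percentiles (on the 0-100 scale):
+#
+# >>> import numpy as np
+# >>> get_precision(np.array([25.0, 50.0, 75.0]))
+# 1
+# >>> get_precision(np.array([1.999, 2.001, 50.0]))
+# 3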
+
+
+def _format_datetime64(x: NaTType | Timestamp, nat_rep: str = "NaT") -> str:
+ if x is NaT:
+ return nat_rep
+
+ # Timestamp.__str__ falls back to datetime.datetime.__str__ = isoformat(sep=' ')
+ # so it already uses string formatting rather than strftime (faster).
+ return str(x)
+
+
+def _format_datetime64_dateonly(
+ x: NaTType | Timestamp,
+ nat_rep: str = "NaT",
+ date_format: str | None = None,
+) -> str:
+ if isinstance(x, NaTType):
+ return nat_rep
+
+ if date_format:
+ return x.strftime(date_format)
+ else:
+ # Timestamp._date_repr relies on string formatting (faster than strftime)
+ return x._date_repr
+
+
+def get_format_datetime64(
+ is_dates_only: bool, nat_rep: str = "NaT", date_format: str | None = None
+) -> Callable:
+ """Return a formatter callable taking a datetime64 as input and providing
+ a string as output"""
+
+ if is_dates_only:
+ return lambda x: _format_datetime64_dateonly(
+ x, nat_rep=nat_rep, date_format=date_format
+ )
+ else:
+ return lambda x: _format_datetime64(x, nat_rep=nat_rep)
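+
+
+# Illustrative sketch (not part of pandas source):
+#
+# >>> import pandas as pd
+# >>> fmt = get_format_datetime64(is_dates_only=True)
+# >>> fmt(pd.Timestamp("2020-01-01"))
+# '2020-01-01'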
+
+
+class _Datetime64TZFormatter(_Datetime64Formatter):
+ values: DatetimeArray
+
+ def _format_strings(self) -> list[str]:
+ """we by definition have a TZ"""
+ ido = self.values._is_dates_only
+ values = self.values.astype(object)
+ formatter = self.formatter or get_format_datetime64(
+ ido, date_format=self.date_format
+ )
+ fmt_values = [formatter(x) for x in values]
+
+ return fmt_values
+
+
+class _Timedelta64Formatter(_GenericArrayFormatter):
+ values: TimedeltaArray
+
+ def __init__(
+ self,
+ values: TimedeltaArray,
+ nat_rep: str = "NaT",
+ **kwargs,
+ ) -> None:
+ # TODO: nat_rep is never passed, na_rep is.
+ super().__init__(values, **kwargs)
+ self.nat_rep = nat_rep
+
+ def _format_strings(self) -> list[str]:
+ formatter = self.formatter or get_format_timedelta64(
+ self.values, nat_rep=self.nat_rep, box=False
+ )
+ return [formatter(x) for x in self.values]
+
+
+def get_format_timedelta64(
+ values: TimedeltaArray,
+ nat_rep: str | float = "NaT",
+ box: bool = False,
+) -> Callable:
+ """
+ Return a formatter function for a range of timedeltas.
+ These will all have the same format argument.
+
+ If box, then show the result in quotes.
+ """
+ even_days = values._is_dates_only
+
+ if even_days:
+ format = None
+ else:
+ format = "long"
+
+ def _formatter(x):
+ if x is None or (is_scalar(x) and isna(x)):
+ return nat_rep
+
+ if not isinstance(x, Timedelta):
+ x = Timedelta(x)
+
+ # Timedelta._repr_base uses string formatting (faster than strftime)
+ result = x._repr_base(format=format)
+ if box:
+ result = f"'{result}'"
+ return result
+
+ return _formatter
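+
+
+# Illustrative sketch (not part of pandas source): for whole-day timedeltas
+# the time-of-day component is suppressed; ``box=True`` wraps each result in
+# quotes.
+#
+# >>> import pandas as pd
+# >>> tda = pd.array([pd.Timedelta(days=1), pd.Timedelta(days=2)])
+# >>> fmt = get_format_timedelta64(tda, box=True)
+# >>> [fmt(x) for x in tda]
+# ["'1 days'", "'2 days'"]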
+
+
+def _make_fixed_width(
+ strings: list[str],
+ justify: str = "right",
+ minimum: int | None = None,
+ adj: printing._TextAdjustment | None = None,
+) -> list[str]:
+ if len(strings) == 0 or justify == "all":
+ return strings
+
+ if adj is None:
+ adjustment = printing.get_adjustment()
+ else:
+ adjustment = adj
+
+ max_len = max(adjustment.len(x) for x in strings)
+
+ if minimum is not None:
+ max_len = max(minimum, max_len)
+
+ conf_max = get_option("display.max_colwidth")
+ if conf_max is not None and max_len > conf_max:
+ max_len = conf_max
+
+ def just(x: str) -> str:
+ if conf_max is not None:
+ if (conf_max > 3) and (adjustment.len(x) > max_len):
+ x = x[: max_len - 3] + "..."
+ return x
+
+ strings = [just(x) for x in strings]
+ result = adjustment.justify(strings, max_len, mode=justify)
+ return result
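+
+
+# Illustrative sketch (not part of pandas source), assuming the default
+# display.max_colwidth:
+#
+# >>> _make_fixed_width(["a", "bbb"], justify="right")
+# ['  a', 'bbb']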
+
+
+def _trim_zeros_complex(str_complexes: ArrayLike, decimal: str = ".") -> list[str]:
+ """
+ Separates the real and imaginary parts from the complex number, and
+ executes the _trim_zeros_float method on each of those.
+ """
+ real_part, imag_part = [], []
+ for x in str_complexes:
+ # Complex numbers are represented as "(-)xxx(+/-)xxxj"
+ # The split will give [{"", "-"}, "xxx", "+/-", "xxx", "j", ""]
+ # Therefore, the imaginary part is the 4th and 3rd last elements,
+ # and the real part is everything before the imaginary part
+ trimmed = re.split(r"([j+-])", x)
+ real_part.append("".join(trimmed[:-4]))
+ imag_part.append("".join(trimmed[-4:-2]))
+
+ # We want to align the lengths of the real and imaginary parts of each complex
+ # number, as well as the lengths of the real (resp. imaginary) parts of all
+ # numbers in the array
+ n = len(str_complexes)
+ padded_parts = _trim_zeros_float(real_part + imag_part, decimal)
+ if len(padded_parts) == 0:
+ return []
+ padded_length = max(len(part) for part in padded_parts) - 1
+ padded = [
+ real_pt # real part, possibly NaN
+ + imag_pt[0] # +/-
+ + f"{imag_pt[1:]:>{padded_length}}" # complex part (no sign), possibly nan
+ + "j"
+ for real_pt, imag_pt in zip(padded_parts[:n], padded_parts[n:])
+ ]
+ return padded
+
+
+def _trim_zeros_single_float(str_float: str) -> str:
+ """
+ Trims trailing zeros after a decimal point,
+ leaving just one if necessary.
+ """
+ str_float = str_float.rstrip("0")
+ if str_float.endswith("."):
+ str_float += "0"
+
+ return str_float
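+
+
+# Illustrative sketch (not part of pandas source):
+#
+# >>> _trim_zeros_single_float(" 1.2500")
+# ' 1.25'
+# >>> _trim_zeros_single_float(" 1.0000")
+# ' 1.0'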
+
+
+def _trim_zeros_float(
+ str_floats: ArrayLike | list[str], decimal: str = "."
+) -> list[str]:
+ """
+ Trims the maximum number of trailing zeros equally from
+ all numbers containing decimals, leaving just one if
+ necessary.
+ """
+ trimmed = str_floats
+ number_regex = re.compile(rf"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$")
+
+ def is_number_with_decimal(x) -> bool:
+ return re.match(number_regex, x) is not None
+
+ def should_trim(values: ArrayLike | list[str]) -> bool:
+ """
+ Determine if an array of strings should be trimmed.
+
+ Returns True if all numbers containing decimals (defined by the
+ above regular expression) within the array end in a zero, otherwise
+ returns False.
+ """
+ numbers = [x for x in values if is_number_with_decimal(x)]
+ return len(numbers) > 0 and all(x.endswith("0") for x in numbers)
+
+ while should_trim(trimmed):
+ trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed]
+
+ # leave one 0 after the decimal points if need be.
+ result = [
+ x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x
+ for x in trimmed
+ ]
+ return result
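+
+
+# Illustrative sketch (not part of pandas source): trailing zeros are trimmed
+# equally across all entries so that columns stay aligned:
+#
+# >>> _trim_zeros_float(["1.00", "2.50", "3.000"])
+# ['1.0', '2.5', '3.00']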
+
+
+def _has_names(index: Index) -> bool:
+ if isinstance(index, MultiIndex):
+ return com.any_not_none(*index.names)
+ else:
+ return index.name is not None
+
+
+class EngFormatter:
+ """
+ Formats float values according to engineering format.
+
+ Based on matplotlib.ticker.EngFormatter
+ """
+
+ # The SI engineering prefixes
+ ENG_PREFIXES = {
+ -24: "y",
+ -21: "z",
+ -18: "a",
+ -15: "f",
+ -12: "p",
+ -9: "n",
+ -6: "u",
+ -3: "m",
+ 0: "",
+ 3: "k",
+ 6: "M",
+ 9: "G",
+ 12: "T",
+ 15: "P",
+ 18: "E",
+ 21: "Z",
+ 24: "Y",
+ }
+
+ def __init__(
+ self, accuracy: int | None = None, use_eng_prefix: bool = False
+ ) -> None:
+ self.accuracy = accuracy
+ self.use_eng_prefix = use_eng_prefix
+
+ def __call__(self, num: float) -> str:
+ """
+ Formats a number in engineering notation, appending a letter
+ representing the power of 1000 of the original number. Some examples:
+ >>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True)
+ >>> format_eng(0)
+ ' 0'
+ >>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True)
+ >>> format_eng(1_000_000)
+ ' 1.0M'
+ >>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False)
+ >>> format_eng("-1e-6")
+ '-1.00E-06'
+
+ @param num: the value to represent
+ @type num: either a numeric value or a string that can be converted to
+ a numeric value (as per decimal.Decimal constructor)
+
+ @return: engineering formatted string
+ """
+ dnum = Decimal(str(num))
+
+ if Decimal.is_nan(dnum):
+ return "NaN"
+
+ if Decimal.is_infinite(dnum):
+ return "inf"
+
+ sign = 1
+
+ if dnum < 0: # pragma: no cover
+ sign = -1
+ dnum = -dnum
+
+ if dnum != 0:
+ pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3))
+ else:
+ pow10 = Decimal(0)
+
+ pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
+ pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
+ int_pow10 = int(pow10)
+
+ if self.use_eng_prefix:
+ prefix = self.ENG_PREFIXES[int_pow10]
+ elif int_pow10 < 0:
+ prefix = f"E-{-int_pow10:02d}"
+ else:
+ prefix = f"E+{int_pow10:02d}"
+
+ mant = sign * dnum / (10**pow10)
+
+ if self.accuracy is None: # pragma: no cover
+ format_str = "{mant: g}{prefix}"
+ else:
+ format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"
+
+ formatted = format_str.format(mant=mant, prefix=prefix)
+
+ return formatted
+
+
+def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:
+ """
+ Format float representation in DataFrame with SI notation.
+
+ Parameters
+ ----------
+ accuracy : int, default 3
+ Number of decimal digits after the floating point.
+ use_eng_prefix : bool, default False
+ Whether to represent a value with SI prefixes.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6])
+ >>> df
+ 0
+ 0 1.000000e-09
+ 1 1.000000e-03
+ 2 1.000000e+00
+ 3 1.000000e+03
+ 4 1.000000e+06
+
+ >>> pd.set_eng_float_format(accuracy=1)
+ >>> df
+ 0
+ 0 1.0E-09
+ 1 1.0E-03
+ 2 1.0E+00
+ 3 1.0E+03
+ 4 1.0E+06
+
+ >>> pd.set_eng_float_format(use_eng_prefix=True)
+ >>> df
+ 0
+ 0 1.000n
+ 1 1.000m
+ 2 1.000
+ 3 1.000k
+ 4 1.000M
+
+ >>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)
+ >>> df
+ 0
+ 0 1.0n
+ 1 1.0m
+ 2 1.0
+ 3 1.0k
+ 4 1.0M
+
+ >>> pd.set_option("display.float_format", None) # unset option
+ """
+ set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
+
+
+def get_level_lengths(
+ levels: Any, sentinel: bool | object | str = ""
+) -> list[dict[int, int]]:
+ """
+ For each level, return the lengths of the index runs.
+
+ Parameters
+ ----------
+ levels : list of lists
+ List of values for each level.
+ sentinel : string, optional
+ Value which states that no new index starts there.
+
+ Returns
+ -------
+ Returns a list of dicts, one per level, mapping the position at which each
+ index run starts to the length of that run.
+ """
+ if len(levels) == 0:
+ return []
+
+ control = [True] * len(levels[0])
+
+ result = []
+ for level in levels:
+ last_index = 0
+
+ lengths = {}
+ for i, key in enumerate(level):
+ if control[i] and key == sentinel:
+ pass
+ else:
+ control[i] = False
+ lengths[last_index] = i - last_index
+ last_index = i
+
+ lengths[last_index] = len(level) - last_index
+
+ result.append(lengths)
+
+ return result
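+
+
+# Illustrative sketch (not part of pandas source): sentinel entries extend the
+# preceding label's run, so a sparsified level maps start position -> length:
+#
+# >>> get_level_lengths([["a", "", "b", ""]], sentinel="")
+# [{0: 2, 2: 2}]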
+
+
+def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None:
+ """
+ Appends lines to a buffer.
+
+ Parameters
+ ----------
+ buf
+ The buffer to write to
+ lines
+ The lines to append.
+ """
+ if any(not isinstance(x, str) for x in lines):
+ lines = [str(x) for x in lines]
+ buf.write("\n".join(lines))
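+
+
+# Illustrative sketch (not part of pandas source): non-string entries are
+# coerced before joining.
+#
+# >>> import io
+# >>> sink = io.StringIO()
+# >>> buffer_put_lines(sink, ["a", 1])
+# >>> sink.getvalue()
+# 'a\n1'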
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/html.py b/venv/lib/python3.10/site-packages/pandas/io/formats/html.py
new file mode 100644
index 0000000000000000000000000000000000000000..794ce77b3b45ec38d9fa58a708939e53bb8ae629
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/html.py
@@ -0,0 +1,646 @@
+"""
+Module for formatting output data in HTML.
+"""
+from __future__ import annotations
+
+from textwrap import dedent
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Final,
+ cast,
+)
+
+from pandas._config import get_option
+
+from pandas._libs import lib
+
+from pandas import (
+ MultiIndex,
+ option_context,
+)
+
+from pandas.io.common import is_url
+from pandas.io.formats.format import (
+ DataFrameFormatter,
+ get_level_lengths,
+)
+from pandas.io.formats.printing import pprint_thing
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Iterable,
+ Mapping,
+ )
+
+
+class HTMLFormatter:
+ """
+ Internal class for formatting output data in html.
+ This class is intended for shared functionality between
+ DataFrame.to_html() and DataFrame._repr_html_().
+ Any logic in common with other output formatting methods
+ should ideally be inherited from classes in format.py
+ and this class responsible for only producing html markup.
+ """
+
+ indent_delta: Final = 2
+
+ def __init__(
+ self,
+ formatter: DataFrameFormatter,
+ classes: str | list[str] | tuple[str, ...] | None = None,
+ border: int | bool | None = None,
+ table_id: str | None = None,
+ render_links: bool = False,
+ ) -> None:
+ self.fmt = formatter
+ self.classes = classes
+
+ self.frame = self.fmt.frame
+ self.columns = self.fmt.tr_frame.columns
+ self.elements: list[str] = []
+ self.bold_rows = self.fmt.bold_rows
+ self.escape = self.fmt.escape
+ self.show_dimensions = self.fmt.show_dimensions
+ if border is None or border is True:
+ border = cast(int, get_option("display.html.border"))
+ elif not border:
+ border = None
+
+ self.border = border
+ self.table_id = table_id
+ self.render_links = render_links
+
+ self.col_space = {}
+ is_multi_index = isinstance(self.columns, MultiIndex)
+ for column, value in self.fmt.col_space.items():
+ col_space_value = f"{value}px" if isinstance(value, int) else value
+ self.col_space[column] = col_space_value
+ # GH 53885: Handling case where column is index
+ # Flatten the data in the MultiIndex and add it to the map
+ if is_multi_index and isinstance(column, tuple):
+ for column_index in column:
+ self.col_space[str(column_index)] = col_space_value
+
+ def to_string(self) -> str:
+ lines = self.render()
+ if any(not isinstance(x, str) for x in lines):
+ lines = [str(x) for x in lines]
+ return "\n".join(lines)
+
+ def render(self) -> list[str]:
+ self._write_table()
+
+ if self.should_show_dimensions:
+ by = chr(215) # × # noqa: RUF003
+ self.write(
+ f"
"
+ )
+
+ return self.elements
+
+ @property
+ def should_show_dimensions(self) -> bool:
+ return self.fmt.should_show_dimensions
+
+ @property
+ def show_row_idx_names(self) -> bool:
+ return self.fmt.show_row_idx_names
+
+ @property
+ def show_col_idx_names(self) -> bool:
+ return self.fmt.show_col_idx_names
+
+ @property
+ def row_levels(self) -> int:
+ if self.fmt.index:
+ # showing (row) index
+ return self.frame.index.nlevels
+ elif self.show_col_idx_names:
+ # see gh-22579
+ # Column misalignment also occurs for
+ # a standard index when the columns index is named.
+ # If the row index is not displayed a column of
+ # blank cells need to be included before the DataFrame values.
+ return 1
+ # not showing (row) index
+ return 0
+
+ def _get_columns_formatted_values(self) -> Iterable:
+ return self.columns
+
+ @property
+ def is_truncated(self) -> bool:
+ return self.fmt.is_truncated
+
+ @property
+ def ncols(self) -> int:
+ return len(self.fmt.tr_frame.columns)
+
+ def write(self, s: Any, indent: int = 0) -> None:
+ rs = pprint_thing(s)
+ self.elements.append(" " * indent + rs)
+
+ def write_th(
+ self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None
+ ) -> None:
+ """
+ Method for writing a formatted <th> cell.
+
+ If col_space is set on the formatter then that is used for
+ the value of min-width.
+
+ Parameters
+ ----------
+ s : object
+ The data to be written inside the cell.
+ header : bool, default False
+ Set to True if the <th> is for use inside <thead>. This will
+ cause min-width to be set if there is one.
+ indent : int, default 0
+ The indentation level of the cell.
+ tags : str, default None
+ Tags to include in the cell.
+
+ Returns
+ -------
+ A written <th> cell.
+ """
+ col_space = self.col_space.get(s, None)
+
+ if header and col_space is not None:
+ tags = tags or ""
+ tags += f'style="min-width: {col_space};"'
+
+ self._write_cell(s, kind="th", indent=indent, tags=tags)
+
+ def write_td(self, s: Any, indent: int = 0, tags: str | None = None) -> None:
+ self._write_cell(s, kind="td", indent=indent, tags=tags)
+
+ def _write_cell(
+ self, s: Any, kind: str = "td", indent: int = 0, tags: str | None = None
+ ) -> None:
+ if tags is not None:
+ start_tag = f"<{kind} {tags}>"
+ else:
+ start_tag = f"<{kind}>"
+
+ if self.escape:
+ # escape & first to prevent double escaping of &
+ esc = {"&": r"&", "<": r"<", ">": r">"}
+ else:
+ esc = {}
+
+ rs = pprint_thing(s, escape_chars=esc).strip()
+
+ if self.render_links and is_url(rs):
+ rs_unescaped = pprint_thing(s, escape_chars={}).strip()
+ start_tag += f'<a href="{rs_unescaped}" target="_blank">'
+ end_a = "</a>"
+ else:
+ end_a = ""
+
+ self.write(f"{start_tag}{rs}{end_a}{kind}>", indent)
+
+ def write_tr(
+ self,
+ line: Iterable,
+ indent: int = 0,
+ indent_delta: int = 0,
+ header: bool = False,
+ align: str | None = None,
+ tags: dict[int, str] | None = None,
+ nindex_levels: int = 0,
+ ) -> None:
+ if tags is None:
+ tags = {}
+
+ if align is None:
+ self.write("
", indent)
+ else:
+ self.write(f'
', indent)
+ indent += indent_delta
+
+ for i, s in enumerate(line):
+ val_tag = tags.get(i, None)
+ if header or (self.bold_rows and i < nindex_levels):
+ self.write_th(s, indent=indent, header=header, tags=val_tag)
+ else:
+ self.write_td(s, indent, tags=val_tag)
+
+ indent -= indent_delta
+ self.write("
", indent)
+
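+ # Illustrative sketch (not part of pandas source): one body row written via
+ # write_tr appends indented tags to ``self.elements``, roughly:
+ #
+ # <tr>
+ # <td>1</td>
+ # <td>2</td>
+ # </tr>
+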
+ def _write_table(self, indent: int = 0) -> None:
+ _classes = ["dataframe"] # Default class.
+ use_mathjax = get_option("display.html.use_mathjax")
+ if not use_mathjax:
+ _classes.append("tex2jax_ignore")
+ if self.classes is not None:
+ if isinstance(self.classes, str):
+ self.classes = self.classes.split()
+ if not isinstance(self.classes, (list, tuple)):
+ raise TypeError(
+ "classes must be a string, list, "
+ f"or tuple, not {type(self.classes)}"
+ )
+ _classes.extend(self.classes)
+
+ if self.table_id is None:
+ id_section = ""
+ else:
+ id_section = f' id="{self.table_id}"'
+
+ if self.border is None:
+ border_attr = ""
+ else:
+ border_attr = f' border="{self.border}"'
+
+ self.write(
+ f'<table{border_attr} class="{" ".join(_classes)}"{id_section}>',
+ indent,
+ )
+
+ if self.fmt.header or self.show_row_idx_names:
+ self._write_header(indent + self.indent_delta)
+
+ self._write_body(indent + self.indent_delta)
+
+ self.write("</table>", indent)
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/info.py b/venv/lib/python3.10/site-packages/pandas/io/formats/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..552affbd053f2bed3f4d5f678ddf8eb293f65b01
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/info.py
@@ -0,0 +1,1101 @@
+from __future__ import annotations
+
+from abc import (
+ ABC,
+ abstractmethod,
+)
+import sys
+from textwrap import dedent
+from typing import TYPE_CHECKING
+
+from pandas._config import get_option
+
+from pandas.io.formats import format as fmt
+from pandas.io.formats.printing import pprint_thing
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Iterable,
+ Iterator,
+ Mapping,
+ Sequence,
+ )
+
+ from pandas._typing import (
+ Dtype,
+ WriteBuffer,
+ )
+
+ from pandas import (
+ DataFrame,
+ Index,
+ Series,
+ )
+
+
+frame_max_cols_sub = dedent(
+ """\
+ max_cols : int, optional
+ When to switch from the verbose to the truncated output. If the
+ DataFrame has more than `max_cols` columns, the truncated output
+ is used. By default, the setting in
+ ``pandas.options.display.max_info_columns`` is used."""
+)
+
+
+show_counts_sub = dedent(
+ """\
+ show_counts : bool, optional
+ Whether to show the non-null counts. By default, this is shown
+ only if the DataFrame is smaller than
+ ``pandas.options.display.max_info_rows`` and
+ ``pandas.options.display.max_info_columns``. A value of True always
+ shows the counts, and False never shows the counts."""
+)
+
+
+frame_examples_sub = dedent(
+ """\
+ >>> int_values = [1, 2, 3, 4, 5]
+ >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
+ >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
+ >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
+ ... "float_col": float_values})
+ >>> df
+ int_col text_col float_col
+ 0 1 alpha 0.00
+ 1 2 beta 0.25
+ 2 3 gamma 0.50
+ 3 4 delta 0.75
+ 4 5 epsilon 1.00
+
+ Prints information of all columns:
+
+ >>> df.info(verbose=True)
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 5 entries, 0 to 4
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 int_col 5 non-null int64
+ 1 text_col 5 non-null object
+ 2 float_col 5 non-null float64
+ dtypes: float64(1), int64(1), object(1)
+ memory usage: 248.0+ bytes
+
+ Prints a summary of the column count and dtypes but not per-column
+ information:
+
+ >>> df.info(verbose=False)
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 5 entries, 0 to 4
+ Columns: 3 entries, int_col to float_col
+ dtypes: float64(1), int64(1), object(1)
+ memory usage: 248.0+ bytes
+
+ Pipe output of DataFrame.info to buffer instead of sys.stdout, get
+ buffer content and write it to a text file:
+
+ >>> import io
+ >>> buffer = io.StringIO()
+ >>> df.info(buf=buffer)
+ >>> s = buffer.getvalue()
+ >>> with open("df_info.txt", "w",
+ ... encoding="utf-8") as f: # doctest: +SKIP
+ ... f.write(s)
+ 260
+
+ The `memory_usage` parameter allows deep introspection mode, especially
+ useful for big DataFrames and for fine-tuning memory optimization:
+
+ >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
+ >>> df = pd.DataFrame({
+ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
+ ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
+ ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
+ ... })
+ >>> df.info()
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 1000000 entries, 0 to 999999
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
+ dtypes: object(3)
+ memory usage: 22.9+ MB
+
+ >>> df.info(memory_usage='deep')
+ <class 'pandas.core.frame.DataFrame'>
+ RangeIndex: 1000000 entries, 0 to 999999
+ Data columns (total 3 columns):
+ # Column Non-Null Count Dtype
+ --- ------ -------------- -----
+ 0 column_1 1000000 non-null object
+ 1 column_2 1000000 non-null object
+ 2 column_3 1000000 non-null object
+ dtypes: object(3)
+ memory usage: 165.9 MB"""
+)
+
+
+frame_see_also_sub = dedent(
+ """\
+ DataFrame.describe: Generate descriptive statistics of DataFrame
+ columns.
+ DataFrame.memory_usage: Memory usage of DataFrame columns."""
+)
+
+
+frame_sub_kwargs = {
+ "klass": "DataFrame",
+ "type_sub": " and columns",
+ "max_cols_sub": frame_max_cols_sub,
+ "show_counts_sub": show_counts_sub,
+ "examples_sub": frame_examples_sub,
+ "see_also_sub": frame_see_also_sub,
+ "version_added_sub": "",
+}
+
+
+series_examples_sub = dedent(
+ """\
+ >>> int_values = [1, 2, 3, 4, 5]
+ >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
+ >>> s = pd.Series(text_values, index=int_values)
+ >>> s.info()
+ <class 'pandas.core.series.Series'>
+ Index: 5 entries, 1 to 5
+ Series name: None
+ Non-Null Count Dtype
+ -------------- -----
+ 5 non-null object
+ dtypes: object(1)
+ memory usage: 80.0+ bytes
+
+ Prints a summary excluding information about its values:
+
+ >>> s.info(verbose=False)
+ <class 'pandas.core.series.Series'>
+ Index: 5 entries, 1 to 5
+ dtypes: object(1)
+ memory usage: 80.0+ bytes
+
+ Pipe output of Series.info to buffer instead of sys.stdout, get
+ buffer content and write it to a text file:
+
+ >>> import io
+ >>> buffer = io.StringIO()
+ >>> s.info(buf=buffer)
+ >>> s = buffer.getvalue()
+ >>> with open("df_info.txt", "w",
+ ... encoding="utf-8") as f: # doctest: +SKIP
+ ... f.write(s)
+ 260
+
+ The `memory_usage` parameter allows deep introspection mode, especially
+ useful for big Series and for fine-tuning memory optimization:
+
+ >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
+ >>> s = pd.Series(np.random.choice(['a', 'b', 'c'], 10 ** 6))
+ >>> s.info()
+ <class 'pandas.core.series.Series'>
+ RangeIndex: 1000000 entries, 0 to 999999
+ Series name: None
+ Non-Null Count Dtype
+ -------------- -----
+ 1000000 non-null object
+ dtypes: object(1)
+ memory usage: 7.6+ MB
+
+ >>> s.info(memory_usage='deep')
+ <class 'pandas.core.series.Series'>
+ RangeIndex: 1000000 entries, 0 to 999999
+ Series name: None
+ Non-Null Count Dtype
+ -------------- -----
+ 1000000 non-null object
+ dtypes: object(1)
+ memory usage: 55.3 MB"""
+)
+
+
+series_see_also_sub = dedent(
+ """\
+ Series.describe: Generate descriptive statistics of Series.
+ Series.memory_usage: Memory usage of Series."""
+)
+
+
+series_sub_kwargs = {
+ "klass": "Series",
+ "type_sub": "",
+ "max_cols_sub": "",
+ "show_counts_sub": show_counts_sub,
+ "examples_sub": series_examples_sub,
+ "see_also_sub": series_see_also_sub,
+ "version_added_sub": "\n.. versionadded:: 1.4.0\n",
+}
+
+
+INFO_DOCSTRING = dedent(
+ """
+ Print a concise summary of a {klass}.
+
+ This method prints information about a {klass} including
+ the index dtype{type_sub}, non-null values and memory usage.
+ {version_added_sub}\
+
+ Parameters
+ ----------
+ verbose : bool, optional
+ Whether to print the full summary. By default, the setting in
+ ``pandas.options.display.max_info_columns`` is followed.
+ buf : writable buffer, defaults to sys.stdout
+ Where to send the output. By default, the output is printed to
+ sys.stdout. Pass a writable buffer if you need to further process
+ the output.
+ {max_cols_sub}
+ memory_usage : bool, str, optional
+ Specifies whether total memory usage of the {klass}
+ elements (including the index) should be displayed. By default,
+ this follows the ``pandas.options.display.memory_usage`` setting.
+
+ True always show memory usage. False never shows memory usage.
+ A value of 'deep' is equivalent to "True with deep introspection".
+ Memory usage is shown in human-readable units (base-2
+ representation). Without deep introspection a memory estimation is
+ made based on column dtype and number of rows, assuming values
+ consume the same memory amount for corresponding dtypes. With deep
+ memory introspection, a real memory usage calculation is performed
+ at the cost of computational resources. See the
+ :ref:`Frequently Asked Questions <df-memory-usage>` for more
+ details.
+ {show_counts_sub}
+
+ Returns
+ -------
+ None
+ This method prints a summary of a {klass} and returns None.
+
+ See Also
+ --------
+ {see_also_sub}
+
+ Examples
+ --------
+ {examples_sub}
+ """
+)
+
+
+def _put_str(s: str | Dtype, space: int) -> str:
+ """
+ Make string of specified length, padding to the right if necessary.
+
+ Parameters
+ ----------
+ s : Union[str, Dtype]
+ String to be formatted.
+ space : int
+ Length to force string to be of.
+
+ Returns
+ -------
+ str
+ String coerced to given length.
+
+ Examples
+ --------
+ >>> pd.io.formats.info._put_str("panda", 6)
+ 'panda '
+ >>> pd.io.formats.info._put_str("panda", 4)
+ 'pand'
+ """
+ return str(s)[:space].ljust(space)
+
+
+def _sizeof_fmt(num: float, size_qualifier: str) -> str:
+ """
+ Return size in human readable format.
+
+ Parameters
+ ----------
+ num : float
+ Size in bytes.
+ size_qualifier : str
+ Either empty, or '+' (if lower bound).
+
+ Returns
+ -------
+ str
+ Size in human readable format.
+
+ Examples
+ --------
+ >>> _sizeof_fmt(23028, '')
+ '22.5 KB'
+
+ >>> _sizeof_fmt(23028, '+')
+ '22.5+ KB'
+ """
+ for x in ["bytes", "KB", "MB", "GB", "TB"]:
+ if num < 1024.0:
+ return f"{num:3.1f}{size_qualifier} {x}"
+ num /= 1024.0
+ return f"{num:3.1f}{size_qualifier} PB"
+
+
+def _initialize_memory_usage(
+ memory_usage: bool | str | None = None,
+) -> bool | str:
+ """Get memory usage based on inputs and display options."""
+ if memory_usage is None:
+ memory_usage = get_option("display.memory_usage")
+ return memory_usage
+
+
+class _BaseInfo(ABC):
+ """
+ Base class for DataFrameInfo and SeriesInfo.
+
+ Parameters
+ ----------
+ data : DataFrame or Series
+ Either dataframe or series.
+ memory_usage : bool or str, optional
+ If "deep", introspect the data deeply by interrogating object dtypes
+ for system-level memory consumption, and include it in the returned
+ values.
+ """
+
+ data: DataFrame | Series
+ memory_usage: bool | str
+
+ @property
+ @abstractmethod
+ def dtypes(self) -> Iterable[Dtype]:
+ """
+ Dtypes.
+
+ Returns
+ -------
+ dtypes : sequence
+ Dtype of each of the DataFrame's columns (or one series column).
+ """
+
+ @property
+ @abstractmethod
+ def dtype_counts(self) -> Mapping[str, int]:
+ """Mapping dtype - number of counts."""
+
+ @property
+ @abstractmethod
+ def non_null_counts(self) -> Sequence[int]:
+ """Sequence of non-null counts for all columns or column (if series)."""
+
+ @property
+ @abstractmethod
+ def memory_usage_bytes(self) -> int:
+ """
+ Memory usage in bytes.
+
+ Returns
+ -------
+ memory_usage_bytes : int
+ Object's total memory usage in bytes.
+ """
+
+ @property
+ def memory_usage_string(self) -> str:
+ """Memory usage in a form of human readable string."""
+ return f"{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n"
+
+ @property
+ def size_qualifier(self) -> str:
+ size_qualifier = ""
+ if self.memory_usage:
+ if self.memory_usage != "deep":
+ # size_qualifier is just a best effort; not guaranteed to catch
+ # all cases (e.g., it misses categorical data even with object
+ # categories)
+ if (
+ "object" in self.dtype_counts
+ or self.data.index._is_memory_usage_qualified()
+ ):
+ size_qualifier = "+"
+ return size_qualifier
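+
+ # Illustrative note (not part of pandas source): the "+" marks a lower-bound
+ # estimate, e.g. frames with object columns when deep introspection is off;
+ # the exact byte count below is hypothetical.
+ #
+ # >>> pd.DataFrame({"a": ["x"]}).info() # doctest: +SKIP
+ # ...
+ # memory usage: 132.0+ bytes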
+
+ @abstractmethod
+ def render(
+ self,
+ *,
+ buf: WriteBuffer[str] | None,
+ max_cols: int | None,
+ verbose: bool | None,
+ show_counts: bool | None,
+ ) -> None:
+ pass
+
+
+class DataFrameInfo(_BaseInfo):
+ """
+ Class storing dataframe-specific info.
+ """
+
+ def __init__(
+ self,
+ data: DataFrame,
+ memory_usage: bool | str | None = None,
+ ) -> None:
+ self.data: DataFrame = data
+ self.memory_usage = _initialize_memory_usage(memory_usage)
+
+ @property
+ def dtype_counts(self) -> Mapping[str, int]:
+ return _get_dataframe_dtype_counts(self.data)
+
+ @property
+ def dtypes(self) -> Iterable[Dtype]:
+ """
+ Dtypes.
+
+ Returns
+ -------
+ dtypes
+ Dtype of each of the DataFrame's columns.
+ """
+ return self.data.dtypes
+
+ @property
+ def ids(self) -> Index:
+ """
+ Column names.
+
+ Returns
+ -------
+ ids : Index
+ DataFrame's column names.
+ """
+ return self.data.columns
+
+ @property
+ def col_count(self) -> int:
+ """Number of columns to be summarized."""
+ return len(self.ids)
+
+ @property
+ def non_null_counts(self) -> Sequence[int]:
+ """Sequence of non-null counts for all columns or column (if series)."""
+ return self.data.count()
+
+ @property
+ def memory_usage_bytes(self) -> int:
+ deep = self.memory_usage == "deep"
+ return self.data.memory_usage(index=True, deep=deep).sum()
+
+ def render(
+ self,
+ *,
+ buf: WriteBuffer[str] | None,
+ max_cols: int | None,
+ verbose: bool | None,
+ show_counts: bool | None,
+ ) -> None:
+ printer = _DataFrameInfoPrinter(
+ info=self,
+ max_cols=max_cols,
+ verbose=verbose,
+ show_counts=show_counts,
+ )
+ printer.to_buffer(buf)
+
+
+class SeriesInfo(_BaseInfo):
+ """
+ Class storing series-specific info.
+ """
+
+ def __init__(
+ self,
+ data: Series,
+ memory_usage: bool | str | None = None,
+ ) -> None:
+ self.data: Series = data
+ self.memory_usage = _initialize_memory_usage(memory_usage)
+
+ def render(
+ self,
+ *,
+ buf: WriteBuffer[str] | None = None,
+ max_cols: int | None = None,
+ verbose: bool | None = None,
+ show_counts: bool | None = None,
+ ) -> None:
+ if max_cols is not None:
+ raise ValueError(
+ "Argument `max_cols` can only be passed "
+ "in DataFrame.info, not Series.info"
+ )
+ printer = _SeriesInfoPrinter(
+ info=self,
+ verbose=verbose,
+ show_counts=show_counts,
+ )
+ printer.to_buffer(buf)
+
+ @property
+ def non_null_counts(self) -> Sequence[int]:
+ return [self.data.count()]
+
+ @property
+ def dtypes(self) -> Iterable[Dtype]:
+ return [self.data.dtypes]
+
+ @property
+ def dtype_counts(self) -> Mapping[str, int]:
+ from pandas.core.frame import DataFrame
+
+ return _get_dataframe_dtype_counts(DataFrame(self.data))
+
+ @property
+ def memory_usage_bytes(self) -> int:
+ """Memory usage in bytes.
+
+ Returns
+ -------
+ memory_usage_bytes : int
+ Object's total memory usage in bytes.
+ """
+ deep = self.memory_usage == "deep"
+ return self.data.memory_usage(index=True, deep=deep)
+
+
+class _InfoPrinterAbstract:
+ """
+ Class for printing dataframe or series info.
+ """
+
+ def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None:
+ """Save dataframe info into buffer."""
+ table_builder = self._create_table_builder()
+ lines = table_builder.get_lines()
+ if buf is None: # pragma: no cover
+ buf = sys.stdout
+ fmt.buffer_put_lines(buf, lines)
+
+ @abstractmethod
+ def _create_table_builder(self) -> _TableBuilderAbstract:
+ """Create instance of table builder."""
+
+
+class _DataFrameInfoPrinter(_InfoPrinterAbstract):
+ """
+ Class for printing dataframe info.
+
+ Parameters
+ ----------
+ info : DataFrameInfo
+ Instance of DataFrameInfo.
+ max_cols : int, optional
+ When to switch from the verbose to the truncated output.
+ verbose : bool, optional
+ Whether to print the full summary.
+ show_counts : bool, optional
+ Whether to show the non-null counts.
+ """
+
+ def __init__(
+ self,
+ info: DataFrameInfo,
+ max_cols: int | None = None,
+ verbose: bool | None = None,
+ show_counts: bool | None = None,
+ ) -> None:
+ self.info = info
+ self.data = info.data
+ self.verbose = verbose
+ self.max_cols = self._initialize_max_cols(max_cols)
+ self.show_counts = self._initialize_show_counts(show_counts)
+
+ @property
+ def max_rows(self) -> int:
+ """Maximum info rows to be displayed."""
+ return get_option("display.max_info_rows", len(self.data) + 1)
+
+ @property
+ def exceeds_info_cols(self) -> bool:
+ """Check if number of columns to be summarized does not exceed maximum."""
+ return bool(self.col_count > self.max_cols)
+
+ @property
+ def exceeds_info_rows(self) -> bool:
+ """Check if number of rows to be summarized does not exceed maximum."""
+ return bool(len(self.data) > self.max_rows)
+
+ @property
+ def col_count(self) -> int:
+ """Number of columns to be summarized."""
+ return self.info.col_count
+
+ def _initialize_max_cols(self, max_cols: int | None) -> int:
+ if max_cols is None:
+ return get_option("display.max_info_columns", self.col_count + 1)
+ return max_cols
+
+ def _initialize_show_counts(self, show_counts: bool | None) -> bool:
+ if show_counts is None:
+ return bool(not self.exceeds_info_cols and not self.exceeds_info_rows)
+ else:
+ return show_counts
+
+ def _create_table_builder(self) -> _DataFrameTableBuilder:
+ """
+ Create instance of table builder based on verbosity and display settings.
+ """
+ if self.verbose:
+ return _DataFrameTableBuilderVerbose(
+ info=self.info,
+ with_counts=self.show_counts,
+ )
+ elif self.verbose is False: # specifically set to False, not just falsey (e.g. None)
+ return _DataFrameTableBuilderNonVerbose(info=self.info)
+ elif self.exceeds_info_cols:
+ return _DataFrameTableBuilderNonVerbose(info=self.info)
+ else:
+ return _DataFrameTableBuilderVerbose(
+ info=self.info,
+ with_counts=self.show_counts,
+ )
+
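+# Illustrative sketch (not part of the original module): how the ``verbose``
+# flag maps to a table builder, assuming a small frame that stays within the
+# ``display.max_info_columns`` and ``display.max_info_rows`` limits.
+#
+# >>> import pandas as pd
+# >>> from pandas.io.formats.info import DataFrameInfo, _DataFrameInfoPrinter
+# >>> info = DataFrameInfo(data=pd.DataFrame({"a": [1, 2]}), memory_usage=None)
+# >>> printer = _DataFrameInfoPrinter(info, verbose=False)
+# >>> type(printer._create_table_builder()).__name__
+# '_DataFrameTableBuilderNonVerbose'
+# >>> type(_DataFrameInfoPrinter(info, verbose=None)._create_table_builder()).__name__
+# '_DataFrameTableBuilderVerbose'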
+
+class _SeriesInfoPrinter(_InfoPrinterAbstract):
+ """Class for printing series info.
+
+ Parameters
+ ----------
+ info : SeriesInfo
+ Instance of SeriesInfo.
+ verbose : bool, optional
+ Whether to print the full summary.
+ show_counts : bool, optional
+ Whether to show the non-null counts.
+ """
+
+ def __init__(
+ self,
+ info: SeriesInfo,
+ verbose: bool | None = None,
+ show_counts: bool | None = None,
+ ) -> None:
+ self.info = info
+ self.data = info.data
+ self.verbose = verbose
+ self.show_counts = self._initialize_show_counts(show_counts)
+
+ def _create_table_builder(self) -> _SeriesTableBuilder:
+ """
+ Create instance of table builder based on verbosity.
+ """
+ if self.verbose or self.verbose is None:
+ return _SeriesTableBuilderVerbose(
+ info=self.info,
+ with_counts=self.show_counts,
+ )
+ else:
+ return _SeriesTableBuilderNonVerbose(info=self.info)
+
+ def _initialize_show_counts(self, show_counts: bool | None) -> bool:
+ if show_counts is None:
+ return True
+ else:
+ return show_counts
+
+
+class _TableBuilderAbstract(ABC):
+ """
+ Abstract builder for info table.
+ """
+
+ _lines: list[str]
+ info: _BaseInfo
+
+ @abstractmethod
+ def get_lines(self) -> list[str]:
+ """Product in a form of list of lines (strings)."""
+
+ @property
+ def data(self) -> DataFrame | Series:
+ return self.info.data
+
+ @property
+ def dtypes(self) -> Iterable[Dtype]:
+ """Dtypes of each of the DataFrame's columns."""
+ return self.info.dtypes
+
+ @property
+ def dtype_counts(self) -> Mapping[str, int]:
+ """Mapping dtype - number of counts."""
+ return self.info.dtype_counts
+
+ @property
+ def display_memory_usage(self) -> bool:
+ """Whether to display memory usage."""
+ return bool(self.info.memory_usage)
+
+ @property
+ def memory_usage_string(self) -> str:
+ """Memory usage string with proper size qualifier."""
+ return self.info.memory_usage_string
+
+ @property
+ def non_null_counts(self) -> Sequence[int]:
+ return self.info.non_null_counts
+
+ def add_object_type_line(self) -> None:
+ """Add line with string representation of dataframe to the table."""
+ self._lines.append(str(type(self.data)))
+
+ def add_index_range_line(self) -> None:
+ """Add line with range of indices to the table."""
+ self._lines.append(self.data.index._summary())
+
+ def add_dtypes_line(self) -> None:
+ """Add summary line with dtypes present in dataframe."""
+ collected_dtypes = [
+ f"{key}({val:d})" for key, val in sorted(self.dtype_counts.items())
+ ]
+ self._lines.append(f"dtypes: {', '.join(collected_dtypes)}")
+
+
+class _DataFrameTableBuilder(_TableBuilderAbstract):
+ """
+ Abstract builder for dataframe info table.
+
+ Parameters
+ ----------
+    info : DataFrameInfo
+ Instance of DataFrameInfo.
+ """
+
+ def __init__(self, *, info: DataFrameInfo) -> None:
+ self.info: DataFrameInfo = info
+
+ def get_lines(self) -> list[str]:
+ self._lines = []
+ if self.col_count == 0:
+ self._fill_empty_info()
+ else:
+ self._fill_non_empty_info()
+ return self._lines
+
+ def _fill_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to empty dataframe."""
+ self.add_object_type_line()
+ self.add_index_range_line()
+ self._lines.append(f"Empty {type(self.data).__name__}\n")
+
+ @abstractmethod
+ def _fill_non_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to non-empty dataframe."""
+
+ @property
+ def data(self) -> DataFrame:
+ """DataFrame."""
+ return self.info.data
+
+ @property
+ def ids(self) -> Index:
+ """Dataframe columns."""
+ return self.info.ids
+
+ @property
+ def col_count(self) -> int:
+ """Number of dataframe columns to be summarized."""
+ return self.info.col_count
+
+ def add_memory_usage_line(self) -> None:
+ """Add line containing memory usage."""
+ self._lines.append(f"memory usage: {self.memory_usage_string}")
+
+
+class _DataFrameTableBuilderNonVerbose(_DataFrameTableBuilder):
+ """
+ Dataframe info table builder for non-verbose output.
+ """
+
+ def _fill_non_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to non-empty dataframe."""
+ self.add_object_type_line()
+ self.add_index_range_line()
+ self.add_columns_summary_line()
+ self.add_dtypes_line()
+ if self.display_memory_usage:
+ self.add_memory_usage_line()
+
+ def add_columns_summary_line(self) -> None:
+ self._lines.append(self.ids._summary(name="Columns"))
+
+
+class _TableBuilderVerboseMixin(_TableBuilderAbstract):
+ """
+ Mixin for verbose info output.
+ """
+
+ SPACING: str = " " * 2
+ strrows: Sequence[Sequence[str]]
+ gross_column_widths: Sequence[int]
+ with_counts: bool
+
+ @property
+ @abstractmethod
+ def headers(self) -> Sequence[str]:
+ """Headers names of the columns in verbose table."""
+
+ @property
+ def header_column_widths(self) -> Sequence[int]:
+ """Widths of header columns (only titles)."""
+ return [len(col) for col in self.headers]
+
+ def _get_gross_column_widths(self) -> Sequence[int]:
+ """Get widths of columns containing both headers and actual content."""
+ body_column_widths = self._get_body_column_widths()
+ return [
+ max(*widths)
+ for widths in zip(self.header_column_widths, body_column_widths)
+ ]
+
+ def _get_body_column_widths(self) -> Sequence[int]:
+ """Get widths of table content columns."""
+ strcols: Sequence[Sequence[str]] = list(zip(*self.strrows))
+ return [max(len(x) for x in col) for col in strcols]
+
+ def _gen_rows(self) -> Iterator[Sequence[str]]:
+ """
+ Generator function yielding rows content.
+
+ Each element represents a row comprising a sequence of strings.
+ """
+ if self.with_counts:
+ return self._gen_rows_with_counts()
+ else:
+ return self._gen_rows_without_counts()
+
+ @abstractmethod
+ def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
+ """Iterator with string representation of body data with counts."""
+
+ @abstractmethod
+ def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
+ """Iterator with string representation of body data without counts."""
+
+ def add_header_line(self) -> None:
+ header_line = self.SPACING.join(
+ [
+ _put_str(header, col_width)
+ for header, col_width in zip(self.headers, self.gross_column_widths)
+ ]
+ )
+ self._lines.append(header_line)
+
+ def add_separator_line(self) -> None:
+ separator_line = self.SPACING.join(
+ [
+ _put_str("-" * header_colwidth, gross_colwidth)
+ for header_colwidth, gross_colwidth in zip(
+ self.header_column_widths, self.gross_column_widths
+ )
+ ]
+ )
+ self._lines.append(separator_line)
+
+ def add_body_lines(self) -> None:
+ for row in self.strrows:
+ body_line = self.SPACING.join(
+ [
+ _put_str(col, gross_colwidth)
+ for col, gross_colwidth in zip(row, self.gross_column_widths)
+ ]
+ )
+ self._lines.append(body_line)
+
+ def _gen_non_null_counts(self) -> Iterator[str]:
+ """Iterator with string representation of non-null counts."""
+ for count in self.non_null_counts:
+ yield f"{count} non-null"
+
+ def _gen_dtypes(self) -> Iterator[str]:
+ """Iterator with string representation of column dtypes."""
+ for dtype in self.dtypes:
+ yield pprint_thing(dtype)
+
+
+class _DataFrameTableBuilderVerbose(_DataFrameTableBuilder, _TableBuilderVerboseMixin):
+ """
+ Dataframe info table builder for verbose output.
+ """
+
+ def __init__(
+ self,
+ *,
+ info: DataFrameInfo,
+ with_counts: bool,
+ ) -> None:
+ self.info = info
+ self.with_counts = with_counts
+ self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())
+ self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()
+
+ def _fill_non_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to non-empty dataframe."""
+ self.add_object_type_line()
+ self.add_index_range_line()
+ self.add_columns_summary_line()
+ self.add_header_line()
+ self.add_separator_line()
+ self.add_body_lines()
+ self.add_dtypes_line()
+ if self.display_memory_usage:
+ self.add_memory_usage_line()
+
+ @property
+ def headers(self) -> Sequence[str]:
+ """Headers names of the columns in verbose table."""
+ if self.with_counts:
+ return [" # ", "Column", "Non-Null Count", "Dtype"]
+ return [" # ", "Column", "Dtype"]
+
+ def add_columns_summary_line(self) -> None:
+ self._lines.append(f"Data columns (total {self.col_count} columns):")
+
+ def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
+ """Iterator with string representation of body data without counts."""
+ yield from zip(
+ self._gen_line_numbers(),
+ self._gen_columns(),
+ self._gen_dtypes(),
+ )
+
+ def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
+ """Iterator with string representation of body data with counts."""
+ yield from zip(
+ self._gen_line_numbers(),
+ self._gen_columns(),
+ self._gen_non_null_counts(),
+ self._gen_dtypes(),
+ )
+
+ def _gen_line_numbers(self) -> Iterator[str]:
+ """Iterator with string representation of column numbers."""
+ for i, _ in enumerate(self.ids):
+ yield f" {i}"
+
+ def _gen_columns(self) -> Iterator[str]:
+ """Iterator with string representation of column names."""
+ for col in self.ids:
+ yield pprint_thing(col)
+
+
+class _SeriesTableBuilder(_TableBuilderAbstract):
+ """
+ Abstract builder for series info table.
+
+ Parameters
+ ----------
+    info : SeriesInfo
+ Instance of SeriesInfo.
+ """
+
+ def __init__(self, *, info: SeriesInfo) -> None:
+ self.info: SeriesInfo = info
+
+ def get_lines(self) -> list[str]:
+ self._lines = []
+ self._fill_non_empty_info()
+ return self._lines
+
+ @property
+ def data(self) -> Series:
+ """Series."""
+ return self.info.data
+
+ def add_memory_usage_line(self) -> None:
+ """Add line containing memory usage."""
+ self._lines.append(f"memory usage: {self.memory_usage_string}")
+
+ @abstractmethod
+ def _fill_non_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to non-empty series."""
+
+
+class _SeriesTableBuilderNonVerbose(_SeriesTableBuilder):
+ """
+ Series info table builder for non-verbose output.
+ """
+
+ def _fill_non_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to non-empty series."""
+ self.add_object_type_line()
+ self.add_index_range_line()
+ self.add_dtypes_line()
+ if self.display_memory_usage:
+ self.add_memory_usage_line()
+
+
+class _SeriesTableBuilderVerbose(_SeriesTableBuilder, _TableBuilderVerboseMixin):
+ """
+ Series info table builder for verbose output.
+ """
+
+ def __init__(
+ self,
+ *,
+ info: SeriesInfo,
+ with_counts: bool,
+ ) -> None:
+ self.info = info
+ self.with_counts = with_counts
+ self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())
+ self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()
+
+ def _fill_non_empty_info(self) -> None:
+ """Add lines to the info table, pertaining to non-empty series."""
+ self.add_object_type_line()
+ self.add_index_range_line()
+ self.add_series_name_line()
+ self.add_header_line()
+ self.add_separator_line()
+ self.add_body_lines()
+ self.add_dtypes_line()
+ if self.display_memory_usage:
+ self.add_memory_usage_line()
+
+ def add_series_name_line(self) -> None:
+ self._lines.append(f"Series name: {self.data.name}")
+
+ @property
+ def headers(self) -> Sequence[str]:
+ """Headers names of the columns in verbose table."""
+ if self.with_counts:
+ return ["Non-Null Count", "Dtype"]
+ return ["Dtype"]
+
+ def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
+ """Iterator with string representation of body data without counts."""
+ yield from self._gen_dtypes()
+
+ def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
+ """Iterator with string representation of body data with counts."""
+ yield from zip(
+ self._gen_non_null_counts(),
+ self._gen_dtypes(),
+ )
+
+
+def _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]:
+ """
+ Create mapping between datatypes and their number of occurrences.
+ """
+ # groupby dtype.name to collect e.g. Categorical columns
+ return df.dtypes.value_counts().groupby(lambda x: x.name).sum()
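+
+
+# Illustrative sketch (not part of the original module): dtype counts collapse
+# per-column dtypes by name, so several Categorical columns with different
+# categories would all count under "category". Assumes the default object
+# dtype for the string column.
+#
+# >>> import pandas as pd
+# >>> df = pd.DataFrame({"a": [1], "b": ["x"], "c": pd.Categorical(["y"])})
+# >>> dict(_get_dataframe_dtype_counts(df))
+# {'category': 1, 'int64': 1, 'object': 1}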
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/printing.py b/venv/lib/python3.10/site-packages/pandas/io/formats/printing.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cc9368f8846a6423655040673df283d111efeda
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/printing.py
@@ -0,0 +1,572 @@
+"""
+Printing tools.
+"""
+from __future__ import annotations
+
+from collections.abc import (
+ Iterable,
+ Mapping,
+ Sequence,
+)
+import sys
+from typing import (
+ Any,
+ Callable,
+ TypeVar,
+ Union,
+)
+from unicodedata import east_asian_width
+
+from pandas._config import get_option
+
+from pandas.core.dtypes.inference import is_sequence
+
+from pandas.io.formats.console import get_console_size
+
+EscapeChars = Union[Mapping[str, str], Iterable[str]]
+_KT = TypeVar("_KT")
+_VT = TypeVar("_VT")
+
+
+def adjoin(space: int, *lists: list[str], **kwargs) -> str:
+ """
+ Glues together two sets of strings using the amount of space requested.
+ The idea is to prettify.
+
+    Parameters
+    ----------
+    space : int
+        number of spaces for padding
+    lists : str
+        lists of str to be joined
+ strlen : callable
+ function used to calculate the length of each str. Needed for unicode
+ handling.
+ justfunc : callable
+ function used to justify str. Needed for unicode handling.
+ """
+ strlen = kwargs.pop("strlen", len)
+ justfunc = kwargs.pop("justfunc", _adj_justify)
+
+ newLists = []
+ lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
+    # the last column gets no trailing padding
+ lengths.append(max(map(len, lists[-1])))
+ maxLen = max(map(len, lists))
+ for i, lst in enumerate(lists):
+ nl = justfunc(lst, lengths[i], mode="left")
+ nl = ([" " * lengths[i]] * (maxLen - len(lst))) + nl
+ newLists.append(nl)
+ toJoin = zip(*newLists)
+ return "\n".join("".join(lines) for lines in toJoin)
+
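+# Illustrative sketch (not part of the original module): every column except
+# the last receives ``space`` extra padding, then rows are zipped together.
+# The last column is still justified to its own maximum width.
+#
+# >>> print(adjoin(2, ["a", "bb"], ["ccc", "d"]))
+# a   ccc
+# bb  d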
+
+def _adj_justify(texts: Iterable[str], max_len: int, mode: str = "right") -> list[str]:
+ """
+ Perform ljust, center, rjust against string or list-like
+ """
+ if mode == "left":
+ return [x.ljust(max_len) for x in texts]
+ elif mode == "center":
+ return [x.center(max_len) for x in texts]
+ else:
+ return [x.rjust(max_len) for x in texts]
+
+
+# Unicode consolidation
+# ---------------------
+#
+# pprinting utility functions for generating Unicode text or
+# bytes representations of objects.
+# Try to use these as much as possible rather than rolling your own.
+#
+# When to use
+# -----------
+#
+# 1) If you're writing code internal to pandas (no I/O directly involved),
+# use pprint_thing().
+#
+#    It will always return unicode text which can be handled by other
+# parts of the package without breakage.
+#
+# 2) if you need to write something out to file, use
+# pprint_thing_encoded(encoding).
+#
+# If no encoding is specified, it defaults to utf-8. Since encoding pure
+# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
+# working with straight ascii.
+
+
+def _pprint_seq(
+ seq: Sequence, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds
+) -> str:
+ """
+ internal. pprinter for iterables. you should probably use pprint_thing()
+ rather than calling this directly.
+
+ bounds length of printed sequence, depending on options
+ """
+ if isinstance(seq, set):
+ fmt = "{{{body}}}"
+ else:
+ fmt = "[{body}]" if hasattr(seq, "__setitem__") else "({body})"
+
+ if max_seq_items is False:
+ nitems = len(seq)
+ else:
+ nitems = max_seq_items or get_option("max_seq_items") or len(seq)
+
+ s = iter(seq)
+ # handle sets, no slicing
+ r = [
+ pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
+ for i in range(min(nitems, len(seq)))
+ ]
+ body = ", ".join(r)
+
+ if nitems < len(seq):
+ body += ", ..."
+ elif isinstance(seq, tuple) and len(seq) == 1:
+ body += ","
+
+ return fmt.format(body=body)
+
+
+def _pprint_dict(
+ seq: Mapping, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds
+) -> str:
+ """
+    internal. pprinter for mappings. you should probably use pprint_thing()
+ rather than calling this directly.
+ """
+ fmt = "{{{things}}}"
+ pairs = []
+
+ pfmt = "{key}: {val}"
+
+ if max_seq_items is False:
+ nitems = len(seq)
+ else:
+ nitems = max_seq_items or get_option("max_seq_items") or len(seq)
+
+ for k, v in list(seq.items())[:nitems]:
+ pairs.append(
+ pfmt.format(
+ key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
+ val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
+ )
+ )
+
+ if nitems < len(seq):
+ return fmt.format(things=", ".join(pairs) + ", ...")
+ else:
+ return fmt.format(things=", ".join(pairs))
+
+
+def pprint_thing(
+ thing: Any,
+ _nest_lvl: int = 0,
+ escape_chars: EscapeChars | None = None,
+ default_escapes: bool = False,
+ quote_strings: bool = False,
+ max_seq_items: int | None = None,
+) -> str:
+ """
+ This function is the sanctioned way of converting objects
+ to a string representation and properly handles nested sequences.
+
+ Parameters
+ ----------
+ thing : anything to be formatted
+ _nest_lvl : internal use only. pprint_thing() is mutually-recursive
+        with _pprint_seq, this argument is used to keep track of the
+ current nesting level, and limit it.
+ escape_chars : list or dict, optional
+ Characters to escape. If a dict is passed the values are the
+ replacements
+ default_escapes : bool, default False
+        Whether the input escape characters replace or add to the defaults
+ max_seq_items : int or None, default None
+ Pass through to other pretty printers to limit sequence printing
+
+ Returns
+ -------
+ str
+ """
+
+ def as_escaped_string(
+ thing: Any, escape_chars: EscapeChars | None = escape_chars
+ ) -> str:
+ translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"}
+ if isinstance(escape_chars, dict):
+ if default_escapes:
+ translate.update(escape_chars)
+ else:
+ translate = escape_chars
+ escape_chars = list(escape_chars.keys())
+ else:
+ escape_chars = escape_chars or ()
+
+ result = str(thing)
+ for c in escape_chars:
+ result = result.replace(c, translate[c])
+ return result
+
+ if hasattr(thing, "__next__"):
+ return str(thing)
+ elif isinstance(thing, dict) and _nest_lvl < get_option(
+ "display.pprint_nest_depth"
+ ):
+ result = _pprint_dict(
+ thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items
+ )
+ elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"):
+ result = _pprint_seq(
+ thing,
+ _nest_lvl,
+ escape_chars=escape_chars,
+ quote_strings=quote_strings,
+ max_seq_items=max_seq_items,
+ )
+ elif isinstance(thing, str) and quote_strings:
+ result = f"'{as_escaped_string(thing)}'"
+ else:
+ result = as_escaped_string(thing)
+
+ return result
+
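+# Illustrative sketch (not part of the original module): nested sequences are
+# rendered recursively, and ``escape_chars`` rewrites control characters.
+#
+# >>> pprint_thing(["a", ("b",)], quote_strings=True)
+# "['a', ('b',)]"
+# >>> pprint_thing("x\ty", escape_chars=("\t",))
+# 'x\\ty'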
+
+def pprint_thing_encoded(
+ object, encoding: str = "utf-8", errors: str = "replace"
+) -> bytes:
+ value = pprint_thing(object) # get unicode representation of object
+ return value.encode(encoding, errors)
+
+
+def enable_data_resource_formatter(enable: bool) -> None:
+ if "IPython" not in sys.modules:
+ # definitely not in IPython
+ return
+ from IPython import get_ipython
+
+ ip = get_ipython()
+ if ip is None:
+ # still not in IPython
+ return
+
+ formatters = ip.display_formatter.formatters
+ mimetype = "application/vnd.dataresource+json"
+
+ if enable:
+ if mimetype not in formatters:
+ # define tableschema formatter
+ from IPython.core.formatters import BaseFormatter
+ from traitlets import ObjectName
+
+ class TableSchemaFormatter(BaseFormatter):
+ print_method = ObjectName("_repr_data_resource_")
+ _return_type = (dict,)
+
+ # register it:
+ formatters[mimetype] = TableSchemaFormatter()
+ # enable it if it's been disabled:
+ formatters[mimetype].enabled = True
+ # unregister tableschema mime-type
+ elif mimetype in formatters:
+ formatters[mimetype].enabled = False
+
+
+def default_pprint(thing: Any, max_seq_items: int | None = None) -> str:
+ return pprint_thing(
+ thing,
+ escape_chars=("\t", "\r", "\n"),
+ quote_strings=True,
+ max_seq_items=max_seq_items,
+ )
+
+
+def format_object_summary(
+ obj,
+ formatter: Callable,
+ is_justify: bool = True,
+ name: str | None = None,
+ indent_for_name: bool = True,
+ line_break_each_value: bool = False,
+) -> str:
+ """
+ Return the formatted obj as a unicode string
+
+ Parameters
+ ----------
+ obj : object
+ must be iterable and support __getitem__
+ formatter : callable
+ string formatter for an element
+ is_justify : bool
+ should justify the display
+    name : str, optional
+ defaults to the class name of the obj
+ indent_for_name : bool, default True
+ Whether subsequent lines should be indented to
+ align with the name.
+ line_break_each_value : bool, default False
+ If True, inserts a line break for each value of ``obj``.
+        If False, only break lines when a line of values gets wider
+ than the display width.
+
+ Returns
+ -------
+ summary string
+ """
+ display_width, _ = get_console_size()
+ if display_width is None:
+ display_width = get_option("display.width") or 80
+ if name is None:
+ name = type(obj).__name__
+
+ if indent_for_name:
+ name_len = len(name)
+ space1 = f'\n{(" " * (name_len + 1))}'
+ space2 = f'\n{(" " * (name_len + 2))}'
+ else:
+ space1 = "\n"
+ space2 = "\n " # space for the opening '['
+
+ n = len(obj)
+ if line_break_each_value:
+ # If we want to vertically align on each value of obj, we need to
+ # separate values by a line break and indent the values
+ sep = ",\n " + " " * len(name)
+ else:
+ sep = ","
+ max_seq_items = get_option("display.max_seq_items") or n
+
+ # are we a truncated display
+ is_truncated = n > max_seq_items
+
+ # adj can optionally handle unicode eastern asian width
+ adj = get_adjustment()
+
+ def _extend_line(
+ s: str, line: str, value: str, display_width: int, next_line_prefix: str
+ ) -> tuple[str, str]:
+ if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width:
+ s += line.rstrip()
+ line = next_line_prefix
+ line += value
+ return s, line
+
+ def best_len(values: list[str]) -> int:
+ if values:
+ return max(adj.len(x) for x in values)
+ else:
+ return 0
+
+ close = ", "
+
+ if n == 0:
+ summary = f"[]{close}"
+ elif n == 1 and not line_break_each_value:
+ first = formatter(obj[0])
+ summary = f"[{first}]{close}"
+ elif n == 2 and not line_break_each_value:
+ first = formatter(obj[0])
+ last = formatter(obj[-1])
+ summary = f"[{first}, {last}]{close}"
+ else:
+ if max_seq_items == 1:
+ # If max_seq_items=1 show only last element
+ head = []
+ tail = [formatter(x) for x in obj[-1:]]
+ elif n > max_seq_items:
+ n = min(max_seq_items // 2, 10)
+ head = [formatter(x) for x in obj[:n]]
+ tail = [formatter(x) for x in obj[-n:]]
+ else:
+ head = []
+ tail = [formatter(x) for x in obj]
+
+ # adjust all values to max length if needed
+ if is_justify:
+ if line_break_each_value:
+ # Justify each string in the values of head and tail, so the
+ # strings will right align when head and tail are stacked
+ # vertically.
+ head, tail = _justify(head, tail)
+ elif is_truncated or not (
+ len(", ".join(head)) < display_width
+ and len(", ".join(tail)) < display_width
+ ):
+ # Each string in head and tail should align with each other
+ max_length = max(best_len(head), best_len(tail))
+ head = [x.rjust(max_length) for x in head]
+ tail = [x.rjust(max_length) for x in tail]
+ # If we are not truncated and we are only a single
+ # line, then don't justify
+
+ if line_break_each_value:
+ # Now head and tail are of type List[Tuple[str]]. Below we
+ # convert them into List[str], so there will be one string per
+ # value. Also truncate items horizontally if wider than
+ # max_space
+ max_space = display_width - len(space2)
+ value = tail[0]
+ max_items = 1
+ for num_items in reversed(range(1, len(value) + 1)):
+ pprinted_seq = _pprint_seq(value, max_seq_items=num_items)
+ if len(pprinted_seq) < max_space:
+ max_items = num_items
+ break
+ head = [_pprint_seq(x, max_seq_items=max_items) for x in head]
+ tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail]
+
+ summary = ""
+ line = space2
+
+ for head_value in head:
+ word = head_value + sep + " "
+ summary, line = _extend_line(summary, line, word, display_width, space2)
+
+ if is_truncated:
+ # remove trailing space of last line
+ summary += line.rstrip() + space2 + "..."
+ line = space2
+
+ for tail_item in tail[:-1]:
+ word = tail_item + sep + " "
+ summary, line = _extend_line(summary, line, word, display_width, space2)
+
+ # last value: no sep added + 1 space of width used for trailing ','
+ summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2)
+ summary += line
+
+ # right now close is either '' or ', '
+ # Now we want to include the ']', but not the maybe space.
+ close = "]" + close.rstrip(" ")
+ summary += close
+
+ if len(summary) > (display_width) or line_break_each_value:
+ summary += space1
+ else: # one row
+ summary += " "
+
+ # remove initial space
+ summary = "[" + summary[len(space2) :]
+
+ return summary
+
+
+def _justify(
+ head: list[Sequence[str]], tail: list[Sequence[str]]
+) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]:
+ """
+ Justify items in head and tail, so they are right-aligned when stacked.
+
+ Parameters
+ ----------
+ head : list-like of list-likes of strings
+ tail : list-like of list-likes of strings
+
+ Returns
+ -------
+ tuple of list of tuples of strings
+ Same as head and tail, but items are right aligned when stacked
+ vertically.
+
+ Examples
+ --------
+ >>> _justify([['a', 'b']], [['abc', 'abcd']])
+ ([(' a', ' b')], [('abc', 'abcd')])
+ """
+ combined = head + tail
+
+ # For each position for the sequences in ``combined``,
+ # find the length of the largest string.
+ max_length = [0] * len(combined[0])
+ for inner_seq in combined:
+ length = [len(item) for item in inner_seq]
+ max_length = [max(x, y) for x, y in zip(max_length, length)]
+
+ # justify each item in each list-like in head and tail using max_length
+ head_tuples = [
+ tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head
+ ]
+ tail_tuples = [
+ tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail
+ ]
+ return head_tuples, tail_tuples
+
+
+class PrettyDict(dict[_KT, _VT]):
+ """Dict extension to support abbreviated __repr__"""
+
+ def __repr__(self) -> str:
+ return pprint_thing(self)
+
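+# Illustrative sketch (not part of the original module): PrettyDict delegates
+# its repr to pprint_thing, so entries render like other pandas output and
+# long mappings honour ``display.max_seq_items``.
+#
+# >>> PrettyDict({"a": 1, "b": 2})
+# {'a': 1, 'b': 2}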
+
+class _TextAdjustment:
+ def __init__(self) -> None:
+ self.encoding = get_option("display.encoding")
+
+ def len(self, text: str) -> int:
+ return len(text)
+
+ def justify(self, texts: Any, max_len: int, mode: str = "right") -> list[str]:
+ """
+ Perform ljust, center, rjust against string or list-like
+ """
+ if mode == "left":
+ return [x.ljust(max_len) for x in texts]
+ elif mode == "center":
+ return [x.center(max_len) for x in texts]
+ else:
+ return [x.rjust(max_len) for x in texts]
+
+ def adjoin(self, space: int, *lists, **kwargs) -> str:
+ return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs)
+
+
+class _EastAsianTextAdjustment(_TextAdjustment):
+ def __init__(self) -> None:
+ super().__init__()
+ if get_option("display.unicode.ambiguous_as_wide"):
+ self.ambiguous_width = 2
+ else:
+ self.ambiguous_width = 1
+
+ # Definition of East Asian Width
+ # https://unicode.org/reports/tr11/
+ # Ambiguous width can be changed by option
+ self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}
+
+ def len(self, text: str) -> int:
+ """
+ Calculate display width considering unicode East Asian Width
+ """
+ if not isinstance(text, str):
+ return len(text)
+
+ return sum(
+ self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text
+ )
+
+ def justify(
+ self, texts: Iterable[str], max_len: int, mode: str = "right"
+ ) -> list[str]:
+ # re-calculate padding space per str considering East Asian Width
+ def _get_pad(t):
+ return max_len - self.len(t) + len(t)
+
+ if mode == "left":
+ return [x.ljust(_get_pad(x)) for x in texts]
+ elif mode == "center":
+ return [x.center(_get_pad(x)) for x in texts]
+ else:
+ return [x.rjust(_get_pad(x)) for x in texts]
+
+
+def get_adjustment() -> _TextAdjustment:
+ use_east_asian_width = get_option("display.unicode.east_asian_width")
+ if use_east_asian_width:
+ return _EastAsianTextAdjustment()
+ else:
+ return _TextAdjustment()
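+
+
+# Illustrative sketch (not part of the original module): with
+# ``display.unicode.east_asian_width`` enabled, get_adjustment() returns the
+# East Asian variant, where wide characters count as two display cells
+# (assumes pandas is imported so the display options are registered).
+#
+# >>> adj = _EastAsianTextAdjustment()
+# >>> adj.len("パンダ")  # three wide characters -> display width 6
+# 6
+# >>> adj.justify(["パンダ", "ab"], 6, mode="left")
+# ['パンダ', 'ab    ']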
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/string.py b/venv/lib/python3.10/site-packages/pandas/io/formats/string.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdad388592717dff79fde61bf35a12c0635034c1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/string.py
@@ -0,0 +1,206 @@
+"""
+Module for formatting output data in console (to string).
+"""
+from __future__ import annotations
+
+from shutil import get_terminal_size
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.io.formats.printing import pprint_thing
+
+if TYPE_CHECKING:
+ from collections.abc import Iterable
+
+ from pandas.io.formats.format import DataFrameFormatter
+
+
+class StringFormatter:
+ """Formatter for string representation of a dataframe."""
+
+ def __init__(self, fmt: DataFrameFormatter, line_width: int | None = None) -> None:
+ self.fmt = fmt
+ self.adj = fmt.adj
+ self.frame = fmt.frame
+ self.line_width = line_width
+
+ def to_string(self) -> str:
+ text = self._get_string_representation()
+ if self.fmt.should_show_dimensions:
+ text = f"{text}{self.fmt.dimensions_info}"
+ return text
+
+ def _get_strcols(self) -> list[list[str]]:
+ strcols = self.fmt.get_strcols()
+ if self.fmt.is_truncated:
+ strcols = self._insert_dot_separators(strcols)
+ return strcols
+
+ def _get_string_representation(self) -> str:
+ if self.fmt.frame.empty:
+ return self._empty_info_line
+
+ strcols = self._get_strcols()
+
+ if self.line_width is None:
+            # no need to wrap around, just print the whole frame
+ return self.adj.adjoin(1, *strcols)
+
+ if self._need_to_wrap_around:
+ return self._join_multiline(strcols)
+
+ return self._fit_strcols_to_terminal_width(strcols)
+
+ @property
+ def _empty_info_line(self) -> str:
+ return (
+ f"Empty {type(self.frame).__name__}\n"
+ f"Columns: {pprint_thing(self.frame.columns)}\n"
+ f"Index: {pprint_thing(self.frame.index)}"
+ )
+
+ @property
+ def _need_to_wrap_around(self) -> bool:
+ return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0)
+
+ def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]:
+ str_index = self.fmt._get_formatted_index(self.fmt.tr_frame)
+ index_length = len(str_index)
+
+ if self.fmt.is_truncated_horizontally:
+ strcols = self._insert_dot_separator_horizontal(strcols, index_length)
+
+ if self.fmt.is_truncated_vertically:
+ strcols = self._insert_dot_separator_vertical(strcols, index_length)
+
+ return strcols
+
+ @property
+ def _adjusted_tr_col_num(self) -> int:
+ return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num
+
+ def _insert_dot_separator_horizontal(
+ self, strcols: list[list[str]], index_length: int
+ ) -> list[list[str]]:
+ strcols.insert(self._adjusted_tr_col_num, [" ..."] * index_length)
+ return strcols
+
+ def _insert_dot_separator_vertical(
+ self, strcols: list[list[str]], index_length: int
+ ) -> list[list[str]]:
+ n_header_rows = index_length - len(self.fmt.tr_frame)
+ row_num = self.fmt.tr_row_num
+ for ix, col in enumerate(strcols):
+ cwidth = self.adj.len(col[row_num])
+
+ if self.fmt.is_truncated_horizontally:
+ is_dot_col = ix == self._adjusted_tr_col_num
+ else:
+ is_dot_col = False
+
+ if cwidth > 3 or is_dot_col:
+ dots = "..."
+ else:
+ dots = ".."
+
+ if ix == 0 and self.fmt.index:
+ dot_mode = "left"
+ elif is_dot_col:
+ cwidth = 4
+ dot_mode = "right"
+ else:
+ dot_mode = "right"
+
+ dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0]
+ col.insert(row_num + n_header_rows, dot_str)
+ return strcols
+
+ def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str:
+ lwidth = self.line_width
+ adjoin_width = 1
+ strcols = list(strcols_input)
+
+ if self.fmt.index:
+ idx = strcols.pop(0)
+ lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
+
+ col_widths = [
+ np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
+ for col in strcols
+ ]
+
+ assert lwidth is not None
+ col_bins = _binify(col_widths, lwidth)
+ nbins = len(col_bins)
+
+ str_lst = []
+ start = 0
+ for i, end in enumerate(col_bins):
+ row = strcols[start:end]
+ if self.fmt.index:
+ row.insert(0, idx)
+ if nbins > 1:
+ nrows = len(row[-1])
+ if end <= len(strcols) and i < nbins - 1:
+ row.append([" \\"] + [" "] * (nrows - 1))
+ else:
+ row.append([" "] * nrows)
+ str_lst.append(self.adj.adjoin(adjoin_width, *row))
+ start = end
+ return "\n\n".join(str_lst)
+
+ def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str:
+ from pandas import Series
+
+ lines = self.adj.adjoin(1, *strcols).split("\n")
+ max_len = Series(lines).str.len().max()
+ # plus truncate dot col
+ width, _ = get_terminal_size()
+ dif = max_len - width
+ # '+ 1' to avoid too wide repr (GH PR #17023)
+ adj_dif = dif + 1
+ col_lens = Series([Series(ele).str.len().max() for ele in strcols])
+ n_cols = len(col_lens)
+ counter = 0
+ while adj_dif > 0 and n_cols > 1:
+ counter += 1
+ mid = round(n_cols / 2)
+ mid_ix = col_lens.index[mid]
+ col_len = col_lens[mid_ix]
+ # adjoin adds one
+ adj_dif -= col_len + 1
+ col_lens = col_lens.drop(mid_ix)
+ n_cols = len(col_lens)
+
+ # subtract index column
+ max_cols_fitted = n_cols - self.fmt.index
+ # GH-21180. Ensure that we print at least two.
+ max_cols_fitted = max(max_cols_fitted, 2)
+ self.fmt.max_cols_fitted = max_cols_fitted
+
+ # Call again _truncate to cut frame appropriately
+ # and then generate string representation
+ self.fmt.truncate()
+ strcols = self._get_strcols()
+ return self.adj.adjoin(1, *strcols)
+
+
+def _binify(cols: list[int], line_width: int) -> list[int]:
+ adjoin_width = 1
+ bins = []
+ curr_width = 0
+ i_last_column = len(cols) - 1
+ for i, w in enumerate(cols):
+ w_adjoined = w + adjoin_width
+ curr_width += w_adjoined
+ if i_last_column == i:
+ wrap = curr_width + 1 > line_width and i > 0
+ else:
+ wrap = curr_width + 2 > line_width and i > 0
+ if wrap:
+ bins.append(i)
+ curr_width = w_adjoined
+
+ bins.append(len(cols))
+ return bins
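+
+
+# Illustrative sketch (not part of the original module): ``_binify`` packs
+# column widths into bins no wider than ``line_width`` and returns the end
+# index of each bin, which ``_join_multiline`` uses to split the frame into
+# vertically stacked blocks.
+#
+# >>> _binify([10, 10, 10, 10], line_width=25)
+# [2, 4]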
diff --git a/venv/lib/python3.10/site-packages/pandas/io/formats/style.py b/venv/lib/python3.10/site-packages/pandas/io/formats/style.py
new file mode 100644
index 0000000000000000000000000000000000000000..b62f7581ac2205c8c1821b6030b56f13a77c6379
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/io/formats/style.py
@@ -0,0 +1,4136 @@
+"""
+Module for applying conditional formatting to DataFrames and Series.
+"""
+from __future__ import annotations
+
+from contextlib import contextmanager
+import copy
+from functools import partial
+import operator
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._config import get_option
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import (
+ Substitution,
+ doc,
+)
+from pandas.util._exceptions import find_stack_level
+
+import pandas as pd
+from pandas import (
+ IndexSlice,
+ RangeIndex,
+)
+import pandas.core.common as com
+from pandas.core.frame import (
+ DataFrame,
+ Series,
+)
+from pandas.core.generic import NDFrame
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.formats.format import save_to_buffer
+
+jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.")
+
+from pandas.io.formats.style_render import (
+ CSSProperties,
+ CSSStyles,
+ ExtFormatter,
+ StylerRenderer,
+ Subset,
+ Tooltips,
+ format_table_styles,
+ maybe_convert_css_to_tuples,
+ non_reducing_slice,
+ refactor_levels,
+)
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Generator,
+ Hashable,
+ Sequence,
+ )
+
+ from matplotlib.colors import Colormap
+
+ from pandas._typing import (
+ Axis,
+ AxisInt,
+ FilePath,
+ IndexLabel,
+ IntervalClosedType,
+ Level,
+ QuantileInterpolation,
+ Scalar,
+ StorageOptions,
+ WriteBuffer,
+ WriteExcelBuffer,
+ )
+
+ from pandas import ExcelWriter
+
+try:
+ import matplotlib as mpl
+ import matplotlib.pyplot as plt
+
+ has_mpl = True
+except ImportError:
+ has_mpl = False
+
+
+@contextmanager
+def _mpl(func: Callable) -> Generator[tuple[Any, Any], None, None]:
+ if has_mpl:
+ yield plt, mpl
+ else:
+ raise ImportError(f"{func.__name__} requires matplotlib.")
+
+
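+# Illustrative sketch (not part of the original module): plotting helpers
+# unpack the guarded modules inside ``with _mpl(...)``, so matplotlib is only
+# required when such a method actually runs. ``_demo`` is hypothetical.
+#
+# >>> def _demo(styler):
+# ...     with _mpl(_demo) as (plt, mpl):
+# ...         return mpl.colors.Normalize(vmin=0, vmax=1)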
+####
+# Shared Doc Strings
+
+subset_args = """subset : label, array-like, IndexSlice, optional
+    A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
+    or single key, to `DataFrame.loc[:, <subset>]` where the columns are
+ prioritised, to limit ``data`` to *before* applying the function."""
+
+properties_args = """props : str, default None
+ CSS properties to use for highlighting. If ``props`` is given, ``color``
+ is not used."""
+
+coloring_args = """color : str, default '{default}'
+ Background color to use for highlighting."""
+
+buffering_args = """buf : str, path object, file-like object, optional
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``write()`` function. If ``None``, the result is
+ returned as a string."""
+
+encoding_args = """encoding : str, optional
+ Character encoding setting for file output (and meta tags if available).
+ Defaults to ``pandas.options.styler.render.encoding`` value of "utf-8"."""
+
+#
+###
+
+
+class Styler(StylerRenderer):
+ r"""
+ Helps style a DataFrame or Series according to the data with HTML and CSS.
+
+ Parameters
+ ----------
+ data : Series or DataFrame
+ Data to be styled - either a Series or DataFrame.
+ precision : int, optional
+ Precision to round floats to. If not given defaults to
+ ``pandas.options.styler.format.precision``.
+
+ .. versionchanged:: 1.4.0
+ table_styles : list-like, default None
+ List of {selector: (attr, value)} dicts; see Notes.
+ uuid : str, default None
+ A unique identifier to avoid CSS collisions; generated automatically.
+ caption : str, tuple, default None
+ String caption to attach to the table. Tuple only used for LaTeX dual captions.
+ table_attributes : str, default None
+        Items that show up in the opening ``<table>`` tag
+        in addition to automatic (by default) id.
+ cell_ids : bool, default True
+ If True, each cell will have an ``id`` attribute in their HTML tag.
+        The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
+        where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
+        number and ``<num_col>`` is the column number.
+ na_rep : str, optional
+ Representation for missing values.
+ If ``na_rep`` is None, no special formatting is applied, and falls back to
+ ``pandas.options.styler.format.na_rep``.
+
+ uuid_len : int, default 5
+ If ``uuid`` is not specified, the length of the ``uuid`` to randomly generate
+ expressed in hex characters, in range [0, 32].
+ decimal : str, optional
+ Character used as decimal separator for floats, complex and integers. If not
+ given uses ``pandas.options.styler.format.decimal``.
+
+ .. versionadded:: 1.3.0
+
+ thousands : str, optional, default None
+ Character used as thousands separator for floats, complex and integers. If not
+ given uses ``pandas.options.styler.format.thousands``.
+
+ .. versionadded:: 1.3.0
+
+ escape : str, optional
+ Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
+ in cell display string with HTML-safe sequences.
+ Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
+ ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
+ LaTeX-safe sequences. Use 'latex-math' to replace the characters
+ the same way as in 'latex' mode, except for math substrings,
+ which either are surrounded by two characters ``$`` or start with
+ the character ``\(`` and end with ``\)``.
+ If not given uses ``pandas.options.styler.format.escape``.
+
+ .. versionadded:: 1.3.0
+ formatter : str, callable, dict, optional
+ Object to define how values are displayed. See ``Styler.format``. If not given
+ uses ``pandas.options.styler.format.formatter``.
+
+ .. versionadded:: 1.4.0
+
+ Attributes
+ ----------
+ env : Jinja2 jinja2.Environment
+ template_html : Jinja2 Template
+ template_html_table : Jinja2 Template
+ template_html_style : Jinja2 Template
+ template_latex : Jinja2 Template
+ loader : Jinja2 Loader
+
+ See Also
+ --------
+ DataFrame.style : Return a Styler object containing methods for building
+ a styled HTML representation for the DataFrame.
+
+ Notes
+ -----
+ Most styling will be done by passing style functions into
+ ``Styler.apply`` or ``Styler.map``. Style functions should
+ return values with strings containing CSS ``'attr: value'`` that will
+ be applied to the indicated cells.
+
+ If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
+ to automatically render itself. Otherwise call Styler.to_html to get
+ the generated HTML.
+
+ CSS classes are attached to the generated HTML
+
+    * Index and Column names include ``index_name`` and ``level<k>``
+      where `k` is its level in a MultiIndex
+    * Index label cells include
+
+      * ``row_heading``
+      * ``row<n>`` where `n` is the numeric position of the row
+      * ``level<k>`` where `k` is the level in a MultiIndex
+
+    * Column label cells include
+      * ``col_heading``
+      * ``col<n>`` where `n` is the numeric position of the column
+      * ``level<k>`` where `k` is the level in a MultiIndex
+
+ * Blank cells include ``blank``
+ * Data cells include ``data``
+ * Trimmed cells include ``col_trim`` or ``row_trim``.
+
+    Any, or all, of these classes can be renamed by using the ``css_class_names``
+    argument in ``Styler.set_table_styles``, giving a value such as
+ *{"row": "MY_ROW_CLASS", "col_trim": "", "row_trim": ""}*.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1.0, 2.0, 3.0], [4, 5, 6]], index=['a', 'b'],
+ ... columns=['A', 'B', 'C'])
+ >>> pd.io.formats.style.Styler(df, precision=2,
+ ... caption="My table") # doctest: +SKIP
+
+ Please see:
+ `Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
+ """
+
+ def __init__(
+ self,
+ data: DataFrame | Series,
+ precision: int | None = None,
+ table_styles: CSSStyles | None = None,
+ uuid: str | None = None,
+ caption: str | tuple | list | None = None,
+ table_attributes: str | None = None,
+ cell_ids: bool = True,
+ na_rep: str | None = None,
+ uuid_len: int = 5,
+ decimal: str | None = None,
+ thousands: str | None = None,
+ escape: str | None = None,
+ formatter: ExtFormatter | None = None,
+ ) -> None:
+ super().__init__(
+ data=data,
+ uuid=uuid,
+ uuid_len=uuid_len,
+ table_styles=table_styles,
+ table_attributes=table_attributes,
+ caption=caption,
+ cell_ids=cell_ids,
+ precision=precision,
+ )
+
+ # validate ordered args
+ thousands = thousands or get_option("styler.format.thousands")
+ decimal = decimal or get_option("styler.format.decimal")
+ na_rep = na_rep or get_option("styler.format.na_rep")
+ escape = escape or get_option("styler.format.escape")
+ formatter = formatter or get_option("styler.format.formatter")
+ # precision is handled by superclass as default for performance
+
+ self.format(
+ formatter=formatter,
+ precision=precision,
+ na_rep=na_rep,
+ escape=escape,
+ decimal=decimal,
+ thousands=thousands,
+ )
+
+ def concat(self, other: Styler) -> Styler:
+ """
+ Append another Styler to combine the output into a single table.
+
+ .. versionadded:: 1.5.0
+
+ Parameters
+ ----------
+ other : Styler
+ The other Styler object which has already been styled and formatted. The
+ data for this Styler must have the same columns as the original, and the
+ number of index levels must also be the same to render correctly.
+
+ Returns
+ -------
+ Styler
+
+ Notes
+ -----
+ The purpose of this method is to extend existing styled dataframes with other
+ metrics that may be useful but may not conform to the original's structure.
+ For example adding a sub total row, or displaying metrics such as means,
+ variance or counts.
+
+ Styles that are applied using the ``apply``, ``map``, ``apply_index``
+ and ``map_index``, and formatting applied with ``format`` and
+ ``format_index`` will be preserved.
+
+ .. warning::
+ Only the output methods ``to_html``, ``to_string`` and ``to_latex``
+ currently work with concatenated Stylers.
+
+ Other output methods, including ``to_excel``, **do not** work with
+ concatenated Stylers.
+
+ The following should be noted:
+
+ - ``table_styles``, ``table_attributes``, ``caption`` and ``uuid`` are all
+ inherited from the original Styler and not ``other``.
+ - hidden columns and hidden index levels will be inherited from the
+ original Styler
+ - ``css`` will be inherited from the original Styler, and the value of
+ keys ``data``, ``row_heading`` and ``row`` will be prepended with
+ ``foot0_``. If more concats are chained, their styles will be prepended
+          with ``foot1_``, ``foot2_``, etc., and if a concatenated style has
+          another concatenated style, the second style will be prepended with
+ ``foot{parent}_foot{child}_``.
+
+ A common use case is to concatenate user defined functions with
+ ``DataFrame.agg`` or with described statistics via ``DataFrame.describe``.
+ See examples.
+
+ Examples
+ --------
+ A common use case is adding totals rows, or otherwise, via methods calculated
+ in ``DataFrame.agg``.
+
+ >>> df = pd.DataFrame([[4, 6], [1, 9], [3, 4], [5, 5], [9, 6]],
+ ... columns=["Mike", "Jim"],
+ ... index=["Mon", "Tue", "Wed", "Thurs", "Fri"])
+ >>> styler = df.style.concat(df.agg(["sum"]).style) # doctest: +SKIP
+
+ .. figure:: ../../_static/style/footer_simple.png
+
+ Since the concatenated object is a Styler the existing functionality can be
+ used to conditionally format it as well as the original.
+
+ >>> descriptors = df.agg(["sum", "mean", lambda s: s.dtype])
+ >>> descriptors.index = ["Total", "Average", "dtype"]
+ >>> other = (descriptors.style
+ ... .highlight_max(axis=1, subset=(["Total", "Average"], slice(None)))
+ ... .format(subset=("Average", slice(None)), precision=2, decimal=",")
+ ... .map(lambda v: "font-weight: bold;"))
+ >>> styler = (df.style
+ ... .highlight_max(color="salmon")
+ ... .set_table_styles([{"selector": ".foot_row0",
+ ... "props": "border-top: 1px solid black;"}]))
+ >>> styler.concat(other) # doctest: +SKIP
+
+ .. figure:: ../../_static/style/footer_extended.png
+
+ When ``other`` has fewer index levels than the original Styler it is possible
+ to extend the index in ``other``, with placeholder levels.
+
+ >>> df = pd.DataFrame([[1], [2]],
+ ... index=pd.MultiIndex.from_product([[0], [1, 2]]))
+ >>> descriptors = df.agg(["sum"])
+ >>> descriptors.index = pd.MultiIndex.from_product([[""], descriptors.index])
+ >>> df.style.concat(descriptors.style) # doctest: +SKIP
+ """
+ if not isinstance(other, Styler):
+ raise TypeError("`other` must be of type `Styler`")
+ if not self.data.columns.equals(other.data.columns):
+ raise ValueError("`other.data` must have same columns as `Styler.data`")
+ if not self.data.index.nlevels == other.data.index.nlevels:
+ raise ValueError(
+ "number of index levels must be same in `other` "
+ "as in `Styler`. See documentation for suggestions."
+ )
+ self.concatenated.append(other)
+ return self
+
+ def _repr_html_(self) -> str | None:
+ """
+ Hooks into Jupyter notebook rich display system, which calls _repr_html_ by
+ default if an object is returned at the end of a cell.
+ """
+ if get_option("styler.render.repr") == "html":
+ return self.to_html()
+ return None
+
+ def _repr_latex_(self) -> str | None:
+ if get_option("styler.render.repr") == "latex":
+ return self.to_latex()
+ return None
+
+ def set_tooltips(
+ self,
+ ttips: DataFrame,
+ props: CSSProperties | None = None,
+ css_class: str | None = None,
+ ) -> Styler:
+ """
+ Set the DataFrame of strings on ``Styler`` generating ``:hover`` tooltips.
+
+        These string based tooltips are only applicable to ``<td>`` HTML
+        elements, and cannot be used for column or index headers.
+
+ .. versionadded:: 1.3.0
+
+ Parameters
+ ----------
+ ttips : DataFrame
+ DataFrame containing strings that will be translated to tooltips, mapped
+ by identical column and index values that must exist on the underlying
+ Styler data. None, NaN values, and empty strings will be ignored and
+ not affect the rendered HTML.
+ props : list-like or str, optional
+ List of (attr, value) tuples or a valid CSS string. If ``None`` adopts
+ the internal default values described in notes.
+ css_class : str, optional
+ Name of the tooltip class used in CSS, should conform to HTML standards.
+ Only useful if integrating tooltips with external CSS. If ``None`` uses the
+ internal default value 'pd-t'.
+
+ Returns
+ -------
+ Styler
+
+ Notes
+ -----
+        Tooltips are created by adding ``<span class="pd-t"></span>`` to each
+        data cell and then manipulating the table level CSS to attach pseudo
+        hover and pseudo after selectors to produce the required results.
+
+ The default properties for the tooltip CSS class are:
+
+ - visibility: hidden
+ - position: absolute
+ - z-index: 1
+ - background-color: black
+ - color: white
+ - transform: translate(-20px, -20px)
+
+ The property 'visibility: hidden;' is a key prerequisite to the hover
+ functionality, and should always be included in any manual properties
+ specification, using the ``props`` argument.
+
+ Tooltips are not designed to be efficient, and can add large amounts of
+ additional HTML for larger tables, since they also require that ``cell_ids``
+ is forced to `True`.
+
+ Examples
+ --------
+ Basic application
+
+ >>> df = pd.DataFrame(data=[[0, 1], [2, 3]])
+ >>> ttips = pd.DataFrame(
+ ... data=[["Min", ""], [np.nan, "Max"]], columns=df.columns, index=df.index
+ ... )
+ >>> s = df.style.set_tooltips(ttips).to_html()
+
+ Optionally controlling the tooltip visual display
+
+ >>> df.style.set_tooltips(ttips, css_class='tt-add', props=[
+ ... ('visibility', 'hidden'),
+ ... ('position', 'absolute'),
+ ... ('z-index', 1)]) # doctest: +SKIP
+ >>> df.style.set_tooltips(ttips, css_class='tt-add',
+ ... props='visibility:hidden; position:absolute; z-index:1;')
+ ... # doctest: +SKIP
+ """
+ if not self.cell_ids:
+ # tooltips not optimised for individual cell check. requires reasonable
+ # redesign and more extensive code for a feature that might be rarely used.
+ raise NotImplementedError(
+ "Tooltips can only render with 'cell_ids' is True."
+ )
+ if not ttips.index.is_unique or not ttips.columns.is_unique:
+ raise KeyError(
+ "Tooltips render only if `ttips` has unique index and columns."
+ )
+ if self.tooltips is None: # create a default instance if necessary
+ self.tooltips = Tooltips()
+ self.tooltips.tt_data = ttips
+ if props:
+ self.tooltips.class_properties = props
+ if css_class:
+ self.tooltips.class_name = css_class
+
+ return self
+
+ @doc(
+ NDFrame.to_excel,
+ klass="Styler",
+ storage_options=_shared_docs["storage_options"],
+ storage_options_versionadded="1.5.0",
+ )
+ def to_excel(
+ self,
+ excel_writer: FilePath | WriteExcelBuffer | ExcelWriter,
+ sheet_name: str = "Sheet1",
+ na_rep: str = "",
+ float_format: str | None = None,
+ columns: Sequence[Hashable] | None = None,
+ header: Sequence[Hashable] | bool = True,
+ index: bool = True,
+ index_label: IndexLabel | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ engine: str | None = None,
+ merge_cells: bool = True,
+ encoding: str | None = None,
+ inf_rep: str = "inf",
+ verbose: bool = True,
+ freeze_panes: tuple[int, int] | None = None,
+ storage_options: StorageOptions | None = None,
+ ) -> None:
+ from pandas.io.formats.excel import ExcelFormatter
+
+ formatter = ExcelFormatter(
+ self,
+ na_rep=na_rep,
+ cols=columns,
+ header=header,
+ float_format=float_format,
+ index=index,
+ index_label=index_label,
+ merge_cells=merge_cells,
+ inf_rep=inf_rep,
+ )
+ formatter.write(
+ excel_writer,
+ sheet_name=sheet_name,
+ startrow=startrow,
+ startcol=startcol,
+ freeze_panes=freeze_panes,
+ engine=engine,
+ storage_options=storage_options,
+ )
+
+ @overload
+ def to_latex(
+ self,
+ buf: FilePath | WriteBuffer[str],
+ *,
+ column_format: str | None = ...,
+ position: str | None = ...,
+ position_float: str | None = ...,
+ hrules: bool | None = ...,
+ clines: str | None = ...,
+ label: str | None = ...,
+ caption: str | tuple | None = ...,
+ sparse_index: bool | None = ...,
+ sparse_columns: bool | None = ...,
+ multirow_align: str | None = ...,
+ multicol_align: str | None = ...,
+ siunitx: bool = ...,
+ environment: str | None = ...,
+ encoding: str | None = ...,
+ convert_css: bool = ...,
+ ) -> None:
+ ...
+
+ @overload
+ def to_latex(
+ self,
+ buf: None = ...,
+ *,
+ column_format: str | None = ...,
+ position: str | None = ...,
+ position_float: str | None = ...,
+ hrules: bool | None = ...,
+ clines: str | None = ...,
+ label: str | None = ...,
+ caption: str | tuple | None = ...,
+ sparse_index: bool | None = ...,
+ sparse_columns: bool | None = ...,
+ multirow_align: str | None = ...,
+ multicol_align: str | None = ...,
+ siunitx: bool = ...,
+ environment: str | None = ...,
+ encoding: str | None = ...,
+ convert_css: bool = ...,
+ ) -> str:
+ ...
+
+ def to_latex(
+ self,
+ buf: FilePath | WriteBuffer[str] | None = None,
+ *,
+ column_format: str | None = None,
+ position: str | None = None,
+ position_float: str | None = None,
+ hrules: bool | None = None,
+ clines: str | None = None,
+ label: str | None = None,
+ caption: str | tuple | None = None,
+ sparse_index: bool | None = None,
+ sparse_columns: bool | None = None,
+ multirow_align: str | None = None,
+ multicol_align: str | None = None,
+ siunitx: bool = False,
+ environment: str | None = None,
+ encoding: str | None = None,
+ convert_css: bool = False,
+ ) -> str | None:
+ r"""
+ Write Styler to a file, buffer or string in LaTeX format.
+
+ .. versionadded:: 1.3.0
+
+ Parameters
+ ----------
+ buf : str, path object, file-like object, or None, default None
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a string ``write()`` function. If None, the result is
+ returned as a string.
+ column_format : str, optional
+ The LaTeX column specification placed in location:
+
+            \\begin{tabular}{<column_format>}
+
+ Defaults to 'l' for index and
+ non-numeric data columns, and, for numeric data columns,
+ to 'r' by default, or 'S' if ``siunitx`` is ``True``.
+ position : str, optional
+ The LaTeX positional argument (e.g. 'h!') for tables, placed in location:
+
+            ``\\begin{table}[<position>]``.
+ position_float : {"centering", "raggedleft", "raggedright"}, optional
+ The LaTeX float command placed in location:
+
+            \\begin{table}[<position>]
+
+            \\<position_float>
+
+ Cannot be used if ``environment`` is "longtable".
+ hrules : bool
+ Set to `True` to add \\toprule, \\midrule and \\bottomrule from the
+ {booktabs} LaTeX package.
+ Defaults to ``pandas.options.styler.latex.hrules``, which is `False`.
+
+ .. versionchanged:: 1.4.0
+ clines : str, optional
+ Use to control adding \\cline commands for the index labels separation.
+ Possible values are:
+
+ - `None`: no cline commands are added (default).
+ - `"all;data"`: a cline is added for every index value extending the
+ width of the table, including data entries.
+ - `"all;index"`: as above with lines extending only the width of the
+ index entries.
+ - `"skip-last;data"`: a cline is added for each index value except the
+              last level (which is never sparsified), extending the width of the
+ table.
+ - `"skip-last;index"`: as above with lines extending only the width of the
+ index entries.
+
+ .. versionadded:: 1.4.0
+ label : str, optional
+ The LaTeX label included as: \\label{