applied-ai-018 committed
Commit 0906520 · verified · 1 parent: 345605d

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_117_mp_rank_02_optim_states.pt +3 -0
  2. venv/lib/python3.10/site-packages/pandas/compat/__init__.py +197 -0
  3. venv/lib/python3.10/site-packages/pandas/compat/__pycache__/__init__.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_constants.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_optional.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pickle_compat.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pyarrow.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/pandas/compat/_constants.py +30 -0
  10. venv/lib/python3.10/site-packages/pandas/compat/_optional.py +168 -0
  11. venv/lib/python3.10/site-packages/pandas/compat/compressors.py +77 -0
  12. venv/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py +53 -0
  13. venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/function.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/pandas/compat/numpy/function.py +418 -0
  16. venv/lib/python3.10/site-packages/pandas/compat/pickle_compat.py +262 -0
  17. venv/lib/python3.10/site-packages/pandas/compat/pyarrow.py +27 -0
  18. venv/lib/python3.10/site-packages/pandas/io/__init__.py +13 -0
  19. venv/lib/python3.10/site-packages/pandas/io/_util.py +34 -0
  20. venv/lib/python3.10/site-packages/pandas/io/api.py +65 -0
  21. venv/lib/python3.10/site-packages/pandas/io/clipboards.py +197 -0
  22. venv/lib/python3.10/site-packages/pandas/io/common.py +1267 -0
  23. venv/lib/python3.10/site-packages/pandas/io/feather_format.py +143 -0
  24. venv/lib/python3.10/site-packages/pandas/io/formats/__init__.py +9 -0
  25. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/pandas/io/formats/_color_data.py +157 -0
  40. venv/lib/python3.10/site-packages/pandas/io/formats/console.py +94 -0
  41. venv/lib/python3.10/site-packages/pandas/io/formats/css.py +421 -0
  42. venv/lib/python3.10/site-packages/pandas/io/formats/csvs.py +330 -0
  43. venv/lib/python3.10/site-packages/pandas/io/formats/excel.py +962 -0
  44. venv/lib/python3.10/site-packages/pandas/io/formats/format.py +2058 -0
  45. venv/lib/python3.10/site-packages/pandas/io/formats/html.py +646 -0
  46. venv/lib/python3.10/site-packages/pandas/io/formats/info.py +1101 -0
  47. venv/lib/python3.10/site-packages/pandas/io/formats/printing.py +572 -0
  48. venv/lib/python3.10/site-packages/pandas/io/formats/string.py +206 -0
  49. venv/lib/python3.10/site-packages/pandas/io/formats/style.py +0 -0
  50. venv/lib/python3.10/site-packages/pandas/io/formats/style_render.py +2497 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_117_mp_rank_02_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8634498c78ef5764c676db79ff9d360b4afd7e3a4e6c68c11730145ccfab4d01
+ size 41830340
venv/lib/python3.10/site-packages/pandas/compat/__init__.py ADDED
@@ -0,0 +1,197 @@
+ """
+ compat
+ ======
+
+ Cross-compatible functions for different versions of Python.
+
+ Other items:
+ * platform checker
+ """
+ from __future__ import annotations
+
+ import os
+ import platform
+ import sys
+ from typing import TYPE_CHECKING
+
+ from pandas.compat._constants import (
+     IS64,
+     ISMUSL,
+     PY310,
+     PY311,
+     PY312,
+     PYPY,
+ )
+ import pandas.compat.compressors
+ from pandas.compat.numpy import is_numpy_dev
+ from pandas.compat.pyarrow import (
+     pa_version_under10p1,
+     pa_version_under11p0,
+     pa_version_under13p0,
+     pa_version_under14p0,
+     pa_version_under14p1,
+     pa_version_under16p0,
+ )
+
+ if TYPE_CHECKING:
+     from pandas._typing import F
+
+
+ def set_function_name(f: F, name: str, cls: type) -> F:
+     """
+     Bind the name/qualname attributes of the function.
+     """
+     f.__name__ = name
+     f.__qualname__ = f"{cls.__name__}.{name}"
+     f.__module__ = cls.__module__
+     return f
+
+
+ def is_platform_little_endian() -> bool:
+     """
+     Checking if the running platform is little endian.
+
+     Returns
+     -------
+     bool
+         True if the running platform is little endian.
+     """
+     return sys.byteorder == "little"
+
+
+ def is_platform_windows() -> bool:
+     """
+     Checking if the running platform is windows.
+
+     Returns
+     -------
+     bool
+         True if the running platform is windows.
+     """
+     return sys.platform in ["win32", "cygwin"]
+
+
+ def is_platform_linux() -> bool:
+     """
+     Checking if the running platform is linux.
+
+     Returns
+     -------
+     bool
+         True if the running platform is linux.
+     """
+     return sys.platform == "linux"
+
+
+ def is_platform_mac() -> bool:
+     """
+     Checking if the running platform is mac.
+
+     Returns
+     -------
+     bool
+         True if the running platform is mac.
+     """
+     return sys.platform == "darwin"
+
+
+ def is_platform_arm() -> bool:
+     """
+     Checking if the running platform uses ARM architecture.
+
+     Returns
+     -------
+     bool
+         True if the running platform uses ARM architecture.
+     """
+     return platform.machine() in ("arm64", "aarch64") or platform.machine().startswith(
+         "armv"
+     )
+
+
+ def is_platform_power() -> bool:
+     """
+     Checking if the running platform uses Power architecture.
+
+     Returns
+     -------
+     bool
+         True if the running platform uses Power architecture.
+     """
+     return platform.machine() in ("ppc64", "ppc64le")
+
+
+ def is_ci_environment() -> bool:
+     """
+     Checking if running in a continuous integration environment by checking
+     the PANDAS_CI environment variable.
+
+     Returns
+     -------
+     bool
+         True if running in a continuous integration environment.
+     """
+     return os.environ.get("PANDAS_CI", "0") == "1"
+
+
+ def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]:
+     """
+     Importing the `LZMAFile` class from the `lzma` module.
+
+     Returns
+     -------
+     class
+         The `LZMAFile` class from the `lzma` module.
+
+     Raises
+     ------
+     RuntimeError
+         If the `lzma` module was not imported correctly, or didn't exist.
+     """
+     if not pandas.compat.compressors.has_lzma:
+         raise RuntimeError(
+             "lzma module not available. "
+             "A Python re-install with the proper dependencies "
+             "might be required to solve this issue."
+         )
+     return pandas.compat.compressors.LZMAFile
+
+
+ def get_bz2_file() -> type[pandas.compat.compressors.BZ2File]:
+     """
+     Importing the `BZ2File` class from the `bz2` module.
+
+     Returns
+     -------
+     class
+         The `BZ2File` class from the `bz2` module.
+
+     Raises
+     ------
+     RuntimeError
+         If the `bz2` module was not imported correctly, or didn't exist.
+     """
+     if not pandas.compat.compressors.has_bz2:
+         raise RuntimeError(
+             "bz2 module not available. "
+             "A Python re-install with the proper dependencies "
+             "might be required to solve this issue."
+         )
+     return pandas.compat.compressors.BZ2File
+
+
+ __all__ = [
+     "is_numpy_dev",
+     "pa_version_under10p1",
+     "pa_version_under11p0",
+     "pa_version_under13p0",
+     "pa_version_under14p0",
+     "pa_version_under14p1",
+     "pa_version_under16p0",
+     "IS64",
+     "ISMUSL",
+     "PY310",
+     "PY311",
+     "PY312",
+     "PYPY",
+ ]
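A minimal sketch of how these shims are consumed downstream (the pandas names are real; the file name, payload, and newline choice are illustrative):

    from pandas.compat import get_lzma_file, is_platform_windows

    # get_lzma_file() raises a uniform RuntimeError when CPython was built
    # without the optional lzma module, instead of failing deep inside a
    # read_csv(..., compression="xz") call.
    with get_lzma_file()("frame.csv.xz", mode="wb") as fh:
        fh.write(b"a,b\n1,2\n")

    # The platform checkers are plain booleans, used e.g. to pick line endings.
    newline = "\r\n" if is_platform_windows() else "\n"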
venv/lib/python3.10/site-packages/pandas/compat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.72 kB)

venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_constants.cpython-310.pyc ADDED
Binary file (719 Bytes)

venv/lib/python3.10/site-packages/pandas/compat/__pycache__/_optional.cpython-310.pyc ADDED
Binary file (4.38 kB)

venv/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc ADDED
Binary file (1.74 kB)

venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pickle_compat.cpython-310.pyc ADDED
Binary file (5.68 kB)

venv/lib/python3.10/site-packages/pandas/compat/__pycache__/pyarrow.cpython-310.pyc ADDED
Binary file (860 Bytes)
venv/lib/python3.10/site-packages/pandas/compat/_constants.py ADDED
@@ -0,0 +1,30 @@
+ """
+ _constants
+ ======
+
+ Constants relevant for the Python implementation.
+ """
+
+ from __future__ import annotations
+
+ import platform
+ import sys
+ import sysconfig
+
+ IS64 = sys.maxsize > 2**32
+
+ PY310 = sys.version_info >= (3, 10)
+ PY311 = sys.version_info >= (3, 11)
+ PY312 = sys.version_info >= (3, 12)
+ PYPY = platform.python_implementation() == "PyPy"
+ ISMUSL = "musl" in (sysconfig.get_config_var("HOST_GNU_TYPE") or "")
+ REF_COUNT = 2 if PY311 else 3
+
+ __all__ = [
+     "IS64",
+     "ISMUSL",
+     "PY310",
+     "PY311",
+     "PY312",
+     "PYPY",
+ ]
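A hedged sketch of how such import-time flags are used at call sites (the function and branch bodies are placeholders, not pandas code):

    from pandas.compat._constants import ISMUSL, PY312

    # Computed once at import, the flags make version/platform gating
    # cheap and readable at every call site.
    def pick_io_timeout() -> float:
        # hypothetical tuning: give musl-libc builds a longer timeout
        return 5.0 if ISMUSL else 1.0

    if PY312:
        # e.g. opt into a 3.12-only fast path
        pass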
venv/lib/python3.10/site-packages/pandas/compat/_optional.py ADDED
@@ -0,0 +1,168 @@
+ from __future__ import annotations
+
+ import importlib
+ import sys
+ from typing import TYPE_CHECKING
+ import warnings
+
+ from pandas.util._exceptions import find_stack_level
+
+ from pandas.util.version import Version
+
+ if TYPE_CHECKING:
+     import types
+
+ # Update install.rst & setup.cfg when updating versions!
+
+ VERSIONS = {
+     "adbc-driver-postgresql": "0.8.0",
+     "adbc-driver-sqlite": "0.8.0",
+     "bs4": "4.11.2",
+     "blosc": "1.21.3",
+     "bottleneck": "1.3.6",
+     "dataframe-api-compat": "0.1.7",
+     "fastparquet": "2022.12.0",
+     "fsspec": "2022.11.0",
+     "html5lib": "1.1",
+     "hypothesis": "6.46.1",
+     "gcsfs": "2022.11.0",
+     "jinja2": "3.1.2",
+     "lxml.etree": "4.9.2",
+     "matplotlib": "3.6.3",
+     "numba": "0.56.4",
+     "numexpr": "2.8.4",
+     "odfpy": "1.4.1",
+     "openpyxl": "3.1.0",
+     "pandas_gbq": "0.19.0",
+     "psycopg2": "2.9.6",  # (dt dec pq3 ext lo64)
+     "pymysql": "1.0.2",
+     "pyarrow": "10.0.1",
+     "pyreadstat": "1.2.0",
+     "pytest": "7.3.2",
+     "python-calamine": "0.1.7",
+     "pyxlsb": "1.0.10",
+     "s3fs": "2022.11.0",
+     "scipy": "1.10.0",
+     "sqlalchemy": "2.0.0",
+     "tables": "3.8.0",
+     "tabulate": "0.9.0",
+     "xarray": "2022.12.0",
+     "xlrd": "2.0.1",
+     "xlsxwriter": "3.0.5",
+     "zstandard": "0.19.0",
+     "tzdata": "2022.7",
+     "qtpy": "2.3.0",
+     "pyqt5": "5.15.9",
+ }
+
+ # A mapping from import name to package name (on PyPI) for packages where
+ # these two names are different.
+
+ INSTALL_MAPPING = {
+     "bs4": "beautifulsoup4",
+     "bottleneck": "Bottleneck",
+     "jinja2": "Jinja2",
+     "lxml.etree": "lxml",
+     "odf": "odfpy",
+     "pandas_gbq": "pandas-gbq",
+     "python_calamine": "python-calamine",
+     "sqlalchemy": "SQLAlchemy",
+     "tables": "pytables",
+ }
+
+
+ def get_version(module: types.ModuleType) -> str:
+     version = getattr(module, "__version__", None)
+
+     if version is None:
+         raise ImportError(f"Can't determine version for {module.__name__}")
+     if module.__name__ == "psycopg2":
+         # psycopg2 appends " (dt dec pq3 ext lo64)" to its version
+         version = version.split()[0]
+     return version
+
+
+ def import_optional_dependency(
+     name: str,
+     extra: str = "",
+     errors: str = "raise",
+     min_version: str | None = None,
+ ):
+     """
+     Import an optional dependency.
+
+     By default, if a dependency is missing an ImportError with a nice
+     message will be raised. If a dependency is present, but too old,
+     we raise.
+
+     Parameters
+     ----------
+     name : str
+         The module name.
+     extra : str
+         Additional text to include in the ImportError message.
+     errors : str {'raise', 'warn', 'ignore'}
+         What to do when a dependency is not found or its version is too old.
+
+         * raise : Raise an ImportError
+         * warn : Only applicable when a module's version is too old.
+           Warns that the version is too old and returns None
+         * ignore: If the module is not installed, return None, otherwise,
+           return the module, even if the version is too old.
+           It's expected that users validate the version locally when
+           using ``errors="ignore"`` (see ``io/html.py``)
+     min_version : str, default None
+         Specify a minimum version that is different from the global pandas
+         minimum version required.
+
+     Returns
+     -------
+     maybe_module : Optional[ModuleType]
+         The imported module, when found and the version is correct.
+         None is returned when the package is not found and `errors`
+         is not ``'raise'``, or when the package's version is too old and
+         `errors` is ``'warn'`` or ``'ignore'``.
+     """
+     assert errors in {"warn", "raise", "ignore"}
+
+     package_name = INSTALL_MAPPING.get(name)
+     install_name = package_name if package_name is not None else name
+
+     msg = (
+         f"Missing optional dependency '{install_name}'. {extra} "
+         f"Use pip or conda to install {install_name}."
+     )
+     try:
+         module = importlib.import_module(name)
+     except ImportError:
+         if errors == "raise":
+             raise ImportError(msg)
+         return None
+
+     # Handle submodules: if we have a submodule, grab the parent module
+     # from sys.modules
+     parent = name.split(".")[0]
+     if parent != name:
+         install_name = parent
+         module_to_get = sys.modules[install_name]
+     else:
+         module_to_get = module
+     minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
+     if minimum_version:
+         version = get_version(module_to_get)
+         if version and Version(version) < Version(minimum_version):
+             msg = (
+                 f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
+                 f"(version '{version}' currently installed)."
+             )
+             if errors == "warn":
+                 warnings.warn(
+                     msg,
+                     UserWarning,
+                     stacklevel=find_stack_level(),
+                 )
+                 return None
+             elif errors == "raise":
+                 raise ImportError(msg)
+             else:
+                 return None
+
+     return module
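A minimal sketch of how call sites use this helper (the ``extra`` text is made up for illustration; the first call raises if the package is missing):

    from pandas.compat._optional import import_optional_dependency

    # Missing packages raise a uniform ImportError naming the PyPI package
    # (here "beautifulsoup4" via INSTALL_MAPPING), not the import name:
    try:
        soup = import_optional_dependency("bs4", extra="read_html needs it.")
    except ImportError as err:
        print(err)

    # errors="ignore" returns None when the module is missing and skips the
    # version gate when it is present, leaving validation to the caller:
    lxml = import_optional_dependency("lxml.etree", errors="ignore")
    if lxml is None:
        print("lxml not installed; falling back to another parser")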
venv/lib/python3.10/site-packages/pandas/compat/compressors.py ADDED
@@ -0,0 +1,77 @@
+ """
+ Patched ``BZ2File`` and ``LZMAFile`` to handle pickle protocol 5.
+ """
+
+ from __future__ import annotations
+
+ from pickle import PickleBuffer
+
+ from pandas.compat._constants import PY310
+
+ try:
+     import bz2
+
+     has_bz2 = True
+ except ImportError:
+     has_bz2 = False
+
+ try:
+     import lzma
+
+     has_lzma = True
+ except ImportError:
+     has_lzma = False
+
+
+ def flatten_buffer(
+     b: bytes | bytearray | memoryview | PickleBuffer,
+ ) -> bytes | bytearray | memoryview:
+     """
+     Return some 1-D `uint8` typed buffer.
+
+     Coerces anything that does not match that description to one that does
+     without copying if possible (otherwise will copy).
+     """
+
+     if isinstance(b, (bytes, bytearray)):
+         return b
+
+     if not isinstance(b, PickleBuffer):
+         b = PickleBuffer(b)
+
+     try:
+         # coerce to 1-D `uint8` C-contiguous `memoryview` zero-copy
+         return b.raw()
+     except BufferError:
+         # perform in-memory copy if buffer is not contiguous
+         return memoryview(b).tobytes("A")
+
+
+ if has_bz2:
+
+     class BZ2File(bz2.BZ2File):
+         if not PY310:
+
+             def write(self, b) -> int:
+                 # Workaround issue where `bz2.BZ2File` expects `len`
+                 # to return the number of bytes in `b` by converting
+                 # `b` into something that meets that constraint with
+                 # minimal copying.
+                 #
+                 # Note: This is fixed in Python 3.10.
+                 return super().write(flatten_buffer(b))
+
+
+ if has_lzma:
+
+     class LZMAFile(lzma.LZMAFile):
+         if not PY310:
+
+             def write(self, b) -> int:
+                 # Workaround issue where `lzma.LZMAFile` expects `len`
+                 # to return the number of bytes in `b` by converting
+                 # `b` into something that meets that constraint with
+                 # minimal copying.
+                 #
+                 # Note: This is fixed in Python 3.10.
+                 return super().write(flatten_buffer(b))
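A small demonstration of what flatten_buffer guarantees (numpy is used here only to build a multi-dimensional buffer):

    import numpy as np
    from pandas.compat.compressors import flatten_buffer

    # For a 2-D array, len() counts rows (3 here), but BZ2File/LZMAFile.write
    # on Python < 3.10 used len() as a byte count.  flatten_buffer coerces the
    # input to a 1-D uint8 buffer whose len() equals the byte size.
    arr = np.arange(12, dtype="uint8").reshape(3, 4)
    flat = flatten_buffer(arr)
    assert len(arr) == 3
    assert len(flat) == arr.nbytes  # 12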
venv/lib/python3.10/site-packages/pandas/compat/numpy/__init__.py ADDED
@@ -0,0 +1,53 @@
+ """ support numpy compatibility across versions """
+ import warnings
+
+ import numpy as np
+
+ from pandas.util.version import Version
+
+ # numpy versioning
+ _np_version = np.__version__
+ _nlv = Version(_np_version)
+ np_version_lt1p23 = _nlv < Version("1.23")
+ np_version_gte1p24 = _nlv >= Version("1.24")
+ np_version_gte1p24p3 = _nlv >= Version("1.24.3")
+ np_version_gte1p25 = _nlv >= Version("1.25")
+ np_version_gt2 = _nlv >= Version("2.0.0.dev0")
+ is_numpy_dev = _nlv.dev is not None
+ _min_numpy_ver = "1.22.4"
+
+
+ if _nlv < Version(_min_numpy_ver):
+     raise ImportError(
+         f"this version of pandas is incompatible with numpy < {_min_numpy_ver}\n"
+         f"your numpy version is {_np_version}.\n"
+         f"Please upgrade numpy to >= {_min_numpy_ver} to use this pandas version"
+     )
+
+
+ np_long: type
+ np_ulong: type
+
+ if np_version_gt2:
+     try:
+         with warnings.catch_warnings():
+             warnings.filterwarnings(
+                 "ignore",
+                 r".*In the future `np\.long` will be defined as.*",
+                 FutureWarning,
+             )
+             np_long = np.long  # type: ignore[attr-defined]
+             np_ulong = np.ulong  # type: ignore[attr-defined]
+     except AttributeError:
+         np_long = np.int_
+         np_ulong = np.uint
+ else:
+     np_long = np.int_
+     np_ulong = np.uint
+
+
+ __all__ = [
+     "np",
+     "_np_version",
+     "is_numpy_dev",
+ ]
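A brief sketch of the aliases in use (illustrative; per the code above, np_long resolves to np.long on NumPy >= 2 and falls back to np.int_ otherwise):

    import numpy as np
    from pandas.compat.numpy import np_long, np_version_gt2

    # Build a platform-"long" array without branching on the NumPy version.
    arr = np.zeros(3, dtype=np_long)
    print(arr.dtype, "running NumPy >= 2:", np_version_gt2)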
venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.27 kB)

venv/lib/python3.10/site-packages/pandas/compat/numpy/__pycache__/function.cpython-310.pyc ADDED
Binary file (10.5 kB)
venv/lib/python3.10/site-packages/pandas/compat/numpy/function.py ADDED
@@ -0,0 +1,418 @@
+ """
+ For compatibility with numpy libraries, pandas functions or methods have to
+ accept '*args' and '**kwargs' parameters to accommodate numpy arguments that
+ are not actually used or respected in the pandas implementation.
+
+ To ensure that users do not abuse these parameters, validation is performed in
+ 'validators.py' to make sure that any extra parameters passed correspond ONLY
+ to those in the numpy signature. Part of that validation includes whether or
+ not the user attempted to pass in non-default values for these extraneous
+ parameters. As we want to discourage users from relying on these parameters
+ when calling the pandas implementation, we want them only to pass in the
+ default values for these parameters.
+
+ This module provides a set of commonly used default arguments for functions and
+ methods that are spread throughout the codebase. This module will make it
+ easier to adjust to future upstream changes in the analogous numpy signatures.
+ """
+ from __future__ import annotations
+
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     TypeVar,
+     cast,
+     overload,
+ )
+
+ import numpy as np
+ from numpy import ndarray
+
+ from pandas._libs.lib import (
+     is_bool,
+     is_integer,
+ )
+ from pandas.errors import UnsupportedFunctionCall
+ from pandas.util._validators import (
+     validate_args,
+     validate_args_and_kwargs,
+     validate_kwargs,
+ )
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         Axis,
+         AxisInt,
+     )
+
+     AxisNoneT = TypeVar("AxisNoneT", Axis, None)
+
+
+ class CompatValidator:
+     def __init__(
+         self,
+         defaults,
+         fname=None,
+         method: str | None = None,
+         max_fname_arg_count=None,
+     ) -> None:
+         self.fname = fname
+         self.method = method
+         self.defaults = defaults
+         self.max_fname_arg_count = max_fname_arg_count
+
+     def __call__(
+         self,
+         args,
+         kwargs,
+         fname=None,
+         max_fname_arg_count=None,
+         method: str | None = None,
+     ) -> None:
+         if not args and not kwargs:
+             return None
+
+         fname = self.fname if fname is None else fname
+         max_fname_arg_count = (
+             self.max_fname_arg_count
+             if max_fname_arg_count is None
+             else max_fname_arg_count
+         )
+         method = self.method if method is None else method
+
+         if method == "args":
+             validate_args(fname, args, max_fname_arg_count, self.defaults)
+         elif method == "kwargs":
+             validate_kwargs(fname, kwargs, self.defaults)
+         elif method == "both":
+             validate_args_and_kwargs(
+                 fname, args, kwargs, max_fname_arg_count, self.defaults
+             )
+         else:
+             raise ValueError(f"invalid validation method '{method}'")
+
+
+ ARGMINMAX_DEFAULTS = {"out": None}
+ validate_argmin = CompatValidator(
+     ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1
+ )
+ validate_argmax = CompatValidator(
+     ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1
+ )
+
+
+ def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]:
+     if isinstance(skipna, ndarray) or skipna is None:
+         args = (skipna,) + args
+         skipna = True
+
+     return skipna, args
+
+
+ def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
+     """
+     If 'Series.argmin' is called via the 'numpy' library, the third parameter
+     in its signature is 'out', which takes either an ndarray or 'None', so
+     check if the 'skipna' parameter is either an instance of ndarray or is
+     None, since 'skipna' itself should be a boolean
+     """
+     skipna, args = process_skipna(skipna, args)
+     validate_argmin(args, kwargs)
+     return skipna
+
+
+ def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
+     """
+     If 'Series.argmax' is called via the 'numpy' library, the third parameter
+     in its signature is 'out', which takes either an ndarray or 'None', so
+     check if the 'skipna' parameter is either an instance of ndarray or is
+     None, since 'skipna' itself should be a boolean
+     """
+     skipna, args = process_skipna(skipna, args)
+     validate_argmax(args, kwargs)
+     return skipna
+
+
+ ARGSORT_DEFAULTS: dict[str, int | str | None] = {}
+ ARGSORT_DEFAULTS["axis"] = -1
+ ARGSORT_DEFAULTS["kind"] = "quicksort"
+ ARGSORT_DEFAULTS["order"] = None
+ ARGSORT_DEFAULTS["kind"] = None
+ ARGSORT_DEFAULTS["stable"] = None
+
+
+ validate_argsort = CompatValidator(
+     ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both"
+ )
+
+ # two different signatures of argsort; this second validation is for when the
+ # `kind` param is supported
+ ARGSORT_DEFAULTS_KIND: dict[str, int | None] = {}
+ ARGSORT_DEFAULTS_KIND["axis"] = -1
+ ARGSORT_DEFAULTS_KIND["order"] = None
+ ARGSORT_DEFAULTS_KIND["stable"] = None
+ validate_argsort_kind = CompatValidator(
+     ARGSORT_DEFAULTS_KIND, fname="argsort", max_fname_arg_count=0, method="both"
+ )
+
+
+ def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool:
+     """
+     If 'Categorical.argsort' is called via the 'numpy' library, the first
+     parameter in its signature is 'axis', which takes either an integer or
+     'None', so check if the 'ascending' parameter has either integer type or is
+     None, since 'ascending' itself should be a boolean
+     """
+     if is_integer(ascending) or ascending is None:
+         args = (ascending,) + args
+         ascending = True
+
+     validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
+     ascending = cast(bool, ascending)
+     return ascending
+
+
+ CLIP_DEFAULTS: dict[str, Any] = {"out": None}
+ validate_clip = CompatValidator(
+     CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3
+ )
+
+
+ @overload
+ def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None:
+     ...
+
+
+ @overload
+ def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT:
+     ...
+
+
+ def validate_clip_with_axis(
+     axis: ndarray | AxisNoneT, args, kwargs
+ ) -> AxisNoneT | None:
+     """
+     If 'NDFrame.clip' is called via the numpy library, the third parameter in
+     its signature is 'out', which can take an ndarray, so check if the 'axis'
+     parameter is an instance of ndarray, since 'axis' itself should either be
+     an integer or None
+     """
+     if isinstance(axis, ndarray):
+         args = (axis,) + args
+         # error: Incompatible types in assignment (expression has type "None",
+         # variable has type "Union[ndarray[Any, Any], str, int]")
+         axis = None  # type: ignore[assignment]
+
+     validate_clip(args, kwargs)
+     # error: Incompatible return value type (got "Union[ndarray[Any, Any],
+     # str, int]", expected "Union[str, int, None]")
+     return axis  # type: ignore[return-value]
+
+
+ CUM_FUNC_DEFAULTS: dict[str, Any] = {}
+ CUM_FUNC_DEFAULTS["dtype"] = None
+ CUM_FUNC_DEFAULTS["out"] = None
+ validate_cum_func = CompatValidator(
+     CUM_FUNC_DEFAULTS, method="both", max_fname_arg_count=1
+ )
+ validate_cumsum = CompatValidator(
+     CUM_FUNC_DEFAULTS, fname="cumsum", method="both", max_fname_arg_count=1
+ )
+
+
+ def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
+     """
+     If this function is called via the 'numpy' library, the third parameter in
+     its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so
+     check if the 'skipna' parameter is a boolean or not
+     """
+     if not is_bool(skipna):
+         args = (skipna,) + args
+         skipna = True
+     elif isinstance(skipna, np.bool_):
+         skipna = bool(skipna)
+
+     validate_cum_func(args, kwargs, fname=name)
+     return skipna
+
+
+ ALLANY_DEFAULTS: dict[str, bool | None] = {}
+ ALLANY_DEFAULTS["dtype"] = None
+ ALLANY_DEFAULTS["out"] = None
+ ALLANY_DEFAULTS["keepdims"] = False
+ ALLANY_DEFAULTS["axis"] = None
+ validate_all = CompatValidator(
+     ALLANY_DEFAULTS, fname="all", method="both", max_fname_arg_count=1
+ )
+ validate_any = CompatValidator(
+     ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1
+ )
+
+ LOGICAL_FUNC_DEFAULTS = {"out": None, "keepdims": False}
+ validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs")
+
+ MINMAX_DEFAULTS = {"axis": None, "dtype": None, "out": None, "keepdims": False}
+ validate_min = CompatValidator(
+     MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1
+ )
+ validate_max = CompatValidator(
+     MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
+ )
+
+ RESHAPE_DEFAULTS: dict[str, str] = {"order": "C"}
+ validate_reshape = CompatValidator(
+     RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
+ )
+
+ REPEAT_DEFAULTS: dict[str, Any] = {"axis": None}
+ validate_repeat = CompatValidator(
+     REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
+ )
+
+ ROUND_DEFAULTS: dict[str, Any] = {"out": None}
+ validate_round = CompatValidator(
+     ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
+ )
+
+ SORT_DEFAULTS: dict[str, int | str | None] = {}
+ SORT_DEFAULTS["axis"] = -1
+ SORT_DEFAULTS["kind"] = "quicksort"
+ SORT_DEFAULTS["order"] = None
+ validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")
+
+ STAT_FUNC_DEFAULTS: dict[str, Any | None] = {}
+ STAT_FUNC_DEFAULTS["dtype"] = None
+ STAT_FUNC_DEFAULTS["out"] = None
+
+ SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
+ SUM_DEFAULTS["axis"] = None
+ SUM_DEFAULTS["keepdims"] = False
+ SUM_DEFAULTS["initial"] = None
+
+ PROD_DEFAULTS = SUM_DEFAULTS.copy()
+
+ MEAN_DEFAULTS = SUM_DEFAULTS.copy()
+
+ MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
+ MEDIAN_DEFAULTS["overwrite_input"] = False
+ MEDIAN_DEFAULTS["keepdims"] = False
+
+ STAT_FUNC_DEFAULTS["keepdims"] = False
+
+ validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method="kwargs")
+ validate_sum = CompatValidator(
+     SUM_DEFAULTS, fname="sum", method="both", max_fname_arg_count=1
+ )
+ validate_prod = CompatValidator(
+     PROD_DEFAULTS, fname="prod", method="both", max_fname_arg_count=1
+ )
+ validate_mean = CompatValidator(
+     MEAN_DEFAULTS, fname="mean", method="both", max_fname_arg_count=1
+ )
+ validate_median = CompatValidator(
+     MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1
+ )
+
+ STAT_DDOF_FUNC_DEFAULTS: dict[str, bool | None] = {}
+ STAT_DDOF_FUNC_DEFAULTS["dtype"] = None
+ STAT_DDOF_FUNC_DEFAULTS["out"] = None
+ STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False
+ validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs")
+
+ TAKE_DEFAULTS: dict[str, str | None] = {}
+ TAKE_DEFAULTS["out"] = None
+ TAKE_DEFAULTS["mode"] = "raise"
+ validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
+
+
+ def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool:
+     """
+     If this function is called via the 'numpy' library, the third parameter in
+     its signature is 'axis', which takes either an ndarray or 'None', so check
+     if the 'convert' parameter is either an instance of ndarray or is None
+     """
+     if isinstance(convert, ndarray) or convert is None:
+         args = (convert,) + args
+         convert = True
+
+     validate_take(args, kwargs, max_fname_arg_count=3, method="both")
+     return convert
+
+
+ TRANSPOSE_DEFAULTS = {"axes": None}
+ validate_transpose = CompatValidator(
+     TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
+ )
+
+
+ def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None:
+     """
+     'args' and 'kwargs' should be empty, except for allowed kwargs, because all
+     of their necessary parameters are explicitly listed in the function
+     signature
+     """
+     if allowed is None:
+         allowed = []
+
+     kwargs = set(kwargs) - set(allowed)
+
+     if len(args) + len(kwargs) > 0:
+         raise UnsupportedFunctionCall(
+             "numpy operations are not valid with groupby. "
+             f"Use .groupby(...).{name}() instead"
+         )
+
+
+ RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")
+
+
+ def validate_resampler_func(method: str, args, kwargs) -> None:
+     """
+     'args' and 'kwargs' should be empty because all of their necessary
+     parameters are explicitly listed in the function signature
+     """
+     if len(args) + len(kwargs) > 0:
+         if method in RESAMPLER_NUMPY_OPS:
+             raise UnsupportedFunctionCall(
+                 "numpy operations are not valid with resample. "
+                 f"Use .resample(...).{method}() instead"
+             )
+         raise TypeError("too many arguments passed in")
+
+
+ def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None:
+     """
+     Ensure that the axis argument passed to min, max, argmin, or argmax is zero
+     or None, as otherwise it will be incorrectly ignored.
+
+     Parameters
+     ----------
+     axis : int or None
+     ndim : int, default 1
+
+     Raises
+     ------
+     ValueError
+     """
+     if axis is None:
+         return
+     if axis >= ndim or (axis < 0 and ndim + axis < 0):
+         raise ValueError(f"`axis` must be fewer than the number of dimensions ({ndim})")
+
+
+ _validation_funcs = {
+     "median": validate_median,
+     "mean": validate_mean,
+     "min": validate_min,
+     "max": validate_max,
+     "sum": validate_sum,
+     "prod": validate_prod,
+ }
+
+
+ def validate_func(fname, args, kwargs) -> None:
+     if fname not in _validation_funcs:
+         return validate_stat_func(args, kwargs, fname=fname)
+
+     validation_func = _validation_funcs[fname]
+     return validation_func(args, kwargs)
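A short sketch of a CompatValidator at work (validate_cumsum and UnsupportedFunctionCall are real names from this module; the exact error text is pandas-internal and not reproduced here):

    import numpy as np
    from pandas.errors import UnsupportedFunctionCall
    from pandas.compat.numpy.function import validate_cumsum

    # numpy forwards its own keyword defaults when np.cumsum(series)
    # dispatches to Series.cumsum; default-valued extras validate silently:
    validate_cumsum((), {"dtype": None, "out": None})

    # ... while a non-default extra such as out= is rejected.
    try:
        validate_cumsum((), {"out": np.empty(3)})
    except UnsupportedFunctionCall as err:
        print(err)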
venv/lib/python3.10/site-packages/pandas/compat/pickle_compat.py ADDED
@@ -0,0 +1,262 @@
+ """
+ Support pre-0.12 series pickle compatibility.
+ """
+ from __future__ import annotations
+
+ import contextlib
+ import copy
+ import io
+ import pickle as pkl
+ from typing import TYPE_CHECKING
+
+ import numpy as np
+
+ from pandas._libs.arrays import NDArrayBacked
+ from pandas._libs.tslibs import BaseOffset
+
+ from pandas import Index
+ from pandas.core.arrays import (
+     DatetimeArray,
+     PeriodArray,
+     TimedeltaArray,
+ )
+ from pandas.core.internals import BlockManager
+
+ if TYPE_CHECKING:
+     from collections.abc import Generator
+
+
+ def load_reduce(self) -> None:
+     stack = self.stack
+     args = stack.pop()
+     func = stack[-1]
+
+     try:
+         stack[-1] = func(*args)
+         return
+     except TypeError as err:
+         # If we have a deprecated function,
+         # try to replace and try again.
+
+         msg = "_reconstruct: First argument must be a sub-type of ndarray"
+
+         if msg in str(err):
+             try:
+                 cls = args[0]
+                 stack[-1] = object.__new__(cls)
+                 return
+             except TypeError:
+                 pass
+         elif args and isinstance(args[0], type) and issubclass(args[0], BaseOffset):
+             # TypeError: object.__new__(Day) is not safe, use Day.__new__()
+             cls = args[0]
+             stack[-1] = cls.__new__(*args)
+             return
+         elif args and issubclass(args[0], PeriodArray):
+             cls = args[0]
+             stack[-1] = NDArrayBacked.__new__(*args)
+             return
+
+         raise
+
+
+ # If classes are moved, provide compat here.
+ _class_locations_map = {
+     ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
+     # 15477
+     ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
+     # Re-routing unpickle block logic to go through _unpickle_block instead
+     # for pandas <= 1.3.5
+     ("pandas.core.internals.blocks", "new_block"): (
+         "pandas._libs.internals",
+         "_unpickle_block",
+     ),
+     ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
+     ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
+     # 10890
+     ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
+     ("pandas.sparse.series", "SparseTimeSeries"): (
+         "pandas.core.sparse.series",
+         "SparseSeries",
+     ),
+     # 12588, extensions moving
+     ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
+     ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
+     # 18543 moving period
+     ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
+     ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
+     # 18014 moved __nat_unpickle from _libs.tslib --> _libs.tslibs.nattype
+     ("pandas.tslib", "__nat_unpickle"): (
+         "pandas._libs.tslibs.nattype",
+         "__nat_unpickle",
+     ),
+     ("pandas._libs.tslib", "__nat_unpickle"): (
+         "pandas._libs.tslibs.nattype",
+         "__nat_unpickle",
+     ),
+     # 15998 top-level dirs moving
+     ("pandas.sparse.array", "SparseArray"): (
+         "pandas.core.arrays.sparse",
+         "SparseArray",
+     ),
+     ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
+     ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
+     ("pandas.indexes.numeric", "Int64Index"): (
+         "pandas.core.indexes.base",
+         "Index",  # updated in 50775
+     ),
+     ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
+     ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
+     ("pandas.tseries.index", "_new_DatetimeIndex"): (
+         "pandas.core.indexes.datetimes",
+         "_new_DatetimeIndex",
+     ),
+     ("pandas.tseries.index", "DatetimeIndex"): (
+         "pandas.core.indexes.datetimes",
+         "DatetimeIndex",
+     ),
+     ("pandas.tseries.period", "PeriodIndex"): (
+         "pandas.core.indexes.period",
+         "PeriodIndex",
+     ),
+     # 19269, arrays moving
+     ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
+     # 19939, add timedeltaindex, float64index compat from 15998 move
+     ("pandas.tseries.tdi", "TimedeltaIndex"): (
+         "pandas.core.indexes.timedeltas",
+         "TimedeltaIndex",
+     ),
+     ("pandas.indexes.numeric", "Float64Index"): (
+         "pandas.core.indexes.base",
+         "Index",  # updated in 50775
+     ),
+     # 50775, remove Int64Index, UInt64Index & Float64Index from codebase
+     ("pandas.core.indexes.numeric", "Int64Index"): (
+         "pandas.core.indexes.base",
+         "Index",
+     ),
+     ("pandas.core.indexes.numeric", "UInt64Index"): (
+         "pandas.core.indexes.base",
+         "Index",
+     ),
+     ("pandas.core.indexes.numeric", "Float64Index"): (
+         "pandas.core.indexes.base",
+         "Index",
+     ),
+     ("pandas.core.arrays.sparse.dtype", "SparseDtype"): (
+         "pandas.core.dtypes.dtypes",
+         "SparseDtype",
+     ),
+ }
+
+
+ # Our Unpickler sub-class overrides methods and some dispatcher
+ # functions for compat, and uses a non-public class of the pickle module.
+
+
+ class Unpickler(pkl._Unpickler):
+     def find_class(self, module, name):
+         # override superclass
+         key = (module, name)
+         module, name = _class_locations_map.get(key, key)
+         return super().find_class(module, name)
+
+
+ Unpickler.dispatch = copy.copy(Unpickler.dispatch)
+ Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
+
+
+ def load_newobj(self) -> None:
+     args = self.stack.pop()
+     cls = self.stack[-1]
+
+     # compat
+     if issubclass(cls, Index):
+         obj = object.__new__(cls)
+     elif issubclass(cls, DatetimeArray) and not args:
+         arr = np.array([], dtype="M8[ns]")
+         obj = cls.__new__(cls, arr, arr.dtype)
+     elif issubclass(cls, TimedeltaArray) and not args:
+         arr = np.array([], dtype="m8[ns]")
+         obj = cls.__new__(cls, arr, arr.dtype)
+     elif cls is BlockManager and not args:
+         obj = cls.__new__(cls, (), [], False)
+     else:
+         obj = cls.__new__(cls, *args)
+
+     self.stack[-1] = obj
+
+
+ Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
+
+
+ def load_newobj_ex(self) -> None:
+     kwargs = self.stack.pop()
+     args = self.stack.pop()
+     cls = self.stack.pop()
+
+     # compat
+     if issubclass(cls, Index):
+         obj = object.__new__(cls)
+     else:
+         obj = cls.__new__(cls, *args, **kwargs)
+     self.append(obj)
+
+
+ try:
+     Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
+ except (AttributeError, KeyError):
+     pass
+
+
+ def load(fh, encoding: str | None = None, is_verbose: bool = False):
+     """
+     Load a pickle with a provided encoding.
+
+     Parameters
+     ----------
+     fh : a filelike object
+     encoding : an optional encoding
+     is_verbose : show exception output
+     """
+     try:
+         fh.seek(0)
+         if encoding is not None:
+             up = Unpickler(fh, encoding=encoding)
+         else:
+             up = Unpickler(fh)
+         # "Unpickler" has no attribute "is_verbose" [attr-defined]
+         up.is_verbose = is_verbose  # type: ignore[attr-defined]
+
+         return up.load()
+     except (ValueError, TypeError):
+         raise
+
+
+ def loads(
+     bytes_object: bytes,
+     *,
+     fix_imports: bool = True,
+     encoding: str = "ASCII",
+     errors: str = "strict",
+ ):
+     """
+     Analogous to pickle._loads.
+     """
+     fd = io.BytesIO(bytes_object)
+     return Unpickler(
+         fd, fix_imports=fix_imports, encoding=encoding, errors=errors
+     ).load()
+
+
+ @contextlib.contextmanager
+ def patch_pickle() -> Generator[None, None, None]:
+     """
+     Temporarily patch pickle to use our unpickler.
+     """
+     orig_loads = pkl.loads
+     try:
+         setattr(pkl, "loads", loads)
+         yield
+     finally:
+         setattr(pkl, "loads", orig_loads)
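A minimal sketch of patch_pickle in use (the Series payload is illustrative; any pickle bytes referencing a legacy class path would exercise _class_locations_map):

    import pickle
    import pandas as pd
    from pandas.compat.pickle_compat import patch_pickle

    # Inside the context manager, pickle.loads is temporarily replaced by
    # the compat loads(), so legacy class paths (e.g. pandas.indexes.base)
    # get remapped through _class_locations_map during unpickling.
    payload = pickle.dumps(pd.Series([1, 2, 3]))
    with patch_pickle():
        s = pickle.loads(payload)
    print(s.sum())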
venv/lib/python3.10/site-packages/pandas/compat/pyarrow.py ADDED
@@ -0,0 +1,27 @@
+ """ support pyarrow compatibility across versions """
+
+ from __future__ import annotations
+
+ from pandas.util.version import Version
+
+ try:
+     import pyarrow as pa
+
+     _palv = Version(Version(pa.__version__).base_version)
+     pa_version_under10p1 = _palv < Version("10.0.1")
+     pa_version_under11p0 = _palv < Version("11.0.0")
+     pa_version_under12p0 = _palv < Version("12.0.0")
+     pa_version_under13p0 = _palv < Version("13.0.0")
+     pa_version_under14p0 = _palv < Version("14.0.0")
+     pa_version_under14p1 = _palv < Version("14.0.1")
+     pa_version_under15p0 = _palv < Version("15.0.0")
+     pa_version_under16p0 = _palv < Version("16.0.0")
+ except ImportError:
+     pa_version_under10p1 = True
+     pa_version_under11p0 = True
+     pa_version_under12p0 = True
+     pa_version_under13p0 = True
+     pa_version_under14p0 = True
+     pa_version_under14p1 = True
+     pa_version_under15p0 = True
+     pa_version_under16p0 = True
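A brief sketch of the intended gating pattern (the printed messages are illustrative):

    from pandas.compat.pyarrow import pa_version_under14p0

    # The flags default to True when pyarrow is absent, so a single check
    # covers both "missing" and "too old":
    if pa_version_under14p0:
        print("skip features that need pyarrow >= 14.0")
    else:
        import pyarrow as pa
        print("pyarrow", pa.__version__, "supports the 14.0 API")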
venv/lib/python3.10/site-packages/pandas/io/__init__.py ADDED
@@ -0,0 +1,13 @@
+ # ruff: noqa: TCH004
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     # import modules that have public classes/functions
+     from pandas.io import (
+         formats,
+         json,
+         stata,
+     )
+
+     # mark only those modules as public
+     __all__ = ["formats", "json", "stata"]
venv/lib/python3.10/site-packages/pandas/io/_util.py ADDED
@@ -0,0 +1,34 @@
+ from __future__ import annotations
+
+ from typing import Callable
+
+ from pandas.compat._optional import import_optional_dependency
+
+ import pandas as pd
+
+
+ def _arrow_dtype_mapping() -> dict:
+     pa = import_optional_dependency("pyarrow")
+     return {
+         pa.int8(): pd.Int8Dtype(),
+         pa.int16(): pd.Int16Dtype(),
+         pa.int32(): pd.Int32Dtype(),
+         pa.int64(): pd.Int64Dtype(),
+         pa.uint8(): pd.UInt8Dtype(),
+         pa.uint16(): pd.UInt16Dtype(),
+         pa.uint32(): pd.UInt32Dtype(),
+         pa.uint64(): pd.UInt64Dtype(),
+         pa.bool_(): pd.BooleanDtype(),
+         pa.string(): pd.StringDtype(),
+         pa.float32(): pd.Float32Dtype(),
+         pa.float64(): pd.Float64Dtype(),
+     }
+
+
+ def arrow_string_types_mapper() -> Callable:
+     pa = import_optional_dependency("pyarrow")
+
+     return {
+         pa.string(): pd.StringDtype(storage="pyarrow_numpy"),
+         pa.large_string(): pd.StringDtype(storage="pyarrow_numpy"),
+     }.get
+
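A sketch of what these mappings plug into (assumes pyarrow is installed; _arrow_dtype_mapping is pandas-internal, and Table.to_pandas(types_mapper=...) is the pyarrow hook it is built for):

    import pyarrow as pa
    from pandas.io._util import _arrow_dtype_mapping

    table = pa.table({"a": pa.array([1, None, 3], type=pa.int64())})
    df = table.to_pandas(types_mapper=_arrow_dtype_mapping().get)
    print(df.dtypes)  # column "a" comes back as nullable Int64, not float64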
venv/lib/python3.10/site-packages/pandas/io/api.py ADDED
@@ -0,0 +1,65 @@
+ """
+ Data IO api
+ """
+
+ from pandas.io.clipboards import read_clipboard
+ from pandas.io.excel import (
+     ExcelFile,
+     ExcelWriter,
+     read_excel,
+ )
+ from pandas.io.feather_format import read_feather
+ from pandas.io.gbq import read_gbq
+ from pandas.io.html import read_html
+ from pandas.io.json import read_json
+ from pandas.io.orc import read_orc
+ from pandas.io.parquet import read_parquet
+ from pandas.io.parsers import (
+     read_csv,
+     read_fwf,
+     read_table,
+ )
+ from pandas.io.pickle import (
+     read_pickle,
+     to_pickle,
+ )
+ from pandas.io.pytables import (
+     HDFStore,
+     read_hdf,
+ )
+ from pandas.io.sas import read_sas
+ from pandas.io.spss import read_spss
+ from pandas.io.sql import (
+     read_sql,
+     read_sql_query,
+     read_sql_table,
+ )
+ from pandas.io.stata import read_stata
+ from pandas.io.xml import read_xml
+
+ __all__ = [
+     "ExcelFile",
+     "ExcelWriter",
+     "HDFStore",
+     "read_clipboard",
+     "read_csv",
+     "read_excel",
+     "read_feather",
+     "read_fwf",
+     "read_gbq",
+     "read_hdf",
+     "read_html",
+     "read_json",
+     "read_orc",
+     "read_parquet",
+     "read_pickle",
+     "read_sas",
+     "read_spss",
+     "read_sql",
+     "read_sql_query",
+     "read_sql_table",
+     "read_stata",
+     "read_table",
+     "read_xml",
+     "to_pickle",
+ ]
venv/lib/python3.10/site-packages/pandas/io/clipboards.py ADDED
@@ -0,0 +1,197 @@
+ """ io on the clipboard """
+ from __future__ import annotations
+
+ from io import StringIO
+ from typing import TYPE_CHECKING
+ import warnings
+
+ from pandas._libs import lib
+ from pandas.util._exceptions import find_stack_level
+ from pandas.util._validators import check_dtype_backend
+
+ from pandas.core.dtypes.generic import ABCDataFrame
+
+ from pandas import (
+     get_option,
+     option_context,
+ )
+
+ if TYPE_CHECKING:
+     from pandas._typing import DtypeBackend
+
+
+ def read_clipboard(
+     sep: str = r"\s+",
+     dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+     **kwargs,
+ ):  # pragma: no cover
+     r"""
+     Read text from clipboard and pass to :func:`~pandas.read_csv`.
+
+     Parses clipboard contents similar to how CSV files are parsed
+     using :func:`~pandas.read_csv`.
+
+     Parameters
+     ----------
+     sep : str, default '\\s+'
+         A string or regex delimiter. The default of ``'\\s+'`` denotes
+         one or more whitespace characters.
+
+     dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+         Back-end data type applied to the resultant :class:`DataFrame`
+         (still experimental). Behaviour is as follows:
+
+         * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+           (default).
+         * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+           DataFrame.
+
+         .. versionadded:: 2.0
+
+     **kwargs
+         See :func:`~pandas.read_csv` for the full argument list.
+
+     Returns
+     -------
+     DataFrame
+         A parsed :class:`~pandas.DataFrame` object.
+
+     See Also
+     --------
+     DataFrame.to_clipboard : Copy object to the system clipboard.
+     read_csv : Read a comma-separated values (csv) file into DataFrame.
+     read_fwf : Read a table of fixed-width formatted lines into DataFrame.
+
+     Examples
+     --------
+     >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
+     >>> df.to_clipboard()  # doctest: +SKIP
+     >>> pd.read_clipboard()  # doctest: +SKIP
+          A  B  C
+     0    1  2  3
+     1    4  5  6
+     """
+     encoding = kwargs.pop("encoding", "utf-8")
+
+     # only utf-8 is valid for passed value because that's what clipboard
+     # supports
+     if encoding is not None and encoding.lower().replace("-", "") != "utf8":
+         raise NotImplementedError("reading from clipboard only supports utf-8 encoding")
+
+     check_dtype_backend(dtype_backend)
+
+     from pandas.io.clipboard import clipboard_get
+     from pandas.io.parsers import read_csv
+
+     text = clipboard_get()
+
+     # Try to decode (if needed, as "text" might already be a string here).
+     try:
+         text = text.decode(kwargs.get("encoding") or get_option("display.encoding"))
+     except AttributeError:
+         pass
+
+     # Excel copies into the clipboard with \t separation.
+     # Inspect no more than the first 10 lines; if they all contain an
+     # equal number (>0) of tabs, infer that this came from Excel and
+     # set 'sep' accordingly.
+     lines = text[:10000].split("\n")[:-1][:10]
+
+     # Need to remove leading white space, since read_csv
+     # accepts:
+     #    a  b
+     # 0  1  2
+     # 1  3  4
+
+     counts = {x.lstrip(" ").count("\t") for x in lines}
+     if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
+         sep = "\t"
+         # check the number of leading tabs in the first line
+         # to account for index columns
+         index_length = len(lines[0]) - len(lines[0].lstrip(" \t"))
+         if index_length != 0:
+             kwargs.setdefault("index_col", list(range(index_length)))
+
+     # Edge case where sep is specified to be None, return to default
+     if sep is None and kwargs.get("delim_whitespace") is None:
+         sep = r"\s+"
+
+     # Regex separator currently only works with python engine.
+     # Default to python if separator is multi-character (regex)
+     if len(sep) > 1 and kwargs.get("engine") is None:
+         kwargs["engine"] = "python"
+     elif len(sep) > 1 and kwargs.get("engine") == "c":
+         warnings.warn(
+             "read_clipboard with regex separator does not work properly with c engine.",
+             stacklevel=find_stack_level(),
+         )
+
+     return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs)
+
+
+ def to_clipboard(
+     obj, excel: bool | None = True, sep: str | None = None, **kwargs
+ ) -> None:  # pragma: no cover
+     """
+     Attempt to write a text representation of the object to the system
+     clipboard, which can then be pasted into Excel, for example.
+
+     Parameters
+     ----------
+     obj : the object to write to the clipboard
+     excel : bool, defaults to True
+         if True, use the provided separator, writing in a csv
+         format for allowing easy pasting into excel.
+         if False, write a string representation of the object
+         to the clipboard
+     sep : optional, defaults to tab
+     other keywords are passed to to_csv
+
+     Notes
+     -----
+     Requirements for your platform
+       - Linux: xclip, or xsel (with PyQt4 modules)
+       - Windows:
+       - OS X:
+     """
+     encoding = kwargs.pop("encoding", "utf-8")
+
+     # testing if an invalid encoding is passed to clipboard
+     if encoding is not None and encoding.lower().replace("-", "") != "utf8":
+         raise ValueError("clipboard only supports utf-8 encoding")
+
+     from pandas.io.clipboard import clipboard_set
+
+     if excel is None:
+         excel = True
+
+     if excel:
+         try:
+             if sep is None:
+                 sep = "\t"
+             buf = StringIO()
+
+             # clipboard_set (pyperclip) expects unicode
+             obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs)
+             text = buf.getvalue()
+
+             clipboard_set(text)
+             return
+         except TypeError:
+             warnings.warn(
+                 "to_clipboard in excel mode requires a single character separator.",
+                 stacklevel=find_stack_level(),
+             )
+     elif sep is not None:
+         warnings.warn(
+             "to_clipboard with excel=False ignores the sep argument.",
+             stacklevel=find_stack_level(),
+         )
+
+     if isinstance(obj, ABCDataFrame):
+         # str(df) has various unhelpful defaults, like truncation
+         with option_context("display.max_colwidth", None):
+             objstr = obj.to_string(**kwargs)
+     else:
+         objstr = str(obj)
+     clipboard_set(objstr)
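The Excel-detection heuristic in isolation, so it can be tried without a clipboard (illustrative; mirrors the logic in read_clipboard above):

    # Equal, non-zero tab counts across the inspected lines => infer sep="\t".
    text = "a\tb\n1\t2\n3\t4\n"
    lines = text[:10000].split("\n")[:-1][:10]
    counts = {x.lstrip(" ").count("\t") for x in lines}
    if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
        print("inferred sep='\\t'")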
venv/lib/python3.10/site-packages/pandas/io/common.py ADDED
@@ -0,0 +1,1267 @@
1
+ """Common IO api utilities"""
2
+ from __future__ import annotations
3
+
4
+ from abc import (
5
+ ABC,
6
+ abstractmethod,
7
+ )
8
+ import codecs
9
+ from collections import defaultdict
10
+ from collections.abc import (
11
+ Hashable,
12
+ Mapping,
13
+ Sequence,
14
+ )
15
+ import dataclasses
16
+ import functools
17
+ import gzip
18
+ from io import (
19
+ BufferedIOBase,
20
+ BytesIO,
21
+ RawIOBase,
22
+ StringIO,
23
+ TextIOBase,
24
+ TextIOWrapper,
25
+ )
26
+ import mmap
27
+ import os
28
+ from pathlib import Path
29
+ import re
30
+ import tarfile
31
+ from typing import (
32
+ IO,
33
+ TYPE_CHECKING,
34
+ Any,
35
+ AnyStr,
36
+ DefaultDict,
37
+ Generic,
38
+ Literal,
39
+ TypeVar,
40
+ cast,
41
+ overload,
42
+ )
43
+ from urllib.parse import (
44
+ urljoin,
45
+ urlparse as parse_url,
46
+ uses_netloc,
47
+ uses_params,
48
+ uses_relative,
49
+ )
50
+ import warnings
51
+ import zipfile
52
+
53
+ from pandas._typing import (
54
+ BaseBuffer,
55
+ ReadCsvBuffer,
56
+ )
57
+ from pandas.compat import (
58
+ get_bz2_file,
59
+ get_lzma_file,
60
+ )
61
+ from pandas.compat._optional import import_optional_dependency
62
+ from pandas.util._decorators import doc
63
+ from pandas.util._exceptions import find_stack_level
64
+
65
+ from pandas.core.dtypes.common import (
66
+ is_bool,
67
+ is_file_like,
68
+ is_integer,
69
+ is_list_like,
70
+ )
71
+ from pandas.core.dtypes.generic import ABCMultiIndex
72
+
73
+ from pandas.core.shared_docs import _shared_docs
74
+
75
+ _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
76
+ _VALID_URLS.discard("")
77
+ _RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://")
78
+
79
+ BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer)
80
+
81
+
82
+ if TYPE_CHECKING:
83
+ from types import TracebackType
84
+
85
+ from pandas._typing import (
86
+ CompressionDict,
87
+ CompressionOptions,
88
+ FilePath,
89
+ ReadBuffer,
90
+ StorageOptions,
91
+ WriteBuffer,
92
+ )
93
+
94
+ from pandas import MultiIndex
95
+
96
+
97
+ @dataclasses.dataclass
98
+ class IOArgs:
99
+ """
100
+ Return value of io/common.py:_get_filepath_or_buffer.
101
+ """
102
+
103
+ filepath_or_buffer: str | BaseBuffer
104
+ encoding: str
105
+ mode: str
106
+ compression: CompressionDict
107
+ should_close: bool = False
108
+
109
+
110
+ @dataclasses.dataclass
111
+ class IOHandles(Generic[AnyStr]):
112
+ """
113
+ Return value of io/common.py:get_handle
114
+
115
+ Can be used as a context manager.
116
+
117
+ This is used to easily close created buffers and to handle corner cases when
118
+ TextIOWrapper is inserted.
119
+
120
+ handle: The file handle to be used.
121
+ created_handles: All file handles that are created by get_handle
122
+ is_wrapped: Whether a TextIOWrapper needs to be detached.
123
+ """
124
+
125
+ # handle might not implement the IO-interface
126
+ handle: IO[AnyStr]
127
+ compression: CompressionDict
128
+ created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)
129
+ is_wrapped: bool = False
130
+
131
+ def close(self) -> None:
132
+ """
133
+ Close all created buffers.
134
+
135
+ Note: If a TextIOWrapper was inserted, it is flushed and detached to
136
+ avoid closing the potentially user-created buffer.
137
+ """
138
+ if self.is_wrapped:
139
+ assert isinstance(self.handle, TextIOWrapper)
140
+ self.handle.flush()
141
+ self.handle.detach()
142
+ self.created_handles.remove(self.handle)
143
+ for handle in self.created_handles:
144
+ handle.close()
145
+ self.created_handles = []
146
+ self.is_wrapped = False
147
+
148
+ def __enter__(self) -> IOHandles[AnyStr]:
149
+ return self
150
+
151
+ def __exit__(
152
+ self,
153
+ exc_type: type[BaseException] | None,
154
+ exc_value: BaseException | None,
155
+ traceback: TracebackType | None,
156
+ ) -> None:
157
+ self.close()
158
+
159
+
160
+ def is_url(url: object) -> bool:
161
+ """
162
+ Check to see if a URL has a valid protocol.
163
+
164
+ Parameters
165
+ ----------
166
+ url : str or unicode
167
+
168
+ Returns
169
+ -------
170
+ isurl : bool
171
+ If `url` has a valid protocol return True otherwise False.
172
+ """
173
+ if not isinstance(url, str):
174
+ return False
175
+ return parse_url(url).scheme in _VALID_URLS
176
+
177
+
178
+ @overload
179
+ def _expand_user(filepath_or_buffer: str) -> str:
180
+ ...
181
+
182
+
183
+ @overload
184
+ def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT:
185
+ ...
186
+
187
+
188
+ def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
189
+ """
190
+ Return the argument with an initial component of ~ or ~user
191
+ replaced by that user's home directory.
192
+
193
+ Parameters
194
+ ----------
195
+ filepath_or_buffer : object to be converted if possible
196
+
197
+ Returns
198
+ -------
199
+ expanded_filepath_or_buffer : an expanded filepath or the
200
+ input if not expandable
201
+ """
202
+ if isinstance(filepath_or_buffer, str):
203
+ return os.path.expanduser(filepath_or_buffer)
204
+ return filepath_or_buffer
205
+
206
+
207
+ def validate_header_arg(header: object) -> None:
208
+ if header is None:
209
+ return
210
+ if is_integer(header):
211
+ header = cast(int, header)
212
+ if header < 0:
213
+ # GH 27779
214
+ raise ValueError(
215
+ "Passing negative integer to header is invalid. "
216
+ "For no header, use header=None instead"
217
+ )
218
+ return
219
+ if is_list_like(header, allow_sets=False):
220
+ header = cast(Sequence, header)
221
+ if not all(map(is_integer, header)):
222
+ raise ValueError("header must be integer or list of integers")
223
+ if any(i < 0 for i in header):
224
+ raise ValueError("cannot specify multi-index header with negative integers")
225
+ return
226
+ if is_bool(header):
227
+ raise TypeError(
228
+ "Passing a bool to header is invalid. Use header=None for no header or "
229
+ "header=int or list-like of ints to specify "
230
+ "the row(s) making up the column names"
231
+ )
232
+ # GH 16338
233
+ raise ValueError("header must be integer or list of integers")
234
+
235
+
236
+ @overload
237
+ def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str:
238
+ ...
239
+
240
+
241
+ @overload
242
+ def stringify_path(
243
+ filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...
244
+ ) -> BaseBufferT:
245
+ ...
246
+
247
+
248
+ def stringify_path(
249
+ filepath_or_buffer: FilePath | BaseBufferT,
250
+ convert_file_like: bool = False,
251
+ ) -> str | BaseBufferT:
252
+ """
253
+ Attempt to convert a path-like object to a string.
254
+
255
+ Parameters
256
+ ----------
257
+ filepath_or_buffer : object to be converted
258
+
259
+ Returns
260
+ -------
261
+ str_filepath_or_buffer : maybe a string version of the object
262
+
263
+ Notes
264
+ -----
265
+ Objects supporting the fspath protocol are coerced
266
+ according to its __fspath__ method.
267
+
268
+ Any other object is passed through unchanged, which includes bytes,
269
+ strings, buffers, or anything else that's not even path-like.
270
+ """
271
+ if not convert_file_like and is_file_like(filepath_or_buffer):
272
+ # GH 38125: some fsspec objects implement os.PathLike but have already opened a
273
+ # file. This prevents opening the file a second time. infer_compression calls
274
+ # this function with convert_file_like=True to infer the compression.
275
+ return cast(BaseBufferT, filepath_or_buffer)
276
+
277
+ if isinstance(filepath_or_buffer, os.PathLike):
278
+ filepath_or_buffer = filepath_or_buffer.__fspath__()
279
+ return _expand_user(filepath_or_buffer)
280
+
281
+
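A quick check of the conversions documented above, using the private pandas.io.common API (subject to change between versions): path-like objects are converted through __fspath__ with ~ expanded, while already-open file objects pass through untouched.

import os
from io import BytesIO
from pathlib import Path

from pandas.io.common import stringify_path  # private helper, shown above

assert stringify_path(Path("data") / "x.csv") == os.path.join("data", "x.csv")
buf = BytesIO(b"abc")
assert stringify_path(buf) is buf  # open buffers are returned unchanged
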
282
+ def urlopen(*args, **kwargs):
283
+ """
284
+ Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
285
+ the stdlib.
286
+ """
287
+ import urllib.request
288
+
289
+ return urllib.request.urlopen(*args, **kwargs)
290
+
291
+
292
+ def is_fsspec_url(url: FilePath | BaseBuffer) -> bool:
293
+ """
294
+ Returns true if the given URL looks like
295
+ something fsspec can handle
296
+ """
297
+ return (
298
+ isinstance(url, str)
299
+ and bool(_RFC_3986_PATTERN.match(url))
300
+ and not url.startswith(("http://", "https://"))
301
+ )
302
+
303
+
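In other words, any RFC 3986-style scheme qualifies except plain http(s), which pandas keeps routing through urllib so Content-Encoding sniffing still works. For example (again via the private API):

from pandas.io.common import is_fsspec_url

assert is_fsspec_url("s3://bucket/key.csv")
assert is_fsspec_url("gs://bucket/key.csv")
assert not is_fsspec_url("https://example.com/key.csv")  # handled by urllib instead
assert not is_fsspec_url("relative/path.csv")            # no scheme at all
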
304
+ @doc(
305
+ storage_options=_shared_docs["storage_options"],
306
+ compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
307
+ )
308
+ def _get_filepath_or_buffer(
309
+ filepath_or_buffer: FilePath | BaseBuffer,
310
+ encoding: str = "utf-8",
311
+ compression: CompressionOptions | None = None,
312
+ mode: str = "r",
313
+ storage_options: StorageOptions | None = None,
314
+ ) -> IOArgs:
315
+ """
316
+ If the filepath_or_buffer is a url, translate and return the buffer.
317
+ Otherwise passthrough.
318
+
319
+ Parameters
320
+ ----------
321
+ filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
322
+ or buffer
323
+ {compression_options}
324
+
325
+ .. versionchanged:: 1.4.0 Zstandard support.
326
+
327
+ encoding : the encoding to use to decode bytes, default is 'utf-8'
328
+ mode : str, optional
329
+
330
+ {storage_options}
331
+
332
+
333
+ Returns the dataclass IOArgs.
334
+ """
335
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
336
+
337
+ # handle compression dict
338
+ compression_method, compression = get_compression_method(compression)
339
+ compression_method = infer_compression(filepath_or_buffer, compression_method)
340
+
341
+ # GH21227 internal compression is not used for non-binary handles.
342
+ if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
343
+ warnings.warn(
344
+ "compression has no effect when passing a non-binary object as input.",
345
+ RuntimeWarning,
346
+ stacklevel=find_stack_level(),
347
+ )
348
+ compression_method = None
349
+
350
+ compression = dict(compression, method=compression_method)
351
+
352
+ # bz2 and xz do not write the byte order mark for utf-16 and utf-32
353
+ # print a warning when writing such files
354
+ if (
355
+ "w" in mode
356
+ and compression_method in ["bz2", "xz"]
357
+ and encoding in ["utf-16", "utf-32"]
358
+ ):
359
+ warnings.warn(
360
+ f"{compression} will not write the byte order mark for {encoding}",
361
+ UnicodeWarning,
362
+ stacklevel=find_stack_level(),
363
+ )
364
+
365
+ # Use binary mode when converting path-like objects to file-like objects (fsspec)
366
+ # except when text mode is explicitly requested. The original mode is returned if
367
+ # fsspec is not used.
368
+ fsspec_mode = mode
369
+ if "t" not in fsspec_mode and "b" not in fsspec_mode:
370
+ fsspec_mode += "b"
371
+
372
+ if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
373
+ # TODO: fsspec can also handle HTTP via requests, but leaving this
374
+ # unchanged. using fsspec appears to break the ability to infer if the
375
+ # server responded with gzipped data
376
+ storage_options = storage_options or {}
377
+
378
+ # waiting until now for importing to match intended lazy logic of
379
+ # urlopen function defined elsewhere in this module
380
+ import urllib.request
381
+
382
+ # assuming storage_options is to be interpreted as headers
383
+ req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
384
+ with urlopen(req_info) as req:
385
+ content_encoding = req.headers.get("Content-Encoding", None)
386
+ if content_encoding == "gzip":
387
+ # Override compression based on Content-Encoding header
388
+ compression = {"method": "gzip"}
389
+ reader = BytesIO(req.read())
390
+ return IOArgs(
391
+ filepath_or_buffer=reader,
392
+ encoding=encoding,
393
+ compression=compression,
394
+ should_close=True,
395
+ mode=fsspec_mode,
396
+ )
397
+
398
+ if is_fsspec_url(filepath_or_buffer):
399
+ assert isinstance(
400
+ filepath_or_buffer, str
401
+ ) # just to appease mypy for this branch
402
+ # two special-case s3-like protocols; these have special meaning in Hadoop,
403
+ # but are equivalent to just "s3" from fsspec's point of view
404
+ # cc #11071
405
+ if filepath_or_buffer.startswith("s3a://"):
406
+ filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
407
+ if filepath_or_buffer.startswith("s3n://"):
408
+ filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
409
+ fsspec = import_optional_dependency("fsspec")
410
+
411
+ # If botocore is installed we fallback to reading with anon=True
412
+ # to allow reads from public buckets
413
+ err_types_to_retry_with_anon: list[Any] = []
414
+ try:
415
+ import_optional_dependency("botocore")
416
+ from botocore.exceptions import (
417
+ ClientError,
418
+ NoCredentialsError,
419
+ )
420
+
421
+ err_types_to_retry_with_anon = [
422
+ ClientError,
423
+ NoCredentialsError,
424
+ PermissionError,
425
+ ]
426
+ except ImportError:
427
+ pass
428
+
429
+ try:
430
+ file_obj = fsspec.open(
431
+ filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
432
+ ).open()
433
+ # GH 34626 Reads from Public Buckets without Credentials needs anon=True
434
+ except tuple(err_types_to_retry_with_anon):
435
+ if storage_options is None:
436
+ storage_options = {"anon": True}
437
+ else:
438
+ # don't mutate user input.
439
+ storage_options = dict(storage_options)
440
+ storage_options["anon"] = True
441
+ file_obj = fsspec.open(
442
+ filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
443
+ ).open()
444
+
445
+ return IOArgs(
446
+ filepath_or_buffer=file_obj,
447
+ encoding=encoding,
448
+ compression=compression,
449
+ should_close=True,
450
+ mode=fsspec_mode,
451
+ )
452
+ elif storage_options:
453
+ raise ValueError(
454
+ "storage_options passed with file object or non-fsspec file path"
455
+ )
456
+
457
+ if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
458
+ return IOArgs(
459
+ filepath_or_buffer=_expand_user(filepath_or_buffer),
460
+ encoding=encoding,
461
+ compression=compression,
462
+ should_close=False,
463
+ mode=mode,
464
+ )
465
+
466
+ # is_file_like requires (read | write) & __iter__ but __iter__ is only
467
+ # needed for read_csv(engine=python)
468
+ if not (
469
+ hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write")
470
+ ):
471
+ msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
472
+ raise ValueError(msg)
473
+
474
+ return IOArgs(
475
+ filepath_or_buffer=filepath_or_buffer,
476
+ encoding=encoding,
477
+ compression=compression,
478
+ should_close=False,
479
+ mode=mode,
480
+ )
481
+
482
+
483
+ def file_path_to_url(path: str) -> str:
484
+ """
485
+ converts an absolute native path to a FILE URL.
486
+
487
+ Parameters
488
+ ----------
489
+ path : a path in native format
490
+
491
+ Returns
492
+ -------
493
+ a valid FILE URL
494
+ """
495
+ # lazify expensive import (~30ms)
496
+ from urllib.request import pathname2url
497
+
498
+ return urljoin("file:", pathname2url(path))
499
+
500
+
501
+ extension_to_compression = {
502
+ ".tar": "tar",
503
+ ".tar.gz": "tar",
504
+ ".tar.bz2": "tar",
505
+ ".tar.xz": "tar",
506
+ ".gz": "gzip",
507
+ ".bz2": "bz2",
508
+ ".zip": "zip",
509
+ ".xz": "xz",
510
+ ".zst": "zstd",
511
+ }
512
+ _supported_compressions = set(extension_to_compression.values())
513
+
514
+
515
+ def get_compression_method(
516
+ compression: CompressionOptions,
517
+ ) -> tuple[str | None, CompressionDict]:
518
+ """
519
+ Simplifies a compression argument to a compression method string and
520
+ a mapping containing additional arguments.
521
+
522
+ Parameters
523
+ ----------
524
+ compression : str or mapping
525
+ If string, specifies the compression method. If mapping, value at key
526
+ 'method' specifies compression method.
527
+
528
+ Returns
529
+ -------
530
+ tuple of ({compression method}: Optional[str],
531
+ {compression arguments}: Dict[str, Any])
532
+
533
+ Raises
534
+ ------
535
+ ValueError on mapping missing 'method' key
536
+ """
537
+ compression_method: str | None
538
+ if isinstance(compression, Mapping):
539
+ compression_args = dict(compression)
540
+ try:
541
+ compression_method = compression_args.pop("method")
542
+ except KeyError as err:
543
+ raise ValueError("If mapping, compression must have key 'method'") from err
544
+ else:
545
+ compression_args = {}
546
+ compression_method = compression
547
+ return compression_method, compression_args
548
+
549
+
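The normalization above means callers can pass either a bare method string or a dict carrying extra options; both collapse to the same (method, args) pair:

from pandas.io.common import get_compression_method

assert get_compression_method("gzip") == ("gzip", {})
method, args = get_compression_method({"method": "zip", "archive_name": "out.csv"})
assert method == "zip" and args == {"archive_name": "out.csv"}
# A mapping without a 'method' key raises ValueError, as documented above.
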
550
+ @doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer")
551
+ def infer_compression(
552
+ filepath_or_buffer: FilePath | BaseBuffer, compression: str | None
553
+ ) -> str | None:
554
+ """
555
+ Get the compression method for filepath_or_buffer. If compression='infer',
556
+ the inferred compression method is returned. Otherwise, the input
557
+ compression method is returned unchanged, unless it's invalid, in which
558
+ case an error is raised.
559
+
560
+ Parameters
561
+ ----------
562
+ filepath_or_buffer : str or file handle
563
+ File path or object.
564
+ {compression_options}
565
+
566
+ .. versionchanged:: 1.4.0 Zstandard support.
567
+
568
+ Returns
569
+ -------
570
+ string or None
571
+
572
+ Raises
573
+ ------
574
+ ValueError on invalid compression specified.
575
+ """
576
+ if compression is None:
577
+ return None
578
+
579
+ # Infer compression
580
+ if compression == "infer":
581
+ # Convert all path types (e.g. pathlib.Path) to strings
582
+ filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
583
+ if not isinstance(filepath_or_buffer, str):
584
+ # Cannot infer compression of a buffer, assume no compression
585
+ return None
586
+
587
+ # Infer compression from the filename/URL extension
588
+ for extension, compression in extension_to_compression.items():
589
+ if filepath_or_buffer.lower().endswith(extension):
590
+ return compression
591
+ return None
592
+
593
+ # Compression has been specified. Check that it's valid
594
+ if compression in _supported_compressions:
595
+ return compression
596
+
597
+ valid = ["infer", None] + sorted(_supported_compressions)
598
+ msg = (
599
+ f"Unrecognized compression type: {compression}\n"
600
+ f"Valid compression types are {valid}"
601
+ )
602
+ raise ValueError(msg)
603
+
604
+
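Inference walks the extension table above in order, so the compound .tar.* suffixes win over their bare counterparts:

from pandas.io.common import infer_compression

assert infer_compression("data.csv.gz", "infer") == "gzip"
assert infer_compression("archive.tar.xz", "infer") == "tar"  # .tar.xz beats .xz
assert infer_compression("data.csv", "infer") is None   # unknown extension
assert infer_compression("data.csv.gz", None) is None   # None means no compression
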
605
+ def check_parent_directory(path: Path | str) -> None:
606
+ """
607
+ Check if parent directory of a file exists, raise OSError if it does not
608
+
609
+ Parameters
610
+ ----------
611
+ path: Path or str
612
+ Path to check parent directory of
613
+ """
614
+ parent = Path(path).parent
615
+ if not parent.is_dir():
616
+ raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'")
617
+
618
+
619
+ @overload
620
+ def get_handle(
621
+ path_or_buf: FilePath | BaseBuffer,
622
+ mode: str,
623
+ *,
624
+ encoding: str | None = ...,
625
+ compression: CompressionOptions = ...,
626
+ memory_map: bool = ...,
627
+ is_text: Literal[False],
628
+ errors: str | None = ...,
629
+ storage_options: StorageOptions = ...,
630
+ ) -> IOHandles[bytes]:
631
+ ...
632
+
633
+
634
+ @overload
635
+ def get_handle(
636
+ path_or_buf: FilePath | BaseBuffer,
637
+ mode: str,
638
+ *,
639
+ encoding: str | None = ...,
640
+ compression: CompressionOptions = ...,
641
+ memory_map: bool = ...,
642
+ is_text: Literal[True] = ...,
643
+ errors: str | None = ...,
644
+ storage_options: StorageOptions = ...,
645
+ ) -> IOHandles[str]:
646
+ ...
647
+
648
+
649
+ @overload
650
+ def get_handle(
651
+ path_or_buf: FilePath | BaseBuffer,
652
+ mode: str,
653
+ *,
654
+ encoding: str | None = ...,
655
+ compression: CompressionOptions = ...,
656
+ memory_map: bool = ...,
657
+ is_text: bool = ...,
658
+ errors: str | None = ...,
659
+ storage_options: StorageOptions = ...,
660
+ ) -> IOHandles[str] | IOHandles[bytes]:
661
+ ...
662
+
663
+
664
+ @doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
665
+ def get_handle(
666
+ path_or_buf: FilePath | BaseBuffer,
667
+ mode: str,
668
+ *,
669
+ encoding: str | None = None,
670
+ compression: CompressionOptions | None = None,
671
+ memory_map: bool = False,
672
+ is_text: bool = True,
673
+ errors: str | None = None,
674
+ storage_options: StorageOptions | None = None,
675
+ ) -> IOHandles[str] | IOHandles[bytes]:
676
+ """
677
+ Get file handle for given path/buffer and mode.
678
+
679
+ Parameters
680
+ ----------
681
+ path_or_buf : str or file handle
682
+ File path or object.
683
+ mode : str
684
+ Mode to open path_or_buf with.
685
+ encoding : str or None
686
+ Encoding to use.
687
+ {compression_options}
688
+
689
+ May be a dict with key 'method' as compression mode
690
+ and other keys as compression options if compression
691
+ mode is 'zip'.
692
+
693
+ Passing compression options as keys in dict is
694
+ supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.
695
+
696
+ .. versionchanged:: 1.4.0 Zstandard support.
697
+
698
+ memory_map : bool, default False
699
+ See parsers._parser_params for more information. Only used by read_csv.
700
+ is_text : bool, default True
701
+ Whether the type of the content passed to the file/buffer is string or
702
+ bytes. This is not the same as `"b" not in mode`. If a string content is
703
+ passed to a binary file/buffer, a wrapper is inserted.
704
+ errors : str, default 'strict'
705
+ Specifies how encoding and decoding errors are to be handled.
706
+ See the errors argument for :func:`open` for a full list
707
+ of options.
708
+ storage_options: StorageOptions = None
709
+ Passed to _get_filepath_or_buffer
710
+
711
+ Returns the dataclass IOHandles
712
+ """
713
+ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior
714
+ encoding = encoding or "utf-8"
715
+
716
+ errors = errors or "strict"
717
+
718
+ # read_csv does not know whether the buffer is opened in binary/text mode
719
+ if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
720
+ mode += "b"
721
+
722
+ # validate encoding and errors
723
+ codecs.lookup(encoding)
724
+ if isinstance(errors, str):
725
+ codecs.lookup_error(errors)
726
+
727
+ # open URLs
728
+ ioargs = _get_filepath_or_buffer(
729
+ path_or_buf,
730
+ encoding=encoding,
731
+ compression=compression,
732
+ mode=mode,
733
+ storage_options=storage_options,
734
+ )
735
+
736
+ handle = ioargs.filepath_or_buffer
737
+ handles: list[BaseBuffer]
738
+
739
+ # memory mapping needs to be the first step
740
+ # only used for read_csv
741
+ handle, memory_map, handles = _maybe_memory_map(handle, memory_map)
742
+
743
+ is_path = isinstance(handle, str)
744
+ compression_args = dict(ioargs.compression)
745
+ compression = compression_args.pop("method")
746
+
747
+ # Only for write methods
748
+ if "r" not in mode and is_path:
749
+ check_parent_directory(str(handle))
750
+
751
+ if compression:
752
+ if compression != "zstd":
753
+ # compression libraries do not like an explicit text-mode
754
+ ioargs.mode = ioargs.mode.replace("t", "")
755
+ elif compression == "zstd" and "b" not in ioargs.mode:
756
+ # python-zstandard defaults to text mode, but we always expect
757
+ # compression libraries to use binary mode.
758
+ ioargs.mode += "b"
759
+
760
+ # GZ Compression
761
+ if compression == "gzip":
762
+ if isinstance(handle, str):
763
+ # error: Incompatible types in assignment (expression has type
764
+ # "GzipFile", variable has type "Union[str, BaseBuffer]")
765
+ handle = gzip.GzipFile( # type: ignore[assignment]
766
+ filename=handle,
767
+ mode=ioargs.mode,
768
+ **compression_args,
769
+ )
770
+ else:
771
+ handle = gzip.GzipFile(
772
+ # No overload variant of "GzipFile" matches argument types
773
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
774
+ fileobj=handle, # type: ignore[call-overload]
775
+ mode=ioargs.mode,
776
+ **compression_args,
777
+ )
778
+
779
+ # BZ Compression
780
+ elif compression == "bz2":
781
+ # Overload of "BZ2File" to handle pickle protocol 5
782
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
783
+ handle = get_bz2_file()( # type: ignore[call-overload]
784
+ handle,
785
+ mode=ioargs.mode,
786
+ **compression_args,
787
+ )
788
+
789
+ # ZIP Compression
790
+ elif compression == "zip":
791
+ # error: Argument 1 to "_BytesZipFile" has incompatible type
792
+ # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]],
793
+ # ReadBuffer[bytes], WriteBuffer[bytes]]"
794
+ handle = _BytesZipFile(
795
+ handle, ioargs.mode, **compression_args # type: ignore[arg-type]
796
+ )
797
+ if handle.buffer.mode == "r":
798
+ handles.append(handle)
799
+ zip_names = handle.buffer.namelist()
800
+ if len(zip_names) == 1:
801
+ handle = handle.buffer.open(zip_names.pop())
802
+ elif not zip_names:
803
+ raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
804
+ else:
805
+ raise ValueError(
806
+ "Multiple files found in ZIP file. "
807
+ f"Only one file per ZIP: {zip_names}"
808
+ )
809
+
810
+ # TAR Encoding
811
+ elif compression == "tar":
812
+ compression_args.setdefault("mode", ioargs.mode)
813
+ if isinstance(handle, str):
814
+ handle = _BytesTarFile(name=handle, **compression_args)
815
+ else:
816
+ # error: Argument "fileobj" to "_BytesTarFile" has incompatible
817
+ # type "BaseBuffer"; expected "Union[ReadBuffer[bytes],
818
+ # WriteBuffer[bytes], None]"
819
+ handle = _BytesTarFile(
820
+ fileobj=handle, **compression_args # type: ignore[arg-type]
821
+ )
822
+ assert isinstance(handle, _BytesTarFile)
823
+ if "r" in handle.buffer.mode:
824
+ handles.append(handle)
825
+ files = handle.buffer.getnames()
826
+ if len(files) == 1:
827
+ file = handle.buffer.extractfile(files[0])
828
+ assert file is not None
829
+ handle = file
830
+ elif not files:
831
+ raise ValueError(f"Zero files found in TAR archive {path_or_buf}")
832
+ else:
833
+ raise ValueError(
834
+ "Multiple files found in TAR archive. "
835
+ f"Only one file per TAR archive: {files}"
836
+ )
837
+
838
+ # XZ Compression
839
+ elif compression == "xz":
840
+ # error: Argument 1 to "LZMAFile" has incompatible type "Union[str,
841
+ # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str],
842
+ # PathLike[bytes]], IO[bytes]], None]"
843
+ handle = get_lzma_file()(
844
+ handle, ioargs.mode, **compression_args # type: ignore[arg-type]
845
+ )
846
+
847
+ # Zstd Compression
848
+ elif compression == "zstd":
849
+ zstd = import_optional_dependency("zstandard")
850
+ if "r" in ioargs.mode:
851
+ open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)}
852
+ else:
853
+ open_args = {"cctx": zstd.ZstdCompressor(**compression_args)}
854
+ handle = zstd.open(
855
+ handle,
856
+ mode=ioargs.mode,
857
+ **open_args,
858
+ )
859
+
860
+ # Unrecognized Compression
861
+ else:
862
+ msg = f"Unrecognized compression type: {compression}"
863
+ raise ValueError(msg)
864
+
865
+ assert not isinstance(handle, str)
866
+ handles.append(handle)
867
+
868
+ elif isinstance(handle, str):
869
+ # Check whether the filename is to be opened in binary mode.
870
+ # Binary mode does not support 'encoding' and 'newline'.
871
+ if ioargs.encoding and "b" not in ioargs.mode:
872
+ # Encoding
873
+ handle = open(
874
+ handle,
875
+ ioargs.mode,
876
+ encoding=ioargs.encoding,
877
+ errors=errors,
878
+ newline="",
879
+ )
880
+ else:
881
+ # Binary mode
882
+ handle = open(handle, ioargs.mode)
883
+ handles.append(handle)
884
+
885
+ # Convert BytesIO or file objects passed with an encoding
886
+ is_wrapped = False
887
+ if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase):
888
+ # not added to handles as it does not open/buffer resources
889
+ handle = _BytesIOWrapper(
890
+ handle,
891
+ encoding=ioargs.encoding,
892
+ )
893
+ elif is_text and (
894
+ compression or memory_map or _is_binary_mode(handle, ioargs.mode)
895
+ ):
896
+ if (
897
+ not hasattr(handle, "readable")
898
+ or not hasattr(handle, "writable")
899
+ or not hasattr(handle, "seekable")
900
+ ):
901
+ handle = _IOWrapper(handle)
902
+ # error: Argument 1 to "TextIOWrapper" has incompatible type
903
+ # "_IOWrapper"; expected "IO[bytes]"
904
+ handle = TextIOWrapper(
905
+ handle, # type: ignore[arg-type]
906
+ encoding=ioargs.encoding,
907
+ errors=errors,
908
+ newline="",
909
+ )
910
+ handles.append(handle)
911
+ # only marked as wrapped when the caller provided a handle
912
+ is_wrapped = not (
913
+ isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
914
+ )
915
+
916
+ if "r" in ioargs.mode and not hasattr(handle, "read"):
917
+ raise TypeError(
918
+ "Expected file path name or file-like object, "
919
+ f"got {type(ioargs.filepath_or_buffer)} type"
920
+ )
921
+
922
+ handles.reverse() # close the most recently added buffer first
923
+ if ioargs.should_close:
924
+ assert not isinstance(ioargs.filepath_or_buffer, str)
925
+ handles.append(ioargs.filepath_or_buffer)
926
+
927
+ return IOHandles(
928
+ # error: Argument "handle" to "IOHandles" has incompatible type
929
+ # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes],
930
+ # typing.IO[Any]]"; expected "pandas._typing.IO[Any]"
931
+ handle=handle, # type: ignore[arg-type]
932
+ # error: Argument "created_handles" to "IOHandles" has incompatible type
933
+ # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]"
934
+ created_handles=handles, # type: ignore[arg-type]
935
+ is_wrapped=is_wrapped,
936
+ compression=ioargs.compression,
937
+ )
938
+
939
+
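A hedged usage sketch of get_handle as a context manager (private API; the filename is hypothetical). Compression is inferred from the suffix, and every handle created along the way is closed on exit, most recent first:

from pandas.io.common import get_handle

with get_handle("out.csv.gz", "w", compression="infer") as handles:
    # handles.handle is a text wrapper over a GzipFile over the raw file
    handles.handle.write("a,b\n1,2\n")
# all created handles are now closed; user-supplied buffers would be left open
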
940
+ # error: Definition of "__enter__" in base class "IOBase" is incompatible
941
+ # with definition in base class "BinaryIO"
942
+ class _BufferedWriter(BytesIO, ABC): # type: ignore[misc]
943
+ """
944
+ Some objects do not support multiple .write() calls (TarFile and ZipFile).
945
+ This wrapper writes to the underlying buffer on close.
946
+ """
947
+
948
+ buffer = BytesIO()
949
+
950
+ @abstractmethod
951
+ def write_to_buffer(self) -> None:
952
+ ...
953
+
954
+ def close(self) -> None:
955
+ if self.closed:
956
+ # already closed
957
+ return
958
+ if self.getbuffer().nbytes:
959
+ # write to buffer
960
+ self.seek(0)
961
+ with self.buffer:
962
+ self.write_to_buffer()
963
+ else:
964
+ self.buffer.close()
965
+ super().close()
966
+
967
+
968
+ class _BytesTarFile(_BufferedWriter):
969
+ def __init__(
970
+ self,
971
+ name: str | None = None,
972
+ mode: Literal["r", "a", "w", "x"] = "r",
973
+ fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None = None,
974
+ archive_name: str | None = None,
975
+ **kwargs,
976
+ ) -> None:
977
+ super().__init__()
978
+ self.archive_name = archive_name
979
+ self.name = name
980
+ # error: Incompatible types in assignment (expression has type "TarFile",
981
+ # base class "_BufferedWriter" defined the type as "BytesIO")
982
+ self.buffer: tarfile.TarFile = tarfile.TarFile.open( # type: ignore[assignment]
983
+ name=name,
984
+ mode=self.extend_mode(mode),
985
+ fileobj=fileobj,
986
+ **kwargs,
987
+ )
988
+
989
+ def extend_mode(self, mode: str) -> str:
990
+ mode = mode.replace("b", "")
991
+ if mode != "w":
992
+ return mode
993
+ if self.name is not None:
994
+ suffix = Path(self.name).suffix
995
+ if suffix in (".gz", ".xz", ".bz2"):
996
+ mode = f"{mode}:{suffix[1:]}"
997
+ return mode
998
+
999
+ def infer_filename(self) -> str | None:
1000
+ """
1001
+ If an explicit archive_name is not given, we still want the file inside the zip
1002
+ file not to be named something.tar, because that causes confusion (GH39465).
1003
+ """
1004
+ if self.name is None:
1005
+ return None
1006
+
1007
+ filename = Path(self.name)
1008
+ if filename.suffix == ".tar":
1009
+ return filename.with_suffix("").name
1010
+ elif filename.suffix in (".tar.gz", ".tar.bz2", ".tar.xz"):
1011
+ return filename.with_suffix("").with_suffix("").name
1012
+ return filename.name
1013
+
1014
+ def write_to_buffer(self) -> None:
1015
+ # TarFile needs a non-empty string
1016
+ archive_name = self.archive_name or self.infer_filename() or "tar"
1017
+ tarinfo = tarfile.TarInfo(name=archive_name)
1018
+ tarinfo.size = len(self.getvalue())
1019
+ self.buffer.addfile(tarinfo, self)
1020
+
1021
+
1022
+ class _BytesZipFile(_BufferedWriter):
1023
+ def __init__(
1024
+ self,
1025
+ file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
1026
+ mode: str,
1027
+ archive_name: str | None = None,
1028
+ **kwargs,
1029
+ ) -> None:
1030
+ super().__init__()
1031
+ mode = mode.replace("b", "")
1032
+ self.archive_name = archive_name
1033
+
1034
+ kwargs.setdefault("compression", zipfile.ZIP_DEFLATED)
1035
+ # error: Incompatible types in assignment (expression has type "ZipFile",
1036
+ # base class "_BufferedWriter" defined the type as "BytesIO")
1037
+ self.buffer: zipfile.ZipFile = zipfile.ZipFile( # type: ignore[assignment]
1038
+ file, mode, **kwargs
1039
+ )
1040
+
1041
+ def infer_filename(self) -> str | None:
1042
+ """
1043
+ If an explicit archive_name is not given, we still want the file inside the zip
1044
+ file not to be named something.zip, because that causes confusion (GH39465).
1045
+ """
1046
+ if isinstance(self.buffer.filename, (os.PathLike, str)):
1047
+ filename = Path(self.buffer.filename)
1048
+ if filename.suffix == ".zip":
1049
+ return filename.with_suffix("").name
1050
+ return filename.name
1051
+ return None
1052
+
1053
+ def write_to_buffer(self) -> None:
1054
+ # ZipFile needs a non-empty string
1055
+ archive_name = self.archive_name or self.infer_filename() or "zip"
1056
+ self.buffer.writestr(archive_name, self.getvalue())
1057
+
1058
+
1059
+ class _IOWrapper:
1060
+ # TextIOWrapper is overly strict: it requests that the buffer be seekable, readable,
1061
+ # and writable. If we have a read-only buffer, we shouldn't need writable, and vice
1062
+ # versa. Some buffers are seek/read/writ-able but do not expose the "-able"
1063
+ # methods, e.g., tempfile.SpooledTemporaryFile.
1064
+ # If a buffer does not have the above "-able" methods, we simply assume it is
1065
+ # seek/read/writ-able.
1066
+ def __init__(self, buffer: BaseBuffer) -> None:
1067
+ self.buffer = buffer
1068
+
1069
+ def __getattr__(self, name: str):
1070
+ return getattr(self.buffer, name)
1071
+
1072
+ def readable(self) -> bool:
1073
+ if hasattr(self.buffer, "readable"):
1074
+ return self.buffer.readable()
1075
+ return True
1076
+
1077
+ def seekable(self) -> bool:
1078
+ if hasattr(self.buffer, "seekable"):
1079
+ return self.buffer.seekable()
1080
+ return True
1081
+
1082
+ def writable(self) -> bool:
1083
+ if hasattr(self.buffer, "writable"):
1084
+ return self.buffer.writable()
1085
+ return True
1086
+
1087
+
1088
+ class _BytesIOWrapper:
1089
+ # Wrapper that wraps a StringIO buffer and reads bytes from it
1090
+ # Created for compat with pyarrow read_csv
1091
+ def __init__(self, buffer: StringIO | TextIOBase, encoding: str = "utf-8") -> None:
1092
+ self.buffer = buffer
1093
+ self.encoding = encoding
1094
+ # Because a character can be represented by more than 1 byte,
1095
+ # it is possible that reading will produce more bytes than n
1096
+ # We store the extra bytes in this overflow variable, and append the
1097
+ # overflow to the front of the bytestring the next time reading is performed
1098
+ self.overflow = b""
1099
+
1100
+ def __getattr__(self, attr: str):
1101
+ return getattr(self.buffer, attr)
1102
+
1103
+ def read(self, n: int | None = -1) -> bytes:
1104
+ assert self.buffer is not None
1105
+ bytestring = self.buffer.read(n).encode(self.encoding)
1106
+ # When n=-1/n greater than remaining bytes: Read entire file/rest of file
1107
+ combined_bytestring = self.overflow + bytestring
1108
+ if n is None or n < 0 or n >= len(combined_bytestring):
1109
+ self.overflow = b""
1110
+ return combined_bytestring
1111
+ else:
1112
+ to_return = combined_bytestring[:n]
1113
+ self.overflow = combined_bytestring[n:]
1114
+ return to_return
1115
+
1116
+
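The overflow bookkeeping above is what keeps multi-byte encodings correct: decoding n characters can yield more than n bytes, and the surplus is replayed at the start of the next read. A small demonstration with the private class (its location may change between pandas versions):

from io import StringIO

from pandas.io.common import _BytesIOWrapper  # private helper defined above

wrapper = _BytesIOWrapper(StringIO("héllo"), encoding="utf-8")
first = wrapper.read(2)  # b"h\xc3": 'é' encodes to two bytes, one spills over
rest = wrapper.read()    # the overflow byte comes back at the front
assert first + rest == "héllo".encode("utf-8")
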
1117
+ def _maybe_memory_map(
1118
+ handle: str | BaseBuffer, memory_map: bool
1119
+ ) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
1120
+ """Try to memory map file/buffer."""
1121
+ handles: list[BaseBuffer] = []
1122
+ memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
1123
+ if not memory_map:
1124
+ return handle, memory_map, handles
1125
+
1126
+ # mmap is used only by read_csv
1127
+ handle = cast(ReadCsvBuffer, handle)
1128
+
1129
+ # need to open the file first
1130
+ if isinstance(handle, str):
1131
+ handle = open(handle, "rb")
1132
+ handles.append(handle)
1133
+
1134
+ try:
1135
+ # open mmap and adds *-able
1136
+ # error: Argument 1 to "_IOWrapper" has incompatible type "mmap";
1137
+ # expected "BaseBuffer"
1138
+ wrapped = _IOWrapper(
1139
+ mmap.mmap(
1140
+ handle.fileno(), 0, access=mmap.ACCESS_READ # type: ignore[arg-type]
1141
+ )
1142
+ )
1143
+ finally:
1144
+ for handle in reversed(handles):
1145
+ # error: "BaseBuffer" has no attribute "close"
1146
+ handle.close() # type: ignore[attr-defined]
1147
+
1148
+ return wrapped, memory_map, [wrapped]
1149
+
1150
+
1151
+ def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool:
1152
+ """Test whether file exists."""
1153
+ exists = False
1154
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
1155
+ if not isinstance(filepath_or_buffer, str):
1156
+ return exists
1157
+ try:
1158
+ exists = os.path.exists(filepath_or_buffer)
1159
+ # gh-5874: if the filepath is too long will raise here
1160
+ except (TypeError, ValueError):
1161
+ pass
1162
+ return exists
1163
+
1164
+
1165
+ def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:
1166
+ """Whether the handle is opened in binary mode"""
1167
+ # specified by user
1168
+ if "t" in mode or "b" in mode:
1169
+ return "b" in mode
1170
+
1171
+ # exceptions
1172
+ text_classes = (
1173
+ # classes that expect string but have 'b' in mode
1174
+ codecs.StreamWriter,
1175
+ codecs.StreamReader,
1176
+ codecs.StreamReaderWriter,
1177
+ )
1178
+ if issubclass(type(handle), text_classes):
1179
+ return False
1180
+
1181
+ return isinstance(handle, _get_binary_io_classes()) or "b" in getattr(
1182
+ handle, "mode", mode
1183
+ )
1184
+
1185
+
1186
+ @functools.lru_cache
1187
+ def _get_binary_io_classes() -> tuple[type, ...]:
1188
+ """IO classes that that expect bytes"""
1189
+ binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase)
1190
+
1191
+ # python-zstandard doesn't use any of the builtin base classes; instead we
1192
+ # have to use the `zstd.ZstdDecompressionReader` class for isinstance checks.
1193
+ # Unfortunately `zstd.ZstdDecompressionReader` isn't exposed by python-zstandard
1194
+ # so we have to get it from a `zstd.ZstdDecompressor` instance.
1195
+ # See also https://github.com/indygreg/python-zstandard/pull/165.
1196
+ zstd = import_optional_dependency("zstandard", errors="ignore")
1197
+ if zstd is not None:
1198
+ with zstd.ZstdDecompressor().stream_reader(b"") as reader:
1199
+ binary_classes += (type(reader),)
1200
+
1201
+ return binary_classes
1202
+
1203
+
1204
+ def is_potential_multi_index(
1205
+ columns: Sequence[Hashable] | MultiIndex,
1206
+ index_col: bool | Sequence[int] | None = None,
1207
+ ) -> bool:
1208
+ """
1209
+ Check whether or not the `columns` parameter
1210
+ could be converted into a MultiIndex.
1211
+
1212
+ Parameters
1213
+ ----------
1214
+ columns : array-like
1215
+ Object which may or may not be convertible into a MultiIndex
1216
+ index_col : None, bool or list, optional
1217
+ Column or columns to use as the (possibly hierarchical) index
1218
+
1219
+ Returns
1220
+ -------
1221
+ bool : Whether or not columns could become a MultiIndex
1222
+ """
1223
+ if index_col is None or isinstance(index_col, bool):
1224
+ index_col = []
1225
+
1226
+ return bool(
1227
+ len(columns)
1228
+ and not isinstance(columns, ABCMultiIndex)
1229
+ and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
1230
+ )
1231
+
1232
+
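Concretely: every non-index column must be a tuple for the columns to be MultiIndex material. A few cases via the private API; note the index_col annotation above says ints, but the membership check also works with labels, which is what the last case relies on:

from pandas.io.common import is_potential_multi_index

assert is_potential_multi_index([("a", "x"), ("a", "y")])  # all tuples
assert not is_potential_multi_index([("a", "x"), "b"])     # one plain label
# Columns named in index_col are exempt from the tuple check:
assert is_potential_multi_index([("a", "x"), "b"], index_col=["b"])
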
1233
+ def dedup_names(
1234
+ names: Sequence[Hashable], is_potential_multiindex: bool
1235
+ ) -> Sequence[Hashable]:
1236
+ """
1237
+ Rename column names if duplicates exist.
1238
+
1239
+ Currently the renaming is done by appending a period and an autonumeric,
1240
+ but a custom pattern may be supported in the future.
1241
+
1242
+ Examples
1243
+ --------
1244
+ >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False)
1245
+ ['x', 'y', 'x.1', 'x.2']
1246
+ """
1247
+ names = list(names) # so we can index
1248
+ counts: DefaultDict[Hashable, int] = defaultdict(int)
1249
+
1250
+ for i, col in enumerate(names):
1251
+ cur_count = counts[col]
1252
+
1253
+ while cur_count > 0:
1254
+ counts[col] = cur_count + 1
1255
+
1256
+ if is_potential_multiindex:
1257
+ # for mypy
1258
+ assert isinstance(col, tuple)
1259
+ col = col[:-1] + (f"{col[-1]}.{cur_count}",)
1260
+ else:
1261
+ col = f"{col}.{cur_count}"
1262
+ cur_count = counts[col]
1263
+
1264
+ names[i] = col
1265
+ counts[col] = cur_count + 1
1266
+
1267
+ return names
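
For tuple (potential MultiIndex) names, only the last level gets the .N suffix, mirroring the flat case in the docstring example above:

from pandas.io.common import dedup_names

assert dedup_names([("a", "x"), ("a", "x")], is_potential_multiindex=True) == [
    ("a", "x"),
    ("a", "x.1"),
]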
venv/lib/python3.10/site-packages/pandas/io/feather_format.py ADDED
@@ -0,0 +1,143 @@
1
+ """ feather-format compat """
2
+ from __future__ import annotations
3
+
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Any,
7
+ )
8
+
9
+ from pandas._config import using_pyarrow_string_dtype
10
+
11
+ from pandas._libs import lib
12
+ from pandas.compat._optional import import_optional_dependency
13
+ from pandas.util._decorators import doc
14
+ from pandas.util._validators import check_dtype_backend
15
+
16
+ import pandas as pd
17
+ from pandas.core.api import DataFrame
18
+ from pandas.core.shared_docs import _shared_docs
19
+
20
+ from pandas.io._util import arrow_string_types_mapper
21
+ from pandas.io.common import get_handle
22
+
23
+ if TYPE_CHECKING:
24
+ from collections.abc import (
25
+ Hashable,
26
+ Sequence,
27
+ )
28
+
29
+ from pandas._typing import (
30
+ DtypeBackend,
31
+ FilePath,
32
+ ReadBuffer,
33
+ StorageOptions,
34
+ WriteBuffer,
35
+ )
36
+
37
+
38
+ @doc(storage_options=_shared_docs["storage_options"])
39
+ def to_feather(
40
+ df: DataFrame,
41
+ path: FilePath | WriteBuffer[bytes],
42
+ storage_options: StorageOptions | None = None,
43
+ **kwargs: Any,
44
+ ) -> None:
45
+ """
46
+ Write a DataFrame to the binary Feather format.
47
+
48
+ Parameters
49
+ ----------
50
+ df : DataFrame
51
+ path : str, path object, or file-like object
52
+ {storage_options}
53
+ **kwargs :
54
+ Additional keywords passed to `pyarrow.feather.write_feather`.
55
+
56
+ """
57
+ import_optional_dependency("pyarrow")
58
+ from pyarrow import feather
59
+
60
+ if not isinstance(df, DataFrame):
61
+ raise ValueError("feather only support IO with DataFrames")
62
+
63
+ with get_handle(
64
+ path, "wb", storage_options=storage_options, is_text=False
65
+ ) as handles:
66
+ feather.write_feather(df, handles.handle, **kwargs)
67
+
68
+
69
+ @doc(storage_options=_shared_docs["storage_options"])
70
+ def read_feather(
71
+ path: FilePath | ReadBuffer[bytes],
72
+ columns: Sequence[Hashable] | None = None,
73
+ use_threads: bool = True,
74
+ storage_options: StorageOptions | None = None,
75
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
76
+ ) -> DataFrame:
77
+ """
78
+ Load a feather-format object from the file path.
79
+
80
+ Parameters
81
+ ----------
82
+ path : str, path object, or file-like object
83
+ String, path object (implementing ``os.PathLike[str]``), or file-like
84
+ object implementing a binary ``read()`` function. The string could be a URL.
85
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
86
+ expected. A local file could be: ``file://localhost/path/to/table.feather``.
87
+ columns : sequence, default None
88
+ If not provided, all columns are read.
89
+ use_threads : bool, default True
90
+ Whether to parallelize reading using multiple threads.
91
+ {storage_options}
92
+
93
+ dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
94
+ Back-end data type applied to the resultant :class:`DataFrame`
95
+ (still experimental). Behaviour is as follows:
96
+
97
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
98
+ (default).
99
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
100
+ DataFrame.
101
+
102
+ .. versionadded:: 2.0
103
+
104
+ Returns
105
+ -------
106
+ type of object stored in file
107
+
108
+ Examples
109
+ --------
110
+ >>> df = pd.read_feather("path/to/file.feather") # doctest: +SKIP
111
+ """
112
+ import_optional_dependency("pyarrow")
113
+ from pyarrow import feather
114
+
115
+ # import utils to register the pyarrow extension types
116
+ import pandas.core.arrays.arrow.extension_types # pyright: ignore[reportUnusedImport] # noqa: F401
117
+
118
+ check_dtype_backend(dtype_backend)
119
+
120
+ with get_handle(
121
+ path, "rb", storage_options=storage_options, is_text=False
122
+ ) as handles:
123
+ if dtype_backend is lib.no_default and not using_pyarrow_string_dtype():
124
+ return feather.read_feather(
125
+ handles.handle, columns=columns, use_threads=bool(use_threads)
126
+ )
127
+
128
+ pa_table = feather.read_table(
129
+ handles.handle, columns=columns, use_threads=bool(use_threads)
130
+ )
131
+
132
+ if dtype_backend == "numpy_nullable":
133
+ from pandas.io._util import _arrow_dtype_mapping
134
+
135
+ return pa_table.to_pandas(types_mapper=_arrow_dtype_mapping().get)
136
+
137
+ elif dtype_backend == "pyarrow":
138
+ return pa_table.to_pandas(types_mapper=pd.ArrowDtype)
139
+
140
+ elif using_pyarrow_string_dtype():
141
+ return pa_table.to_pandas(types_mapper=arrow_string_types_mapper())
142
+ else:
143
+ raise NotImplementedError
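
A hedged round-trip sketch for the two functions above; it requires pyarrow to be installed and writes a scratch file with a hypothetical name:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
df.to_feather("demo.feather")  # delegates to pyarrow.feather.write_feather
roundtrip = pd.read_feather("demo.feather")
assert roundtrip.equals(df)

# Opt into pyarrow-backed dtypes instead of the NumPy default:
arrow_df = pd.read_feather("demo.feather", dtype_backend="pyarrow")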
venv/lib/python3.10/site-packages/pandas/io/formats/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ # ruff: noqa: TCH004
2
+ from typing import TYPE_CHECKING
3
+
4
+ if TYPE_CHECKING:
5
+ # import modules that have public classes/functions
6
+ from pandas.io.formats import style
7
+
8
+ # and mark only those modules as public
9
+ __all__ = ["style"]
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (310 Bytes).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc ADDED
Binary file (4.52 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc ADDED
Binary file (1.92 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc ADDED
Binary file (10.7 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc ADDED
Binary file (9.88 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc ADDED
Binary file (24.9 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc ADDED
Binary file (57.8 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc ADDED
Binary file (16 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc ADDED
Binary file (36.5 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc ADDED
Binary file (16.9 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc ADDED
Binary file (6.49 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc ADDED
Binary file (137 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc ADDED
Binary file (75.7 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc ADDED
Binary file (14.8 kB).
venv/lib/python3.10/site-packages/pandas/io/formats/_color_data.py ADDED
@@ -0,0 +1,157 @@
1
+ # GH37967: Enable the use of CSS named colors, as defined in
2
+ # matplotlib.colors.CSS4_COLORS, when exporting to Excel.
3
+ # This data has been copied here, instead of being imported from matplotlib,
4
+ # not to have ``to_excel`` methods require matplotlib.
5
+ # source: matplotlib._color_data (3.3.3)
6
+ from __future__ import annotations
7
+
8
+ CSS4_COLORS = {
9
+ "aliceblue": "F0F8FF",
10
+ "antiquewhite": "FAEBD7",
11
+ "aqua": "00FFFF",
12
+ "aquamarine": "7FFFD4",
13
+ "azure": "F0FFFF",
14
+ "beige": "F5F5DC",
15
+ "bisque": "FFE4C4",
16
+ "black": "000000",
17
+ "blanchedalmond": "FFEBCD",
18
+ "blue": "0000FF",
19
+ "blueviolet": "8A2BE2",
20
+ "brown": "A52A2A",
21
+ "burlywood": "DEB887",
22
+ "cadetblue": "5F9EA0",
23
+ "chartreuse": "7FFF00",
24
+ "chocolate": "D2691E",
25
+ "coral": "FF7F50",
26
+ "cornflowerblue": "6495ED",
27
+ "cornsilk": "FFF8DC",
28
+ "crimson": "DC143C",
29
+ "cyan": "00FFFF",
30
+ "darkblue": "00008B",
31
+ "darkcyan": "008B8B",
32
+ "darkgoldenrod": "B8860B",
33
+ "darkgray": "A9A9A9",
34
+ "darkgreen": "006400",
35
+ "darkgrey": "A9A9A9",
36
+ "darkkhaki": "BDB76B",
37
+ "darkmagenta": "8B008B",
38
+ "darkolivegreen": "556B2F",
39
+ "darkorange": "FF8C00",
40
+ "darkorchid": "9932CC",
41
+ "darkred": "8B0000",
42
+ "darksalmon": "E9967A",
43
+ "darkseagreen": "8FBC8F",
44
+ "darkslateblue": "483D8B",
45
+ "darkslategray": "2F4F4F",
46
+ "darkslategrey": "2F4F4F",
47
+ "darkturquoise": "00CED1",
48
+ "darkviolet": "9400D3",
49
+ "deeppink": "FF1493",
50
+ "deepskyblue": "00BFFF",
51
+ "dimgray": "696969",
52
+ "dimgrey": "696969",
53
+ "dodgerblue": "1E90FF",
54
+ "firebrick": "B22222",
55
+ "floralwhite": "FFFAF0",
56
+ "forestgreen": "228B22",
57
+ "fuchsia": "FF00FF",
58
+ "gainsboro": "DCDCDC",
59
+ "ghostwhite": "F8F8FF",
60
+ "gold": "FFD700",
61
+ "goldenrod": "DAA520",
62
+ "gray": "808080",
63
+ "green": "008000",
64
+ "greenyellow": "ADFF2F",
65
+ "grey": "808080",
66
+ "honeydew": "F0FFF0",
67
+ "hotpink": "FF69B4",
68
+ "indianred": "CD5C5C",
69
+ "indigo": "4B0082",
70
+ "ivory": "FFFFF0",
71
+ "khaki": "F0E68C",
72
+ "lavender": "E6E6FA",
73
+ "lavenderblush": "FFF0F5",
74
+ "lawngreen": "7CFC00",
75
+ "lemonchiffon": "FFFACD",
76
+ "lightblue": "ADD8E6",
77
+ "lightcoral": "F08080",
78
+ "lightcyan": "E0FFFF",
79
+ "lightgoldenrodyellow": "FAFAD2",
80
+ "lightgray": "D3D3D3",
81
+ "lightgreen": "90EE90",
82
+ "lightgrey": "D3D3D3",
83
+ "lightpink": "FFB6C1",
84
+ "lightsalmon": "FFA07A",
85
+ "lightseagreen": "20B2AA",
86
+ "lightskyblue": "87CEFA",
87
+ "lightslategray": "778899",
88
+ "lightslategrey": "778899",
89
+ "lightsteelblue": "B0C4DE",
90
+ "lightyellow": "FFFFE0",
91
+ "lime": "00FF00",
92
+ "limegreen": "32CD32",
93
+ "linen": "FAF0E6",
94
+ "magenta": "FF00FF",
95
+ "maroon": "800000",
96
+ "mediumaquamarine": "66CDAA",
97
+ "mediumblue": "0000CD",
98
+ "mediumorchid": "BA55D3",
99
+ "mediumpurple": "9370DB",
100
+ "mediumseagreen": "3CB371",
101
+ "mediumslateblue": "7B68EE",
102
+ "mediumspringgreen": "00FA9A",
103
+ "mediumturquoise": "48D1CC",
104
+ "mediumvioletred": "C71585",
105
+ "midnightblue": "191970",
106
+ "mintcream": "F5FFFA",
107
+ "mistyrose": "FFE4E1",
108
+ "moccasin": "FFE4B5",
109
+ "navajowhite": "FFDEAD",
110
+ "navy": "000080",
111
+ "oldlace": "FDF5E6",
112
+ "olive": "808000",
113
+ "olivedrab": "6B8E23",
114
+ "orange": "FFA500",
115
+ "orangered": "FF4500",
116
+ "orchid": "DA70D6",
117
+ "palegoldenrod": "EEE8AA",
118
+ "palegreen": "98FB98",
119
+ "paleturquoise": "AFEEEE",
120
+ "palevioletred": "DB7093",
121
+ "papayawhip": "FFEFD5",
122
+ "peachpuff": "FFDAB9",
123
+ "peru": "CD853F",
124
+ "pink": "FFC0CB",
125
+ "plum": "DDA0DD",
126
+ "powderblue": "B0E0E6",
127
+ "purple": "800080",
128
+ "rebeccapurple": "663399",
129
+ "red": "FF0000",
130
+ "rosybrown": "BC8F8F",
131
+ "royalblue": "4169E1",
132
+ "saddlebrown": "8B4513",
133
+ "salmon": "FA8072",
134
+ "sandybrown": "F4A460",
135
+ "seagreen": "2E8B57",
136
+ "seashell": "FFF5EE",
137
+ "sienna": "A0522D",
138
+ "silver": "C0C0C0",
139
+ "skyblue": "87CEEB",
140
+ "slateblue": "6A5ACD",
141
+ "slategray": "708090",
142
+ "slategrey": "708090",
143
+ "snow": "FFFAFA",
144
+ "springgreen": "00FF7F",
145
+ "steelblue": "4682B4",
146
+ "tan": "D2B48C",
147
+ "teal": "008080",
148
+ "thistle": "D8BFD8",
149
+ "tomato": "FF6347",
150
+ "turquoise": "40E0D0",
151
+ "violet": "EE82EE",
152
+ "wheat": "F5DEB3",
153
+ "white": "FFFFFF",
154
+ "whitesmoke": "F5F5F5",
155
+ "yellow": "FFFF00",
156
+ "yellowgreen": "9ACD32",
157
+ }
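
The values are bare RRGGBB hex strings (no leading '#'), which is the form the Excel writers consume; prepend '#' if a CSS-style color is needed:

from pandas.io.formats._color_data import CSS4_COLORS  # private module

assert CSS4_COLORS["rebeccapurple"] == "663399"
css_color = "#" + CSS4_COLORS["steelblue"]  # '#4682B4'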
venv/lib/python3.10/site-packages/pandas/io/formats/console.py ADDED
@@ -0,0 +1,94 @@
1
+ """
2
+ Internal module for console introspection
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from shutil import get_terminal_size
7
+
8
+
9
+ def get_console_size() -> tuple[int | None, int | None]:
10
+ """
11
+ Return console size as tuple = (width, height).
12
+
13
+ Returns (None,None) in non-interactive session.
14
+ """
15
+ from pandas import get_option
16
+
17
+ display_width = get_option("display.width")
18
+ display_height = get_option("display.max_rows")
19
+
20
+ # Consider three cases:
21
+ # - interactive shell terminal: the terminal size can be detected
22
+ # - interactive non-shell terminal (ipnb/ipqtconsole): the terminal size
23
+ # cannot be detected
24
+ # - non-interactive script: the terminal size should be disregarded
25
+
26
+ # In addition: width and height have default values, but setting them to
27
+ # 'None' signals that auto-detection should be used, though only in an
28
+ # interactive shell terminal.
29
+
30
+ if in_interactive_session():
31
+ if in_ipython_frontend():
32
+ # sane defaults for interactive non-shell terminal
33
+ # match default for width,height in config_init
34
+ from pandas._config.config import get_default_val
35
+
36
+ terminal_width = get_default_val("display.width")
37
+ terminal_height = get_default_val("display.max_rows")
38
+ else:
39
+ # pure terminal
40
+ terminal_width, terminal_height = get_terminal_size()
41
+ else:
42
+ terminal_width, terminal_height = None, None
43
+
44
+ # Note if the User sets width/Height to None (auto-detection)
45
+ # and we're in a script (non-inter), this will return (None,None)
46
+ # caller needs to deal.
47
+ return display_width or terminal_width, display_height or terminal_height
48
+
49
+
50
+ # ----------------------------------------------------------------------
51
+ # Detect our environment
52
+
53
+
54
+ def in_interactive_session() -> bool:
55
+ """
56
+ Check if we're running in an interactive shell.
57
+
58
+ Returns
59
+ -------
60
+ bool
61
+ True if running under python/ipython interactive shell.
62
+ """
63
+ from pandas import get_option
64
+
65
+ def check_main():
66
+ try:
67
+ import __main__ as main
68
+ except ModuleNotFoundError:
69
+ return get_option("mode.sim_interactive")
70
+ return not hasattr(main, "__file__") or get_option("mode.sim_interactive")
71
+
72
+ try:
73
+ # error: Name '__IPYTHON__' is not defined
74
+ return __IPYTHON__ or check_main() # type: ignore[name-defined]
75
+ except NameError:
76
+ return check_main()
77
+
78
+
79
+ def in_ipython_frontend() -> bool:
80
+ """
81
+ Check if we're inside an IPython zmq frontend.
82
+
83
+ Returns
84
+ -------
85
+ bool
86
+ """
87
+ try:
88
+ # error: Name 'get_ipython' is not defined
89
+ ip = get_ipython() # type: ignore[name-defined]
90
+ return "zmq" in str(type(ip)).lower()
91
+ except NameError:
92
+ pass
93
+
94
+ return False
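A minimal usage sketch for the console helpers above; the 80-column fallback is an assumption for illustration, not part of the module:

    from pandas.io.formats.console import get_console_size, in_interactive_session

    width, height = get_console_size()
    if width is None:
        # (None, None) is possible when display.width/display.max_rows are
        # configured as None outside an interactive shell; pick a fallback.
        width = 80
    print(in_interactive_session(), width, height)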
venv/lib/python3.10/site-packages/pandas/io/formats/css.py ADDED
@@ -0,0 +1,421 @@
1
+ """
2
+ Utilities for interpreting CSS from Stylers for formatting non-HTML outputs.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import re
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ Callable,
10
+ )
11
+ import warnings
12
+
13
+ from pandas.errors import CSSWarning
14
+ from pandas.util._exceptions import find_stack_level
15
+
16
+ if TYPE_CHECKING:
17
+ from collections.abc import (
18
+ Generator,
19
+ Iterable,
20
+ Iterator,
21
+ )
22
+
23
+
24
+ def _side_expander(prop_fmt: str) -> Callable:
25
+ """
26
+ Wrapper to expand a shorthand property into top, right, bottom, and left properties
27
+
28
+ Parameters
29
+ ----------
30
+ prop_fmt : str
31
+ Format string used to build the side-specific property names
32
+
33
+ Returns
34
+ -------
35
+ function: Function to call when a shorthand '{prop}: {value}' declaration is encountered
36
+ """
37
+
38
+ def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
39
+ """
40
+ Expand shorthand property into side-specific property (top, right, bottom, left)
41
+
42
+ Parameters
43
+ ----------
44
+ prop (str): CSS property name
45
+ value (str): String token for property
46
+
47
+ Yields
48
+ ------
49
+ Tuple (str, str): Expanded property, value
50
+ """
51
+ tokens = value.split()
52
+ try:
53
+ mapping = self.SIDE_SHORTHANDS[len(tokens)]
54
+ except KeyError:
55
+ warnings.warn(
56
+ f'Could not expand "{prop}: {value}"',
57
+ CSSWarning,
58
+ stacklevel=find_stack_level(),
59
+ )
60
+ return
61
+ for key, idx in zip(self.SIDES, mapping):
62
+ yield prop_fmt.format(key), tokens[idx]
63
+
64
+ return expand
65
+
66
+
67
+ def _border_expander(side: str = "") -> Callable:
68
+ """
69
+ Wrapper to expand 'border' property into border color, style, and width properties
70
+
71
+ Parameters
72
+ ----------
73
+ side : str
74
+ The border side to expand into properties
75
+
76
+ Returns
77
+ -------
78
+ function: Function to call when a 'border(-{side}): {value}' declaration is encountered
79
+ """
80
+ if side != "":
81
+ side = f"-{side}"
82
+
83
+ def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
84
+ """
85
+ Expand border into color, style, and width tuples
86
+
87
+ Parameters
88
+ ----------
89
+ prop : str
90
+ CSS property name passed to styler
91
+ value : str
92
+ Value passed to styler for property
93
+
94
+ Yields
95
+ ------
96
+ Tuple (str, str): Expanded property, value
97
+ """
98
+ tokens = value.split()
99
+ if len(tokens) == 0 or len(tokens) > 3:
100
+ warnings.warn(
101
+ f'Wrong number of tokens provided to "{prop}" (expected 1-3)',
102
+ CSSWarning,
103
+ stacklevel=find_stack_level(),
104
+ )
105
+
106
+ # TODO: Can we use current color as initial value to comply with CSS standards?
107
+ border_declarations = {
108
+ f"border{side}-color": "black",
109
+ f"border{side}-style": "none",
110
+ f"border{side}-width": "medium",
111
+ }
112
+ for token in tokens:
113
+ if token.lower() in self.BORDER_STYLES:
114
+ border_declarations[f"border{side}-style"] = token
115
+ elif any(ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS):
116
+ border_declarations[f"border{side}-width"] = token
117
+ else:
118
+ border_declarations[f"border{side}-color"] = token
119
+ # TODO: Warn user if item entered more than once (e.g. "border: red green")
120
+
121
+ # Per CSS, "border" will reset previous "border-*" definitions
122
+ yield from self.atomize(border_declarations.items())
123
+
124
+ return expand
125
+
126
+
127
+ class CSSResolver:
128
+ """
129
+ A callable for parsing and resolving CSS to atomic properties.
130
+ """
131
+
132
+ UNIT_RATIOS = {
133
+ "pt": ("pt", 1),
134
+ "em": ("em", 1),
135
+ "rem": ("pt", 12),
136
+ "ex": ("em", 0.5),
137
+ # 'ch':
138
+ "px": ("pt", 0.75),
139
+ "pc": ("pt", 12),
140
+ "in": ("pt", 72),
141
+ "cm": ("in", 1 / 2.54),
142
+ "mm": ("in", 1 / 25.4),
143
+ "q": ("mm", 0.25),
144
+ "!!default": ("em", 0),
145
+ }
146
+
147
+ FONT_SIZE_RATIOS = UNIT_RATIOS.copy()
148
+ FONT_SIZE_RATIOS.update(
149
+ {
150
+ "%": ("em", 0.01),
151
+ "xx-small": ("rem", 0.5),
152
+ "x-small": ("rem", 0.625),
153
+ "small": ("rem", 0.8),
154
+ "medium": ("rem", 1),
155
+ "large": ("rem", 1.125),
156
+ "x-large": ("rem", 1.5),
157
+ "xx-large": ("rem", 2),
158
+ "smaller": ("em", 1 / 1.2),
159
+ "larger": ("em", 1.2),
160
+ "!!default": ("em", 1),
161
+ }
162
+ )
163
+
164
+ MARGIN_RATIOS = UNIT_RATIOS.copy()
165
+ MARGIN_RATIOS.update({"none": ("pt", 0)})
166
+
167
+ BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()
168
+ BORDER_WIDTH_RATIOS.update(
169
+ {
170
+ "none": ("pt", 0),
171
+ "thick": ("px", 4),
172
+ "medium": ("px", 2),
173
+ "thin": ("px", 1),
174
+ # Default: medium only if solid
175
+ }
176
+ )
177
+
178
+ BORDER_STYLES = [
179
+ "none",
180
+ "hidden",
181
+ "dotted",
182
+ "dashed",
183
+ "solid",
184
+ "double",
185
+ "groove",
186
+ "ridge",
187
+ "inset",
188
+ "outset",
189
+ "mediumdashdot",
190
+ "dashdotdot",
191
+ "hair",
192
+ "mediumdashdotdot",
193
+ "dashdot",
194
+ "slantdashdot",
195
+ "mediumdashed",
196
+ ]
197
+
198
+ SIDE_SHORTHANDS = {
199
+ 1: [0, 0, 0, 0],
200
+ 2: [0, 1, 0, 1],
201
+ 3: [0, 1, 2, 1],
202
+ 4: [0, 1, 2, 3],
203
+ }
204
+
205
+ SIDES = ("top", "right", "bottom", "left")
206
+
207
+ CSS_EXPANSIONS = {
208
+ **{
209
+ (f"border-{prop}" if prop else "border"): _border_expander(prop)
210
+ for prop in ["", "top", "right", "bottom", "left"]
211
+ },
212
+ **{
213
+ f"border-{prop}": _side_expander(f"border-{{:s}}-{prop}")
214
+ for prop in ["color", "style", "width"]
215
+ },
216
+ "margin": _side_expander("margin-{:s}"),
217
+ "padding": _side_expander("padding-{:s}"),
218
+ }
219
+
220
+ def __call__(
221
+ self,
222
+ declarations: str | Iterable[tuple[str, str]],
223
+ inherited: dict[str, str] | None = None,
224
+ ) -> dict[str, str]:
225
+ """
226
+ Resolve the given declarations to atomic properties.
227
+
228
+ Parameters
229
+ ----------
230
+ declarations : str | Iterable[tuple[str, str]]
231
+ A CSS string or set of CSS declaration tuples
232
+ e.g. "font-weight: bold; background: blue" or
233
+ {("font-weight", "bold"), ("background", "blue")}
234
+ inherited : dict, optional
235
+ Atomic properties indicating the inherited style context in which
236
+ ``declarations`` is to be resolved. ``inherited`` should already
237
+ be resolved, i.e. valid output of this method.
238
+
239
+ Returns
240
+ -------
241
+ dict
242
+ Atomic CSS 2.2 properties.
243
+
244
+ Examples
245
+ --------
246
+ >>> resolve = CSSResolver()
247
+ >>> inherited = {'font-family': 'serif', 'font-weight': 'bold'}
248
+ >>> out = resolve('''
249
+ ... border-color: BLUE RED;
250
+ ... font-size: 1em;
251
+ ... font-size: 2em;
252
+ ... font-weight: normal;
253
+ ... font-weight: inherit;
254
+ ... ''', inherited)
255
+ >>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE
256
+ [('border-bottom-color', 'blue'),
257
+ ('border-left-color', 'red'),
258
+ ('border-right-color', 'red'),
259
+ ('border-top-color', 'blue'),
260
+ ('font-family', 'serif'),
261
+ ('font-size', '24pt'),
262
+ ('font-weight', 'bold')]
263
+ """
264
+ if isinstance(declarations, str):
265
+ declarations = self.parse(declarations)
266
+ props = dict(self.atomize(declarations))
267
+ if inherited is None:
268
+ inherited = {}
269
+
270
+ props = self._update_initial(props, inherited)
271
+ props = self._update_font_size(props, inherited)
272
+ return self._update_other_units(props)
273
+
274
+ def _update_initial(
275
+ self,
276
+ props: dict[str, str],
277
+ inherited: dict[str, str],
278
+ ) -> dict[str, str]:
279
+ # 1. resolve inherited, initial
280
+ for prop, val in inherited.items():
281
+ if prop not in props:
282
+ props[prop] = val
283
+
284
+ new_props = props.copy()
285
+ for prop, val in props.items():
286
+ if val == "inherit":
287
+ val = inherited.get(prop, "initial")
288
+
289
+ if val in ("initial", None):
290
+ # we do not define a complete initial stylesheet
291
+ del new_props[prop]
292
+ else:
293
+ new_props[prop] = val
294
+ return new_props
295
+
296
+ def _update_font_size(
297
+ self,
298
+ props: dict[str, str],
299
+ inherited: dict[str, str],
300
+ ) -> dict[str, str]:
301
+ # 2. resolve relative font size
302
+ if props.get("font-size"):
303
+ props["font-size"] = self.size_to_pt(
304
+ props["font-size"],
305
+ self._get_font_size(inherited),
306
+ conversions=self.FONT_SIZE_RATIOS,
307
+ )
308
+ return props
309
+
310
+ def _get_font_size(self, props: dict[str, str]) -> float | None:
311
+ if props.get("font-size"):
312
+ font_size_string = props["font-size"]
313
+ return self._get_float_font_size_from_pt(font_size_string)
314
+ return None
315
+
316
+ def _get_float_font_size_from_pt(self, font_size_string: str) -> float:
317
+ assert font_size_string.endswith("pt")
318
+ return float(font_size_string.rstrip("pt"))
319
+
320
+ def _update_other_units(self, props: dict[str, str]) -> dict[str, str]:
321
+ font_size = self._get_font_size(props)
322
+ # 3. TODO: resolve other font-relative units
323
+ for side in self.SIDES:
324
+ prop = f"border-{side}-width"
325
+ if prop in props:
326
+ props[prop] = self.size_to_pt(
327
+ props[prop],
328
+ em_pt=font_size,
329
+ conversions=self.BORDER_WIDTH_RATIOS,
330
+ )
331
+
332
+ for prop in [f"margin-{side}", f"padding-{side}"]:
333
+ if prop in props:
334
+ # TODO: support %
335
+ props[prop] = self.size_to_pt(
336
+ props[prop],
337
+ em_pt=font_size,
338
+ conversions=self.MARGIN_RATIOS,
339
+ )
340
+ return props
341
+
342
+ def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS) -> str:
343
+ def _error():
344
+ warnings.warn(
345
+ f"Unhandled size: {repr(in_val)}",
346
+ CSSWarning,
347
+ stacklevel=find_stack_level(),
348
+ )
349
+ return self.size_to_pt("1!!default", conversions=conversions)
350
+
351
+ match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val)
352
+ if match is None:
353
+ return _error()
354
+
355
+ val, unit = match.groups()
356
+ if val == "":
357
+ # hack for 'large' etc.
358
+ val = 1
359
+ else:
360
+ try:
361
+ val = float(val)
362
+ except ValueError:
363
+ return _error()
364
+
365
+ while unit != "pt":
366
+ if unit == "em":
367
+ if em_pt is None:
368
+ unit = "rem"
369
+ else:
370
+ val *= em_pt
371
+ unit = "pt"
372
+ continue
373
+
374
+ try:
375
+ unit, mul = conversions[unit]
376
+ except KeyError:
377
+ return _error()
378
+ val *= mul
379
+
380
+ val = round(val, 5)
381
+ if int(val) == val:
382
+ size_fmt = f"{int(val):d}pt"
383
+ else:
384
+ size_fmt = f"{val:f}pt"
385
+ return size_fmt
386
+
387
+ def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]:
388
+ for prop, value in declarations:
389
+ prop = prop.lower()
390
+ value = value.lower()
391
+ if prop in self.CSS_EXPANSIONS:
392
+ expand = self.CSS_EXPANSIONS[prop]
393
+ yield from expand(self, prop, value)
394
+ else:
395
+ yield prop, value
396
+
397
+ def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]:
398
+ """
399
+ Generates (prop, value) pairs from declarations.
400
+
401
+ In a future version may generate parsed tokens from tinycss/tinycss2
402
+
403
+ Parameters
404
+ ----------
405
+ declarations_str : str
406
+ """
407
+ for decl in declarations_str.split(";"):
408
+ if not decl.strip():
409
+ continue
410
+ prop, sep, val = decl.partition(":")
411
+ prop = prop.strip().lower()
412
+ # TODO: don't lowercase case sensitive parts of values (strings)
413
+ val = val.strip().lower()
414
+ if sep:
415
+ yield prop, val
416
+ else:
417
+ warnings.warn(
418
+ f"Ill-formatted attribute: expected a colon in {repr(decl)}",
419
+ CSSWarning,
420
+ stacklevel=find_stack_level(),
421
+ )
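A short sketch of the resolver above used in isolation; the resolved values follow the UNIT_RATIOS table (1em falls back to the 12pt default when no font size is inherited):

    from pandas.io.formats.css import CSSResolver

    resolve = CSSResolver()
    props = resolve("border: 1px solid red; margin: 1em")
    # Shorthands are expanded into atomic, side-specific properties and
    # lengths are normalized to points, e.g.:
    #   props["border-top-style"] == "solid"
    #   props["margin-top"] == "12pt"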
venv/lib/python3.10/site-packages/pandas/io/formats/csvs.py ADDED
@@ -0,0 +1,330 @@
1
+ """
2
+ Module for formatting output data into CSV files.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ from collections.abc import (
8
+ Hashable,
9
+ Iterable,
10
+ Iterator,
11
+ Sequence,
12
+ )
13
+ import csv as csvlib
14
+ import os
15
+ from typing import (
16
+ TYPE_CHECKING,
17
+ Any,
18
+ cast,
19
+ )
20
+
21
+ import numpy as np
22
+
23
+ from pandas._libs import writers as libwriters
24
+ from pandas._typing import SequenceNotStr
25
+ from pandas.util._decorators import cache_readonly
26
+
27
+ from pandas.core.dtypes.generic import (
28
+ ABCDatetimeIndex,
29
+ ABCIndex,
30
+ ABCMultiIndex,
31
+ ABCPeriodIndex,
32
+ )
33
+ from pandas.core.dtypes.missing import notna
34
+
35
+ from pandas.core.indexes.api import Index
36
+
37
+ from pandas.io.common import get_handle
38
+
39
+ if TYPE_CHECKING:
40
+ from pandas._typing import (
41
+ CompressionOptions,
42
+ FilePath,
43
+ FloatFormatType,
44
+ IndexLabel,
45
+ StorageOptions,
46
+ WriteBuffer,
47
+ npt,
48
+ )
49
+
50
+ from pandas.io.formats.format import DataFrameFormatter
51
+
52
+
53
+ _DEFAULT_CHUNKSIZE_CELLS = 100_000
54
+
55
+
56
+ class CSVFormatter:
57
+ cols: npt.NDArray[np.object_]
58
+
59
+ def __init__(
60
+ self,
61
+ formatter: DataFrameFormatter,
62
+ path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "",
63
+ sep: str = ",",
64
+ cols: Sequence[Hashable] | None = None,
65
+ index_label: IndexLabel | None = None,
66
+ mode: str = "w",
67
+ encoding: str | None = None,
68
+ errors: str = "strict",
69
+ compression: CompressionOptions = "infer",
70
+ quoting: int | None = None,
71
+ lineterminator: str | None = "\n",
72
+ chunksize: int | None = None,
73
+ quotechar: str | None = '"',
74
+ date_format: str | None = None,
75
+ doublequote: bool = True,
76
+ escapechar: str | None = None,
77
+ storage_options: StorageOptions | None = None,
78
+ ) -> None:
79
+ self.fmt = formatter
80
+
81
+ self.obj = self.fmt.frame
82
+
83
+ self.filepath_or_buffer = path_or_buf
84
+ self.encoding = encoding
85
+ self.compression: CompressionOptions = compression
86
+ self.mode = mode
87
+ self.storage_options = storage_options
88
+
89
+ self.sep = sep
90
+ self.index_label = self._initialize_index_label(index_label)
91
+ self.errors = errors
92
+ self.quoting = quoting or csvlib.QUOTE_MINIMAL
93
+ self.quotechar = self._initialize_quotechar(quotechar)
94
+ self.doublequote = doublequote
95
+ self.escapechar = escapechar
96
+ self.lineterminator = lineterminator or os.linesep
97
+ self.date_format = date_format
98
+ self.cols = self._initialize_columns(cols)
99
+ self.chunksize = self._initialize_chunksize(chunksize)
100
+
101
+ @property
102
+ def na_rep(self) -> str:
103
+ return self.fmt.na_rep
104
+
105
+ @property
106
+ def float_format(self) -> FloatFormatType | None:
107
+ return self.fmt.float_format
108
+
109
+ @property
110
+ def decimal(self) -> str:
111
+ return self.fmt.decimal
112
+
113
+ @property
114
+ def header(self) -> bool | SequenceNotStr[str]:
115
+ return self.fmt.header
116
+
117
+ @property
118
+ def index(self) -> bool:
119
+ return self.fmt.index
120
+
121
+ def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel:
122
+ if index_label is not False:
123
+ if index_label is None:
124
+ return self._get_index_label_from_obj()
125
+ elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)):
126
+ # given a string for a DF with Index
127
+ return [index_label]
128
+ return index_label
129
+
130
+ def _get_index_label_from_obj(self) -> Sequence[Hashable]:
131
+ if isinstance(self.obj.index, ABCMultiIndex):
132
+ return self._get_index_label_multiindex()
133
+ else:
134
+ return self._get_index_label_flat()
135
+
136
+ def _get_index_label_multiindex(self) -> Sequence[Hashable]:
137
+ return [name or "" for name in self.obj.index.names]
138
+
139
+ def _get_index_label_flat(self) -> Sequence[Hashable]:
140
+ index_label = self.obj.index.name
141
+ return [""] if index_label is None else [index_label]
142
+
143
+ def _initialize_quotechar(self, quotechar: str | None) -> str | None:
144
+ if self.quoting != csvlib.QUOTE_NONE:
145
+ # prevents crash in _csv
146
+ return quotechar
147
+ return None
148
+
149
+ @property
150
+ def has_mi_columns(self) -> bool:
151
+ return bool(isinstance(self.obj.columns, ABCMultiIndex))
152
+
153
+ def _initialize_columns(
154
+ self, cols: Iterable[Hashable] | None
155
+ ) -> npt.NDArray[np.object_]:
156
+ # validate mi options
157
+ if self.has_mi_columns:
158
+ if cols is not None:
159
+ msg = "cannot specify cols with a MultiIndex on the columns"
160
+ raise TypeError(msg)
161
+
162
+ if cols is not None:
163
+ if isinstance(cols, ABCIndex):
164
+ cols = cols._get_values_for_csv(**self._number_format)
165
+ else:
166
+ cols = list(cols)
167
+ self.obj = self.obj.loc[:, cols]
168
+
169
+ # update columns to include possible multiplicity of dupes
170
+ # and make sure cols is just a list of labels
171
+ new_cols = self.obj.columns
172
+ return new_cols._get_values_for_csv(**self._number_format)
173
+
174
+ def _initialize_chunksize(self, chunksize: int | None) -> int:
175
+ if chunksize is None:
176
+ return (_DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1)) or 1
177
+ return int(chunksize)
178
+
179
+ @property
180
+ def _number_format(self) -> dict[str, Any]:
181
+ """Dictionary used for storing number formatting settings."""
182
+ return {
183
+ "na_rep": self.na_rep,
184
+ "float_format": self.float_format,
185
+ "date_format": self.date_format,
186
+ "quoting": self.quoting,
187
+ "decimal": self.decimal,
188
+ }
189
+
190
+ @cache_readonly
191
+ def data_index(self) -> Index:
192
+ data_index = self.obj.index
193
+ if (
194
+ isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex))
195
+ and self.date_format is not None
196
+ ):
197
+ data_index = Index(
198
+ [x.strftime(self.date_format) if notna(x) else "" for x in data_index]
199
+ )
200
+ elif isinstance(data_index, ABCMultiIndex):
201
+ data_index = data_index.remove_unused_levels()
202
+ return data_index
203
+
204
+ @property
205
+ def nlevels(self) -> int:
206
+ if self.index:
207
+ return getattr(self.data_index, "nlevels", 1)
208
+ else:
209
+ return 0
210
+
211
+ @property
212
+ def _has_aliases(self) -> bool:
213
+ return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))
214
+
215
+ @property
216
+ def _need_to_save_header(self) -> bool:
217
+ return bool(self._has_aliases or self.header)
218
+
219
+ @property
220
+ def write_cols(self) -> SequenceNotStr[Hashable]:
221
+ if self._has_aliases:
222
+ assert not isinstance(self.header, bool)
223
+ if len(self.header) != len(self.cols):
224
+ raise ValueError(
225
+ f"Writing {len(self.cols)} cols but got {len(self.header)} aliases"
226
+ )
227
+ return self.header
228
+ else:
229
+ # self.cols is an ndarray derived from Index._get_values_for_csv,
230
+ # so its entries are strings, i.e. hashable
231
+ return cast(SequenceNotStr[Hashable], self.cols)
232
+
233
+ @property
234
+ def encoded_labels(self) -> list[Hashable]:
235
+ encoded_labels: list[Hashable] = []
236
+
237
+ if self.index and self.index_label:
238
+ assert isinstance(self.index_label, Sequence)
239
+ encoded_labels = list(self.index_label)
240
+
241
+ if not self.has_mi_columns or self._has_aliases:
242
+ encoded_labels += list(self.write_cols)
243
+
244
+ return encoded_labels
245
+
246
+ def save(self) -> None:
247
+ """
248
+ Create the writer & save.
249
+ """
250
+ # apply compression and byte/text conversion
251
+ with get_handle(
252
+ self.filepath_or_buffer,
253
+ self.mode,
254
+ encoding=self.encoding,
255
+ errors=self.errors,
256
+ compression=self.compression,
257
+ storage_options=self.storage_options,
258
+ ) as handles:
259
+ # Note: self.encoding is irrelevant here
260
+ self.writer = csvlib.writer(
261
+ handles.handle,
262
+ lineterminator=self.lineterminator,
263
+ delimiter=self.sep,
264
+ quoting=self.quoting,
265
+ doublequote=self.doublequote,
266
+ escapechar=self.escapechar,
267
+ quotechar=self.quotechar,
268
+ )
269
+
270
+ self._save()
271
+
272
+ def _save(self) -> None:
273
+ if self._need_to_save_header:
274
+ self._save_header()
275
+ self._save_body()
276
+
277
+ def _save_header(self) -> None:
278
+ if not self.has_mi_columns or self._has_aliases:
279
+ self.writer.writerow(self.encoded_labels)
280
+ else:
281
+ for row in self._generate_multiindex_header_rows():
282
+ self.writer.writerow(row)
283
+
284
+ def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]:
285
+ columns = self.obj.columns
286
+ for i in range(columns.nlevels):
287
+ # we need at least 1 index column to write our col names
288
+ col_line = []
289
+ if self.index:
290
+ # name is the first column
291
+ col_line.append(columns.names[i])
292
+
293
+ if isinstance(self.index_label, list) and len(self.index_label) > 1:
294
+ col_line.extend([""] * (len(self.index_label) - 1))
295
+
296
+ col_line.extend(columns._get_level_values(i))
297
+ yield col_line
298
+
299
+ # Write out the index line if it's not empty.
300
+ # Otherwise, we will print out an extraneous
301
+ # blank line between the MultiIndex and the data rows.
302
+ if self.encoded_labels and set(self.encoded_labels) != {""}:
303
+ yield self.encoded_labels + [""] * len(columns)
304
+
305
+ def _save_body(self) -> None:
306
+ nrows = len(self.data_index)
307
+ chunks = (nrows // self.chunksize) + 1
308
+ for i in range(chunks):
309
+ start_i = i * self.chunksize
310
+ end_i = min(start_i + self.chunksize, nrows)
311
+ if start_i >= end_i:
312
+ break
313
+ self._save_chunk(start_i, end_i)
314
+
315
+ def _save_chunk(self, start_i: int, end_i: int) -> None:
316
+ # create the data for a chunk
317
+ slicer = slice(start_i, end_i)
318
+ df = self.obj.iloc[slicer]
319
+
320
+ res = df._get_values_for_csv(**self._number_format)
321
+ data = list(res._iter_column_arrays())
322
+
323
+ ix = self.data_index[slicer]._get_values_for_csv(**self._number_format)
324
+ libwriters.write_csv_rows(
325
+ data,
326
+ ix,
327
+ self.nlevels,
328
+ self.cols,
329
+ self.writer,
330
+ )
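The chunked body write above is driven by _initialize_chunksize; a hedged sketch of the default rule (default_chunksize is a hypothetical helper name, and the column counts are made up for illustration):

    # Roughly _DEFAULT_CHUNKSIZE_CELLS (100_000) cells per chunk, spread
    # across the frame's columns, with a floor of one row per chunk.
    _DEFAULT_CHUNKSIZE_CELLS = 100_000

    def default_chunksize(ncols: int) -> int:
        return (_DEFAULT_CHUNKSIZE_CELLS // (ncols or 1)) or 1

    assert default_chunksize(25) == 4_000     # 25 columns -> 4000 rows/chunk
    assert default_chunksize(0) == 100_000    # degenerate frame, no columns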
venv/lib/python3.10/site-packages/pandas/io/formats/excel.py ADDED
@@ -0,0 +1,962 @@
1
+ """
2
+ Utilities for conversion to writer-agnostic Excel representation.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from collections.abc import (
7
+ Hashable,
8
+ Iterable,
9
+ Mapping,
10
+ Sequence,
11
+ )
12
+ import functools
13
+ import itertools
14
+ import re
15
+ from typing import (
16
+ TYPE_CHECKING,
17
+ Any,
18
+ Callable,
19
+ cast,
20
+ )
21
+ import warnings
22
+
23
+ import numpy as np
24
+
25
+ from pandas._libs.lib import is_list_like
26
+ from pandas.util._decorators import doc
27
+ from pandas.util._exceptions import find_stack_level
28
+
29
+ from pandas.core.dtypes import missing
30
+ from pandas.core.dtypes.common import (
31
+ is_float,
32
+ is_scalar,
33
+ )
34
+
35
+ from pandas import (
36
+ DataFrame,
37
+ Index,
38
+ MultiIndex,
39
+ PeriodIndex,
40
+ )
41
+ import pandas.core.common as com
42
+ from pandas.core.shared_docs import _shared_docs
43
+
44
+ from pandas.io.formats._color_data import CSS4_COLORS
45
+ from pandas.io.formats.css import (
46
+ CSSResolver,
47
+ CSSWarning,
48
+ )
49
+ from pandas.io.formats.format import get_level_lengths
50
+ from pandas.io.formats.printing import pprint_thing
51
+
52
+ if TYPE_CHECKING:
53
+ from pandas._typing import (
54
+ FilePath,
55
+ IndexLabel,
56
+ StorageOptions,
57
+ WriteExcelBuffer,
58
+ )
59
+
60
+ from pandas import ExcelWriter
61
+
62
+
63
+ class ExcelCell:
64
+ __fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")
65
+ __slots__ = __fields__
66
+
67
+ def __init__(
68
+ self,
69
+ row: int,
70
+ col: int,
71
+ val,
72
+ style=None,
73
+ mergestart: int | None = None,
74
+ mergeend: int | None = None,
75
+ ) -> None:
76
+ self.row = row
77
+ self.col = col
78
+ self.val = val
79
+ self.style = style
80
+ self.mergestart = mergestart
81
+ self.mergeend = mergeend
82
+
83
+
84
+ class CssExcelCell(ExcelCell):
85
+ def __init__(
86
+ self,
87
+ row: int,
88
+ col: int,
89
+ val,
90
+ style: dict | None,
91
+ css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None,
92
+ css_row: int,
93
+ css_col: int,
94
+ css_converter: Callable | None,
95
+ **kwargs,
96
+ ) -> None:
97
+ if css_styles and css_converter:
98
+ # Use dict to get only one (case-insensitive) declaration per property
99
+ declaration_dict = {
100
+ prop.lower(): val for prop, val in css_styles[css_row, css_col]
101
+ }
102
+ # Convert to frozenset for order-invariant caching
103
+ unique_declarations = frozenset(declaration_dict.items())
104
+ style = css_converter(unique_declarations)
105
+
106
+ super().__init__(row=row, col=col, val=val, style=style, **kwargs)
107
+
108
+
109
+ class CSSToExcelConverter:
110
+ """
111
+ A callable for converting CSS declarations to ExcelWriter styles
112
+
113
+ Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
114
+ focusing on font styling, backgrounds, borders and alignment.
115
+
116
+ Operates by first computing CSS styles in a fairly generic
117
+ way (see :meth:`compute_css`) then determining Excel style
118
+ properties from CSS properties (see :meth:`build_xlstyle`).
119
+
120
+ Parameters
121
+ ----------
122
+ inherited : str, optional
123
+ CSS declarations understood to be the containing scope for the
124
+ CSS processed by :meth:`__call__`.
125
+ """
126
+
127
+ NAMED_COLORS = CSS4_COLORS
128
+
129
+ VERTICAL_MAP = {
130
+ "top": "top",
131
+ "text-top": "top",
132
+ "middle": "center",
133
+ "baseline": "bottom",
134
+ "bottom": "bottom",
135
+ "text-bottom": "bottom",
136
+ # OpenXML also has 'justify', 'distributed'
137
+ }
138
+
139
+ BOLD_MAP = {
140
+ "bold": True,
141
+ "bolder": True,
142
+ "600": True,
143
+ "700": True,
144
+ "800": True,
145
+ "900": True,
146
+ "normal": False,
147
+ "lighter": False,
148
+ "100": False,
149
+ "200": False,
150
+ "300": False,
151
+ "400": False,
152
+ "500": False,
153
+ }
154
+
155
+ ITALIC_MAP = {
156
+ "normal": False,
157
+ "italic": True,
158
+ "oblique": True,
159
+ }
160
+
161
+ FAMILY_MAP = {
162
+ "serif": 1, # roman
163
+ "sans-serif": 2, # swiss
164
+ "cursive": 4, # script
165
+ "fantasy": 5, # decorative
166
+ }
167
+
168
+ BORDER_STYLE_MAP = {
169
+ style.lower(): style
170
+ for style in [
171
+ "dashed",
172
+ "mediumDashDot",
173
+ "dashDotDot",
174
+ "hair",
175
+ "dotted",
176
+ "mediumDashDotDot",
177
+ "double",
178
+ "dashDot",
179
+ "slantDashDot",
180
+ "mediumDashed",
181
+ ]
182
+ }
183
+
184
+ # NB: Most of the methods here could be classmethods, as only __init__
185
+ # and __call__ make use of instance attributes. We leave them as
186
+ # instancemethods so that users can easily experiment with extensions
187
+ # without monkey-patching.
188
+ inherited: dict[str, str] | None
189
+
190
+ def __init__(self, inherited: str | None = None) -> None:
191
+ if inherited is not None:
192
+ self.inherited = self.compute_css(inherited)
193
+ else:
194
+ self.inherited = None
195
+ # We should avoid caching on the __call__ method itself.
196
+ # Otherwise, once __call__ has been called, garbage collection
197
+ # would no longer delete the instance.
198
+ self._call_cached = functools.cache(self._call_uncached)
199
+
200
+ compute_css = CSSResolver()
201
+
202
+ def __call__(
203
+ self, declarations: str | frozenset[tuple[str, str]]
204
+ ) -> dict[str, dict[str, str]]:
205
+ """
206
+ Convert CSS declarations to ExcelWriter style.
207
+
208
+ Parameters
209
+ ----------
210
+ declarations : str | frozenset[tuple[str, str]]
211
+ CSS string or set of CSS declaration tuples.
212
+ e.g. "font-weight: bold; background: blue" or
213
+ {("font-weight", "bold"), ("background", "blue")}
214
+
215
+ Returns
216
+ -------
217
+ xlstyle : dict
218
+ A style as interpreted by ExcelWriter when found in
219
+ ExcelCell.style.
220
+ """
221
+ return self._call_cached(declarations)
222
+
223
+ def _call_uncached(
224
+ self, declarations: str | frozenset[tuple[str, str]]
225
+ ) -> dict[str, dict[str, str]]:
226
+ properties = self.compute_css(declarations, self.inherited)
227
+ return self.build_xlstyle(properties)
228
+
229
+ def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]:
230
+ out = {
231
+ "alignment": self.build_alignment(props),
232
+ "border": self.build_border(props),
233
+ "fill": self.build_fill(props),
234
+ "font": self.build_font(props),
235
+ "number_format": self.build_number_format(props),
236
+ }
237
+
238
+ # TODO: handle cell width and height: needs support in pandas.io.excel
239
+
240
+ def remove_none(d: dict[str, str | None]) -> None:
241
+ """Remove key where value is None, through nested dicts"""
242
+ for k, v in list(d.items()):
243
+ if v is None:
244
+ del d[k]
245
+ elif isinstance(v, dict):
246
+ remove_none(v)
247
+ if not v:
248
+ del d[k]
249
+
250
+ remove_none(out)
251
+ return out
252
+
253
+ def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]:
254
+ # TODO: text-indent, padding-left -> alignment.indent
255
+ return {
256
+ "horizontal": props.get("text-align"),
257
+ "vertical": self._get_vertical_alignment(props),
258
+ "wrap_text": self._get_is_wrap_text(props),
259
+ }
260
+
261
+ def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None:
262
+ vertical_align = props.get("vertical-align")
263
+ if vertical_align:
264
+ return self.VERTICAL_MAP.get(vertical_align)
265
+ return None
266
+
267
+ def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None:
268
+ if props.get("white-space") is None:
269
+ return None
270
+ return bool(props["white-space"] not in ("nowrap", "pre", "pre-line"))
271
+
272
+ def build_border(
273
+ self, props: Mapping[str, str]
274
+ ) -> dict[str, dict[str, str | None]]:
275
+ return {
276
+ side: {
277
+ "style": self._border_style(
278
+ props.get(f"border-{side}-style"),
279
+ props.get(f"border-{side}-width"),
280
+ self.color_to_excel(props.get(f"border-{side}-color")),
281
+ ),
282
+ "color": self.color_to_excel(props.get(f"border-{side}-color")),
283
+ }
284
+ for side in ["top", "right", "bottom", "left"]
285
+ }
286
+
287
+ def _border_style(self, style: str | None, width: str | None, color: str | None):
288
+ # convert styles and widths to openxml, one of:
289
+ # 'dashDot'
290
+ # 'dashDotDot'
291
+ # 'dashed'
292
+ # 'dotted'
293
+ # 'double'
294
+ # 'hair'
295
+ # 'medium'
296
+ # 'mediumDashDot'
297
+ # 'mediumDashDotDot'
298
+ # 'mediumDashed'
299
+ # 'slantDashDot'
300
+ # 'thick'
301
+ # 'thin'
302
+ if width is None and style is None and color is None:
303
+ # Returning None will remove "border" from the style dictionary
304
+ return None
305
+
306
+ if width is None and style is None:
307
+ # Return "none" will keep "border" in style dictionary
308
+ return "none"
309
+
310
+ if style in ("none", "hidden"):
311
+ return "none"
312
+
313
+ width_name = self._get_width_name(width)
314
+ if width_name is None:
315
+ return "none"
316
+
317
+ if style in (None, "groove", "ridge", "inset", "outset", "solid"):
318
+ # not handled
319
+ return width_name
320
+
321
+ if style == "double":
322
+ return "double"
323
+ if style == "dotted":
324
+ if width_name in ("hair", "thin"):
325
+ return "dotted"
326
+ return "mediumDashDotDot"
327
+ if style == "dashed":
328
+ if width_name in ("hair", "thin"):
329
+ return "dashed"
330
+ return "mediumDashed"
331
+ elif style in self.BORDER_STYLE_MAP:
332
+ # Excel-specific styles
333
+ return self.BORDER_STYLE_MAP[style]
334
+ else:
335
+ warnings.warn(
336
+ f"Unhandled border style format: {repr(style)}",
337
+ CSSWarning,
338
+ stacklevel=find_stack_level(),
339
+ )
340
+ return "none"
341
+
342
+ def _get_width_name(self, width_input: str | None) -> str | None:
343
+ width = self._width_to_float(width_input)
344
+ if width < 1e-5:
345
+ return None
346
+ elif width < 1.3:
347
+ return "thin"
348
+ elif width < 2.8:
349
+ return "medium"
350
+ return "thick"
351
+
352
+ def _width_to_float(self, width: str | None) -> float:
353
+ if width is None:
354
+ width = "2pt"
355
+ return self._pt_to_float(width)
356
+
357
+ def _pt_to_float(self, pt_string: str) -> float:
358
+ assert pt_string.endswith("pt")
359
+ return float(pt_string.rstrip("pt"))
360
+
361
+ def build_fill(self, props: Mapping[str, str]):
362
+ # TODO: perhaps allow for special properties
363
+ # -excel-pattern-bgcolor and -excel-pattern-type
364
+ fill_color = props.get("background-color")
365
+ if fill_color not in (None, "transparent", "none"):
366
+ return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"}
367
+
368
+ def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]:
369
+ fc = props.get("number-format")
370
+ fc = fc.replace("§", ";") if isinstance(fc, str) else fc
371
+ return {"format_code": fc}
372
+
373
+ def build_font(
374
+ self, props: Mapping[str, str]
375
+ ) -> dict[str, bool | float | str | None]:
376
+ font_names = self._get_font_names(props)
377
+ decoration = self._get_decoration(props)
378
+ return {
379
+ "name": font_names[0] if font_names else None,
380
+ "family": self._select_font_family(font_names),
381
+ "size": self._get_font_size(props),
382
+ "bold": self._get_is_bold(props),
383
+ "italic": self._get_is_italic(props),
384
+ "underline": ("single" if "underline" in decoration else None),
385
+ "strike": ("line-through" in decoration) or None,
386
+ "color": self.color_to_excel(props.get("color")),
387
+ # shadow if nonzero digit before shadow color
388
+ "shadow": self._get_shadow(props),
389
+ }
390
+
391
+ def _get_is_bold(self, props: Mapping[str, str]) -> bool | None:
392
+ weight = props.get("font-weight")
393
+ if weight:
394
+ return self.BOLD_MAP.get(weight)
395
+ return None
396
+
397
+ def _get_is_italic(self, props: Mapping[str, str]) -> bool | None:
398
+ font_style = props.get("font-style")
399
+ if font_style:
400
+ return self.ITALIC_MAP.get(font_style)
401
+ return None
402
+
403
+ def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]:
404
+ decoration = props.get("text-decoration")
405
+ if decoration is not None:
406
+ return decoration.split()
407
+ else:
408
+ return ()
409
+
410
+ def _get_underline(self, decoration: Sequence[str]) -> str | None:
411
+ if "underline" in decoration:
412
+ return "single"
413
+ return None
414
+
415
+ def _get_shadow(self, props: Mapping[str, str]) -> bool | None:
416
+ if "text-shadow" in props:
417
+ return bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))
418
+ return None
419
+
420
+ def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]:
421
+ font_names_tmp = re.findall(
422
+ r"""(?x)
423
+ (
424
+ "(?:[^"]|\\")+"
425
+ |
426
+ '(?:[^']|\\')+'
427
+ |
428
+ [^'",]+
429
+ )(?=,|\s*$)
430
+ """,
431
+ props.get("font-family", ""),
432
+ )
433
+
434
+ font_names = []
435
+ for name in font_names_tmp:
436
+ if name[:1] == '"':
437
+ name = name[1:-1].replace('\\"', '"')
438
+ elif name[:1] == "'":
439
+ name = name[1:-1].replace("\\'", "'")
440
+ else:
441
+ name = name.strip()
442
+ if name:
443
+ font_names.append(name)
444
+ return font_names
445
+
446
+ def _get_font_size(self, props: Mapping[str, str]) -> float | None:
447
+ size = props.get("font-size")
448
+ if size is None:
449
+ return size
450
+ return self._pt_to_float(size)
451
+
452
+ def _select_font_family(self, font_names: Sequence[str]) -> int | None:
453
+ family = None
454
+ for name in font_names:
455
+ family = self.FAMILY_MAP.get(name)
456
+ if family:
457
+ break
458
+
459
+ return family
460
+
461
+ def color_to_excel(self, val: str | None) -> str | None:
462
+ if val is None:
463
+ return None
464
+
465
+ if self._is_hex_color(val):
466
+ return self._convert_hex_to_excel(val)
467
+
468
+ try:
469
+ return self.NAMED_COLORS[val]
470
+ except KeyError:
471
+ warnings.warn(
472
+ f"Unhandled color format: {repr(val)}",
473
+ CSSWarning,
474
+ stacklevel=find_stack_level(),
475
+ )
476
+ return None
477
+
478
+ def _is_hex_color(self, color_string: str) -> bool:
479
+ return bool(color_string.startswith("#"))
480
+
481
+ def _convert_hex_to_excel(self, color_string: str) -> str:
482
+ code = color_string.lstrip("#")
483
+ if self._is_shorthand_color(color_string):
484
+ return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper()
485
+ else:
486
+ return code.upper()
487
+
488
+ def _is_shorthand_color(self, color_string: str) -> bool:
489
+ """Check if color code is shorthand.
490
+
491
+ #FFF is a shorthand as opposed to full #FFFFFF.
492
+ """
493
+ code = color_string.lstrip("#")
494
+ if len(code) == 3:
495
+ return True
496
+ elif len(code) == 6:
497
+ return False
498
+ else:
499
+ raise ValueError(f"Unexpected color {color_string}")
500
+
501
+
502
+ class ExcelFormatter:
503
+ """
504
+ Class for formatting a DataFrame to a list of ExcelCells.
505
+
506
+ Parameters
507
+ ----------
508
+ df : DataFrame or Styler
509
+ na_rep : str, representation for missing values
510
+ float_format : str, default None
511
+ Format string for floating point numbers
512
+ cols : sequence, optional
513
+ Columns to write
514
+ header : bool or sequence of str, default True
515
+ Write out column names. If a list of string is given it is
516
+ assumed to be aliases for the column names
517
+ index : bool, default True
518
+ output row names (index)
519
+ index_label : str or sequence, default None
520
+ Column label for index column(s) if desired. If None is given, and
521
+ `header` and `index` are True, then the index names are used. A
522
+ sequence should be given if the DataFrame uses MultiIndex.
523
+ merge_cells : bool, default False
524
+ Format MultiIndex and Hierarchical Rows as merged cells.
525
+ inf_rep : str, default `'inf'`
526
+ representation for np.inf values (which aren't representable in Excel)
527
+ A `'-'` sign will be added in front of -inf.
528
+ style_converter : callable, optional
529
+ This translates Styler styles (CSS) into ExcelWriter styles.
530
+ Defaults to ``CSSToExcelConverter()``.
531
+ It should have signature css_declarations string -> excel style.
532
+ This is only called for body cells.
533
+ """
534
+
535
+ max_rows = 2**20
536
+ max_cols = 2**14
537
+
538
+ def __init__(
539
+ self,
540
+ df,
541
+ na_rep: str = "",
542
+ float_format: str | None = None,
543
+ cols: Sequence[Hashable] | None = None,
544
+ header: Sequence[Hashable] | bool = True,
545
+ index: bool = True,
546
+ index_label: IndexLabel | None = None,
547
+ merge_cells: bool = False,
548
+ inf_rep: str = "inf",
549
+ style_converter: Callable | None = None,
550
+ ) -> None:
551
+ self.rowcounter = 0
552
+ self.na_rep = na_rep
553
+ if not isinstance(df, DataFrame):
554
+ self.styler = df
555
+ self.styler._compute() # calculate applied styles
556
+ df = df.data
557
+ if style_converter is None:
558
+ style_converter = CSSToExcelConverter()
559
+ self.style_converter: Callable | None = style_converter
560
+ else:
561
+ self.styler = None
562
+ self.style_converter = None
563
+ self.df = df
564
+ if cols is not None:
565
+ # all missing, raise
566
+ if not len(Index(cols).intersection(df.columns)):
567
+ raise KeyError("passes columns are not ALL present dataframe")
568
+
569
+ if len(Index(cols).intersection(df.columns)) != len(set(cols)):
570
+ # Deprecated in GH#17295, enforced in 1.0.0
571
+ raise KeyError("Not all names specified in 'columns' are found")
572
+
573
+ self.df = df.reindex(columns=cols)
574
+
575
+ self.columns = self.df.columns
576
+ self.float_format = float_format
577
+ self.index = index
578
+ self.index_label = index_label
579
+ self.header = header
580
+ self.merge_cells = merge_cells
581
+ self.inf_rep = inf_rep
582
+
583
+ @property
584
+ def header_style(self) -> dict[str, dict[str, str | bool]]:
585
+ return {
586
+ "font": {"bold": True},
587
+ "borders": {
588
+ "top": "thin",
589
+ "right": "thin",
590
+ "bottom": "thin",
591
+ "left": "thin",
592
+ },
593
+ "alignment": {"horizontal": "center", "vertical": "top"},
594
+ }
595
+
596
+ def _format_value(self, val):
597
+ if is_scalar(val) and missing.isna(val):
598
+ val = self.na_rep
599
+ elif is_float(val):
600
+ if missing.isposinf_scalar(val):
601
+ val = self.inf_rep
602
+ elif missing.isneginf_scalar(val):
603
+ val = f"-{self.inf_rep}"
604
+ elif self.float_format is not None:
605
+ val = float(self.float_format % val)
606
+ if getattr(val, "tzinfo", None) is not None:
607
+ raise ValueError(
608
+ "Excel does not support datetimes with "
609
+ "timezones. Please ensure that datetimes "
610
+ "are timezone unaware before writing to Excel."
611
+ )
612
+ return val
613
+
614
+ def _format_header_mi(self) -> Iterable[ExcelCell]:
615
+ if self.columns.nlevels > 1:
616
+ if not self.index:
617
+ raise NotImplementedError(
618
+ "Writing to Excel with MultiIndex columns and no "
619
+ "index ('index'=False) is not yet implemented."
620
+ )
621
+
622
+ if not (self._has_aliases or self.header):
623
+ return
624
+
625
+ columns = self.columns
626
+ level_strs = columns._format_multi(
627
+ sparsify=self.merge_cells, include_names=False
628
+ )
629
+ level_lengths = get_level_lengths(level_strs)
630
+ coloffset = 0
631
+ lnum = 0
632
+
633
+ if self.index and isinstance(self.df.index, MultiIndex):
634
+ coloffset = len(self.df.index[0]) - 1
635
+
636
+ if self.merge_cells:
637
+ # Format the MultiIndex as merged cells.
638
+ for lnum, name in enumerate(columns.names):
639
+ yield ExcelCell(
640
+ row=lnum,
641
+ col=coloffset,
642
+ val=name,
643
+ style=self.header_style,
644
+ )
645
+
646
+ for lnum, (spans, levels, level_codes) in enumerate(
647
+ zip(level_lengths, columns.levels, columns.codes)
648
+ ):
649
+ values = levels.take(level_codes)
650
+ for i, span_val in spans.items():
651
+ mergestart, mergeend = None, None
652
+ if span_val > 1:
653
+ mergestart, mergeend = lnum, coloffset + i + span_val
654
+ yield CssExcelCell(
655
+ row=lnum,
656
+ col=coloffset + i + 1,
657
+ val=values[i],
658
+ style=self.header_style,
659
+ css_styles=getattr(self.styler, "ctx_columns", None),
660
+ css_row=lnum,
661
+ css_col=i,
662
+ css_converter=self.style_converter,
663
+ mergestart=mergestart,
664
+ mergeend=mergeend,
665
+ )
666
+ else:
667
+ # Format in legacy format with dots to indicate levels.
668
+ for i, values in enumerate(zip(*level_strs)):
669
+ v = ".".join(map(pprint_thing, values))
670
+ yield CssExcelCell(
671
+ row=lnum,
672
+ col=coloffset + i + 1,
673
+ val=v,
674
+ style=self.header_style,
675
+ css_styles=getattr(self.styler, "ctx_columns", None),
676
+ css_row=lnum,
677
+ css_col=i,
678
+ css_converter=self.style_converter,
679
+ )
680
+
681
+ self.rowcounter = lnum
682
+
683
+ def _format_header_regular(self) -> Iterable[ExcelCell]:
684
+ if self._has_aliases or self.header:
685
+ coloffset = 0
686
+
687
+ if self.index:
688
+ coloffset = 1
689
+ if isinstance(self.df.index, MultiIndex):
690
+ coloffset = len(self.df.index.names)
691
+
692
+ colnames = self.columns
693
+ if self._has_aliases:
694
+ self.header = cast(Sequence, self.header)
695
+ if len(self.header) != len(self.columns):
696
+ raise ValueError(
697
+ f"Writing {len(self.columns)} cols "
698
+ f"but got {len(self.header)} aliases"
699
+ )
700
+ colnames = self.header
701
+
702
+ for colindex, colname in enumerate(colnames):
703
+ yield CssExcelCell(
704
+ row=self.rowcounter,
705
+ col=colindex + coloffset,
706
+ val=colname,
707
+ style=self.header_style,
708
+ css_styles=getattr(self.styler, "ctx_columns", None),
709
+ css_row=0,
710
+ css_col=colindex,
711
+ css_converter=self.style_converter,
712
+ )
713
+
714
+ def _format_header(self) -> Iterable[ExcelCell]:
715
+ gen: Iterable[ExcelCell]
716
+
717
+ if isinstance(self.columns, MultiIndex):
718
+ gen = self._format_header_mi()
719
+ else:
720
+ gen = self._format_header_regular()
721
+
722
+ gen2: Iterable[ExcelCell] = ()
723
+
724
+ if self.df.index.names:
725
+ row = [x if x is not None else "" for x in self.df.index.names] + [
726
+ ""
727
+ ] * len(self.columns)
728
+ if functools.reduce(lambda x, y: x and y, (x != "" for x in row)):
729
+ gen2 = (
730
+ ExcelCell(self.rowcounter, colindex, val, self.header_style)
731
+ for colindex, val in enumerate(row)
732
+ )
733
+ self.rowcounter += 1
734
+ return itertools.chain(gen, gen2)
735
+
736
+ def _format_body(self) -> Iterable[ExcelCell]:
737
+ if isinstance(self.df.index, MultiIndex):
738
+ return self._format_hierarchical_rows()
739
+ else:
740
+ return self._format_regular_rows()
741
+
742
+ def _format_regular_rows(self) -> Iterable[ExcelCell]:
743
+ if self._has_aliases or self.header:
744
+ self.rowcounter += 1
745
+
746
+ # output index and index_label?
747
+ if self.index:
748
+ # check aliases
749
+ # if list only take first as this is not a MultiIndex
750
+ if self.index_label and isinstance(
751
+ self.index_label, (list, tuple, np.ndarray, Index)
752
+ ):
753
+ index_label = self.index_label[0]
754
+ # if string good to go
755
+ elif self.index_label and isinstance(self.index_label, str):
756
+ index_label = self.index_label
757
+ else:
758
+ index_label = self.df.index.names[0]
759
+
760
+ if isinstance(self.columns, MultiIndex):
761
+ self.rowcounter += 1
762
+
763
+ if index_label and self.header is not False:
764
+ yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style)
765
+
766
+ # write index_values
767
+ index_values = self.df.index
768
+ if isinstance(self.df.index, PeriodIndex):
769
+ index_values = self.df.index.to_timestamp()
770
+
771
+ for idx, idxval in enumerate(index_values):
772
+ yield CssExcelCell(
773
+ row=self.rowcounter + idx,
774
+ col=0,
775
+ val=idxval,
776
+ style=self.header_style,
777
+ css_styles=getattr(self.styler, "ctx_index", None),
778
+ css_row=idx,
779
+ css_col=0,
780
+ css_converter=self.style_converter,
781
+ )
782
+ coloffset = 1
783
+ else:
784
+ coloffset = 0
785
+
786
+ yield from self._generate_body(coloffset)
787
+
788
+ def _format_hierarchical_rows(self) -> Iterable[ExcelCell]:
789
+ if self._has_aliases or self.header:
790
+ self.rowcounter += 1
791
+
792
+ gcolidx = 0
793
+
794
+ if self.index:
795
+ index_labels = self.df.index.names
796
+ # check for aliases
797
+ if self.index_label and isinstance(
798
+ self.index_label, (list, tuple, np.ndarray, Index)
799
+ ):
800
+ index_labels = self.index_label
801
+
802
+ # MultiIndex columns require an extra row
803
+ # with index names (blank if None) for
804
+ # unambiguous round-trip, unless not merging,
805
+ # in which case the names all go on one row Issue #11328
806
+ if isinstance(self.columns, MultiIndex) and self.merge_cells:
807
+ self.rowcounter += 1
808
+
809
+ # if index labels are not empty go ahead and dump
810
+ if com.any_not_none(*index_labels) and self.header is not False:
811
+ for cidx, name in enumerate(index_labels):
812
+ yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style)
813
+
814
+ if self.merge_cells:
815
+ # Format hierarchical rows as merged cells.
816
+ level_strs = self.df.index._format_multi(
817
+ sparsify=True, include_names=False
818
+ )
819
+ level_lengths = get_level_lengths(level_strs)
820
+
821
+ for spans, levels, level_codes in zip(
822
+ level_lengths, self.df.index.levels, self.df.index.codes
823
+ ):
824
+ values = levels.take(
825
+ level_codes,
826
+ allow_fill=levels._can_hold_na,
827
+ fill_value=levels._na_value,
828
+ )
829
+
830
+ for i, span_val in spans.items():
831
+ mergestart, mergeend = None, None
832
+ if span_val > 1:
833
+ mergestart = self.rowcounter + i + span_val - 1
834
+ mergeend = gcolidx
835
+ yield CssExcelCell(
836
+ row=self.rowcounter + i,
837
+ col=gcolidx,
838
+ val=values[i],
839
+ style=self.header_style,
840
+ css_styles=getattr(self.styler, "ctx_index", None),
841
+ css_row=i,
842
+ css_col=gcolidx,
843
+ css_converter=self.style_converter,
844
+ mergestart=mergestart,
845
+ mergeend=mergeend,
846
+ )
847
+ gcolidx += 1
848
+
849
+ else:
850
+ # Format hierarchical rows with non-merged values.
851
+ for indexcolvals in zip(*self.df.index):
852
+ for idx, indexcolval in enumerate(indexcolvals):
853
+ yield CssExcelCell(
854
+ row=self.rowcounter + idx,
855
+ col=gcolidx,
856
+ val=indexcolval,
857
+ style=self.header_style,
858
+ css_styles=getattr(self.styler, "ctx_index", None),
859
+ css_row=idx,
860
+ css_col=gcolidx,
861
+ css_converter=self.style_converter,
862
+ )
863
+ gcolidx += 1
864
+
865
+ yield from self._generate_body(gcolidx)
866
+
867
+ @property
868
+ def _has_aliases(self) -> bool:
869
+ """Whether the aliases for column names are present."""
870
+ return is_list_like(self.header)
871
+
872
+ def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:
873
+ # Write the body of the frame data series by series.
874
+ for colidx in range(len(self.columns)):
875
+ series = self.df.iloc[:, colidx]
876
+ for i, val in enumerate(series):
877
+ yield CssExcelCell(
878
+ row=self.rowcounter + i,
879
+ col=colidx + coloffset,
880
+ val=val,
881
+ style=None,
882
+ css_styles=getattr(self.styler, "ctx", None),
883
+ css_row=i,
884
+ css_col=colidx,
885
+ css_converter=self.style_converter,
886
+ )
887
+
888
+ def get_formatted_cells(self) -> Iterable[ExcelCell]:
889
+ for cell in itertools.chain(self._format_header(), self._format_body()):
890
+ cell.val = self._format_value(cell.val)
891
+ yield cell
892
+
893
+ @doc(storage_options=_shared_docs["storage_options"])
894
+ def write(
895
+ self,
896
+ writer: FilePath | WriteExcelBuffer | ExcelWriter,
897
+ sheet_name: str = "Sheet1",
898
+ startrow: int = 0,
899
+ startcol: int = 0,
900
+ freeze_panes: tuple[int, int] | None = None,
901
+ engine: str | None = None,
902
+ storage_options: StorageOptions | None = None,
903
+ engine_kwargs: dict | None = None,
904
+ ) -> None:
905
+ """
906
+ writer : path-like, file-like, or ExcelWriter object
907
+ File path or existing ExcelWriter
908
+ sheet_name : str, default 'Sheet1'
909
+ Name of sheet which will contain DataFrame
910
+ startrow : int, default 0
912
+ upper left cell row to dump data frame
913
+ startcol : int, default 0
914
+ upper left cell column to dump data frame
914
+ freeze_panes : tuple of integer (length 2), default None
915
+ Specifies the one-based bottommost row and rightmost column that
916
+ is to be frozen
917
+ engine : string, default None
918
+ write engine to use if writer is a path - you can also set this
919
+ via the options ``io.excel.xlsx.writer``,
920
+ or ``io.excel.xlsm.writer``.
921
+
922
+ {storage_options}
923
+
924
+ engine_kwargs: dict, optional
925
+ Arbitrary keyword arguments passed to excel engine.
926
+ """
927
+ from pandas.io.excel import ExcelWriter
928
+
929
+ num_rows, num_cols = self.df.shape
930
+ if num_rows > self.max_rows or num_cols > self.max_cols:
931
+ raise ValueError(
932
+ f"This sheet is too large! Your sheet size is: {num_rows}, {num_cols} "
933
+ f"Max sheet size is: {self.max_rows}, {self.max_cols}"
934
+ )
935
+
936
+ if engine_kwargs is None:
937
+ engine_kwargs = {}
938
+
939
+ formatted_cells = self.get_formatted_cells()
940
+ if isinstance(writer, ExcelWriter):
941
+ need_save = False
942
+ else:
943
+ writer = ExcelWriter(
944
+ writer,
945
+ engine=engine,
946
+ storage_options=storage_options,
947
+ engine_kwargs=engine_kwargs,
948
+ )
949
+ need_save = True
950
+
951
+ try:
952
+ writer._write_cells(
953
+ formatted_cells,
954
+ sheet_name,
955
+ startrow=startrow,
956
+ startcol=startcol,
957
+ freeze_panes=freeze_panes,
958
+ )
959
+ finally:
960
+ # make sure to close opened file handles
961
+ if need_save:
962
+ writer.close()
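A small sketch of the converter above used in isolation; the nested-dict shape is what build_xlstyle returns after remove_none pruning, with values following the ratio tables earlier in this file:

    from pandas.io.formats.excel import CSSToExcelConverter

    convert = CSSToExcelConverter()
    style = convert("font-weight: bold; border: thin solid #FF0000")
    # A nested dict as consumed by ExcelWriter, e.g.:
    #   style["font"]["bold"] is True
    #   style["border"]["top"]["style"] == "thin"
    #   style["border"]["top"]["color"] == "FF0000"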
venv/lib/python3.10/site-packages/pandas/io/formats/format.py ADDED
@@ -0,0 +1,2058 @@
+ """
+ Internal module for formatting output data in csv, html, xml,
+ and latex files. This module also applies to display formatting.
+ """
+ from __future__ import annotations
+
+ from collections.abc import (
+     Generator,
+     Hashable,
+     Mapping,
+     Sequence,
+ )
+ from contextlib import contextmanager
+ from csv import QUOTE_NONE
+ from decimal import Decimal
+ from functools import partial
+ from io import StringIO
+ import math
+ import re
+ from shutil import get_terminal_size
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     Final,
+     cast,
+ )
+
+ import numpy as np
+
+ from pandas._config.config import (
+     get_option,
+     set_option,
+ )
+
+ from pandas._libs import lib
+ from pandas._libs.missing import NA
+ from pandas._libs.tslibs import (
+     NaT,
+     Timedelta,
+     Timestamp,
+ )
+ from pandas._libs.tslibs.nattype import NaTType
+
+ from pandas.core.dtypes.common import (
+     is_complex_dtype,
+     is_float,
+     is_integer,
+     is_list_like,
+     is_numeric_dtype,
+     is_scalar,
+ )
+ from pandas.core.dtypes.dtypes import (
+     CategoricalDtype,
+     DatetimeTZDtype,
+     ExtensionDtype,
+ )
+ from pandas.core.dtypes.missing import (
+     isna,
+     notna,
+ )
+
+ from pandas.core.arrays import (
+     Categorical,
+     DatetimeArray,
+     ExtensionArray,
+     TimedeltaArray,
+ )
+ from pandas.core.arrays.string_ import StringDtype
+ from pandas.core.base import PandasObject
+ import pandas.core.common as com
+ from pandas.core.indexes.api import (
+     Index,
+     MultiIndex,
+     PeriodIndex,
+     ensure_index,
+ )
+ from pandas.core.indexes.datetimes import DatetimeIndex
+ from pandas.core.indexes.timedeltas import TimedeltaIndex
+ from pandas.core.reshape.concat import concat
+
+ from pandas.io.common import (
+     check_parent_directory,
+     stringify_path,
+ )
+ from pandas.io.formats import printing
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         ArrayLike,
+         Axes,
+         ColspaceArgType,
+         ColspaceType,
+         CompressionOptions,
+         FilePath,
+         FloatFormatType,
+         FormattersType,
+         IndexLabel,
+         SequenceNotStr,
+         StorageOptions,
+         WriteBuffer,
+     )
+
+     from pandas import (
+         DataFrame,
+         Series,
+     )
+
+
+ common_docstring: Final = """
+         Parameters
+         ----------
+         buf : str, Path or StringIO-like, optional, default None
+             Buffer to write to. If None, the output is returned as a string.
+         columns : array-like, optional, default None
+             The subset of columns to write. Writes all columns by default.
+         col_space : %(col_space_type)s, optional
+             %(col_space)s.
+         header : %(header_type)s, optional
+             %(header)s.
+         index : bool, optional, default True
+             Whether to print index (row) labels.
+         na_rep : str, optional, default 'NaN'
+             String representation of ``NaN`` to use.
+         formatters : list, tuple or dict of one-param. functions, optional
+             Formatter functions to apply to columns' elements by position or
+             name.
+             The result of each function must be a unicode string.
+             List/tuple must be of length equal to the number of columns.
+         float_format : one-parameter function, optional, default None
+             Formatter function to apply to columns' elements if they are
+             floats. This function must return a unicode string and will be
+             applied only to the non-``NaN`` elements, with ``NaN`` being
+             handled by ``na_rep``.
+         sparsify : bool, optional, default True
+             Set to False for a DataFrame with a hierarchical index to print
+             every multiindex key at each row.
+         index_names : bool, optional, default True
+             Prints the names of the indexes.
+         justify : str, default None
+             How to justify the column labels. If None uses the option from
+             the print configuration (controlled by set_option), 'right' out
+             of the box. Valid values are
+
+             * left
+             * right
+             * center
+             * justify
+             * justify-all
+             * start
+             * end
+             * inherit
+             * match-parent
+             * initial
+             * unset.
+         max_rows : int, optional
+             Maximum number of rows to display in the console.
+         max_cols : int, optional
+             Maximum number of columns to display in the console.
+         show_dimensions : bool, default False
+             Display DataFrame dimensions (number of rows by number of columns).
+         decimal : str, default '.'
+             Character recognized as decimal separator, e.g. ',' in Europe.
+     """
+
+ VALID_JUSTIFY_PARAMETERS = (
+     "left",
+     "right",
+     "center",
+     "justify",
+     "justify-all",
+     "start",
+     "end",
+     "inherit",
+     "match-parent",
+     "initial",
+     "unset",
+ )
+
+ return_docstring: Final = """
+     Returns
+     -------
+     str or None
+         If buf is None, returns the result as a string. Otherwise returns
+         None.
+     """
+
+
+ class SeriesFormatter:
+     """
+     Implement the main logic of Series.to_string, which underlies
+     Series.__repr__.
+     """
+
+     def __init__(
+         self,
+         series: Series,
+         *,
+         length: bool | str = True,
+         header: bool = True,
+         index: bool = True,
+         na_rep: str = "NaN",
+         name: bool = False,
+         float_format: str | None = None,
+         dtype: bool = True,
+         max_rows: int | None = None,
+         min_rows: int | None = None,
+     ) -> None:
+         self.series = series
+         self.buf = StringIO()
+         self.name = name
+         self.na_rep = na_rep
+         self.header = header
+         self.length = length
+         self.index = index
+         self.max_rows = max_rows
+         self.min_rows = min_rows
+
+         if float_format is None:
+             float_format = get_option("display.float_format")
+         self.float_format = float_format
+         self.dtype = dtype
+         self.adj = printing.get_adjustment()
+
+         self._chk_truncate()
+
+     def _chk_truncate(self) -> None:
+         self.tr_row_num: int | None
+
+         min_rows = self.min_rows
+         max_rows = self.max_rows
+         # truncation determined by max_rows, actual truncated number of rows
+         # used below by min_rows
+         is_truncated_vertically = max_rows and (len(self.series) > max_rows)
+         series = self.series
+         if is_truncated_vertically:
+             max_rows = cast(int, max_rows)
+             if min_rows:
+                 # if min_rows is set (not None or 0), set max_rows to minimum
+                 # of both
+                 max_rows = min(min_rows, max_rows)
+             if max_rows == 1:
+                 row_num = max_rows
+                 series = series.iloc[:max_rows]
+             else:
+                 row_num = max_rows // 2
+                 series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
+             self.tr_row_num = row_num
+         else:
+             self.tr_row_num = None
+         self.tr_series = series
+         self.is_truncated_vertically = is_truncated_vertically
+
+     def _get_footer(self) -> str:
+         name = self.series.name
+         footer = ""
+
+         index = self.series.index
+         if (
+             isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex))
+             and index.freq is not None
+         ):
+             footer += f"Freq: {index.freqstr}"
+
+         if self.name is not False and name is not None:
+             if footer:
+                 footer += ", "
+
+             series_name = printing.pprint_thing(name, escape_chars=("\t", "\r", "\n"))
+             footer += f"Name: {series_name}"
+
+         if self.length is True or (
+             self.length == "truncate" and self.is_truncated_vertically
+         ):
+             if footer:
+                 footer += ", "
+             footer += f"Length: {len(self.series)}"
+
+         if self.dtype is not False and self.dtype is not None:
+             dtype_name = getattr(self.tr_series.dtype, "name", None)
+             if dtype_name:
+                 if footer:
+                     footer += ", "
+                 footer += f"dtype: {printing.pprint_thing(dtype_name)}"
+
+         # level info is added at the end on a new line, as is done
+         # for Categoricals
+         if isinstance(self.tr_series.dtype, CategoricalDtype):
+             level_info = self.tr_series._values._get_repr_footer()
+             if footer:
+                 footer += "\n"
+             footer += level_info
+
+         return str(footer)
+
+     def _get_formatted_values(self) -> list[str]:
+         return format_array(
+             self.tr_series._values,
+             None,
+             float_format=self.float_format,
+             na_rep=self.na_rep,
+             leading_space=self.index,
+         )
+
+     def to_string(self) -> str:
+         series = self.tr_series
+         footer = self._get_footer()
+
+         if len(series) == 0:
+             return f"{type(self.series).__name__}([], {footer})"
+
+         index = series.index
+         have_header = _has_names(index)
+         if isinstance(index, MultiIndex):
+             fmt_index = index._format_multi(include_names=True, sparsify=None)
+             adj = printing.get_adjustment()
+             fmt_index = adj.adjoin(2, *fmt_index).split("\n")
+         else:
+             fmt_index = index._format_flat(include_name=True)
+         fmt_values = self._get_formatted_values()
+
+         if self.is_truncated_vertically:
+             n_header_rows = 0
+             row_num = self.tr_row_num
+             row_num = cast(int, row_num)
+             width = self.adj.len(fmt_values[row_num - 1])
+             if width > 3:
+                 dot_str = "..."
+             else:
+                 dot_str = ".."
+             # Series uses mode=center because it has single value columns
+             # DataFrame uses mode=left
+             dot_str = self.adj.justify([dot_str], width, mode="center")[0]
+             fmt_values.insert(row_num + n_header_rows, dot_str)
+             fmt_index.insert(row_num + 1, "")
+
+         if self.index:
+             result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
+         else:
+             result = self.adj.adjoin(3, fmt_values)
+
+         if self.header and have_header:
+             result = fmt_index[0] + "\n" + result
+
+         if footer:
+             result += "\n" + footer
+
+         return str("".join(result))
+
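A small sketch of how SeriesFormatter is driven (assumption: this is internal API, stable only within a given pandas version); Series.to_string constructs it with keyword-only options like these:

import pandas as pd
from pandas.io.formats.format import SeriesFormatter

ser = pd.Series([1.5, 2.5, None], name="vals")
# name/dtype toggle footer fields; max_rows/min_rows drive _chk_truncate.
print(SeriesFormatter(ser, name=True, dtype=True, max_rows=2, min_rows=2).to_string())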
+
+ def get_dataframe_repr_params() -> dict[str, Any]:
+     """Get the parameters used in repr(DataFrame) calls, via DataFrame.to_string.
+
+     Supplying these parameters to DataFrame.to_string is equivalent to calling
+     ``repr(DataFrame)``. This is useful if you want to adjust the repr output.
+
+     .. versionadded:: 1.4.0
+
+     Examples
+     --------
+     >>> import pandas as pd
+     >>>
+     >>> df = pd.DataFrame([[1, 2], [3, 4]])
+     >>> repr_params = pd.io.formats.format.get_dataframe_repr_params()
+     >>> repr(df) == df.to_string(**repr_params)
+     True
+     """
+     from pandas.io.formats import console
+
+     if get_option("display.expand_frame_repr"):
+         line_width, _ = console.get_console_size()
+     else:
+         line_width = None
+     return {
+         "max_rows": get_option("display.max_rows"),
+         "min_rows": get_option("display.min_rows"),
+         "max_cols": get_option("display.max_columns"),
+         "max_colwidth": get_option("display.max_colwidth"),
+         "show_dimensions": get_option("display.show_dimensions"),
+         "line_width": line_width,
+     }
+
+
+ def get_series_repr_params() -> dict[str, Any]:
+     """Get the parameters used in repr(Series) calls, via Series.to_string.
+
+     Supplying these parameters to Series.to_string is equivalent to calling
+     ``repr(series)``. This is useful if you want to adjust the series repr output.
+
+     .. versionadded:: 1.4.0
+
+     Examples
+     --------
+     >>> import pandas as pd
+     >>>
+     >>> ser = pd.Series([1, 2, 3, 4])
+     >>> repr_params = pd.io.formats.format.get_series_repr_params()
+     >>> repr(ser) == ser.to_string(**repr_params)
+     True
+     """
+     width, height = get_terminal_size()
+     max_rows_opt = get_option("display.max_rows")
+     max_rows = height if max_rows_opt == 0 else max_rows_opt
+     min_rows = height if max_rows_opt == 0 else get_option("display.min_rows")
+
+     return {
+         "name": True,
+         "dtype": True,
+         "min_rows": min_rows,
+         "max_rows": max_rows,
+         "length": get_option("display.show_dimensions"),
+     }
+
+
+ class DataFrameFormatter:
+     """
+     Class for processing dataframe formatting options and data.
+
+     Used by DataFrame.to_string, which backs DataFrame.__repr__.
+     """
+
+     __doc__ = __doc__ if __doc__ else ""
+     __doc__ += common_docstring + return_docstring
+
+     def __init__(
+         self,
+         frame: DataFrame,
+         columns: Axes | None = None,
+         col_space: ColspaceArgType | None = None,
+         header: bool | SequenceNotStr[str] = True,
+         index: bool = True,
+         na_rep: str = "NaN",
+         formatters: FormattersType | None = None,
+         justify: str | None = None,
+         float_format: FloatFormatType | None = None,
+         sparsify: bool | None = None,
+         index_names: bool = True,
+         max_rows: int | None = None,
+         min_rows: int | None = None,
+         max_cols: int | None = None,
+         show_dimensions: bool | str = False,
+         decimal: str = ".",
+         bold_rows: bool = False,
+         escape: bool = True,
+     ) -> None:
+         self.frame = frame
+         self.columns = self._initialize_columns(columns)
+         self.col_space = self._initialize_colspace(col_space)
+         self.header = header
+         self.index = index
+         self.na_rep = na_rep
+         self.formatters = self._initialize_formatters(formatters)
+         self.justify = self._initialize_justify(justify)
+         self.float_format = float_format
+         self.sparsify = self._initialize_sparsify(sparsify)
+         self.show_index_names = index_names
+         self.decimal = decimal
+         self.bold_rows = bold_rows
+         self.escape = escape
+         self.max_rows = max_rows
+         self.min_rows = min_rows
+         self.max_cols = max_cols
+         self.show_dimensions = show_dimensions
+
+         self.max_cols_fitted = self._calc_max_cols_fitted()
+         self.max_rows_fitted = self._calc_max_rows_fitted()
+
+         self.tr_frame = self.frame
+         self.truncate()
+         self.adj = printing.get_adjustment()
+
+     def get_strcols(self) -> list[list[str]]:
+         """
+         Render a DataFrame to a list of columns (as lists of strings).
+         """
+         strcols = self._get_strcols_without_index()
+
+         if self.index:
+             str_index = self._get_formatted_index(self.tr_frame)
+             strcols.insert(0, str_index)
+
+         return strcols
+
+     @property
+     def should_show_dimensions(self) -> bool:
+         return self.show_dimensions is True or (
+             self.show_dimensions == "truncate" and self.is_truncated
+         )
+
+     @property
+     def is_truncated(self) -> bool:
+         return bool(self.is_truncated_horizontally or self.is_truncated_vertically)
+
+     @property
+     def is_truncated_horizontally(self) -> bool:
+         return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted))
+
+     @property
+     def is_truncated_vertically(self) -> bool:
+         return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted))
+
+     @property
+     def dimensions_info(self) -> str:
+         return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]"
+
+     @property
+     def has_index_names(self) -> bool:
+         return _has_names(self.frame.index)
+
+     @property
+     def has_column_names(self) -> bool:
+         return _has_names(self.frame.columns)
+
+     @property
+     def show_row_idx_names(self) -> bool:
+         return all((self.has_index_names, self.index, self.show_index_names))
+
+     @property
+     def show_col_idx_names(self) -> bool:
+         return all((self.has_column_names, self.show_index_names, self.header))
+
+     @property
+     def max_rows_displayed(self) -> int:
+         return min(self.max_rows or len(self.frame), len(self.frame))
+
+     def _initialize_sparsify(self, sparsify: bool | None) -> bool:
+         if sparsify is None:
+             return get_option("display.multi_sparse")
+         return sparsify
+
+     def _initialize_formatters(
+         self, formatters: FormattersType | None
+     ) -> FormattersType:
+         if formatters is None:
+             return {}
+         elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict):
+             return formatters
+         else:
+             raise ValueError(
+                 f"Formatters length({len(formatters)}) should match "
+                 f"DataFrame number of columns({len(self.frame.columns)})"
+             )
+
+     def _initialize_justify(self, justify: str | None) -> str:
+         if justify is None:
+             return get_option("display.colheader_justify")
+         else:
+             return justify
+
+     def _initialize_columns(self, columns: Axes | None) -> Index:
+         if columns is not None:
+             cols = ensure_index(columns)
+             self.frame = self.frame[cols]
+             return cols
+         else:
+             return self.frame.columns
+
+     def _initialize_colspace(self, col_space: ColspaceArgType | None) -> ColspaceType:
+         result: ColspaceType
+
+         if col_space is None:
+             result = {}
+         elif isinstance(col_space, (int, str)):
+             result = {"": col_space}
+             result.update({column: col_space for column in self.frame.columns})
+         elif isinstance(col_space, Mapping):
+             for column in col_space.keys():
+                 if column not in self.frame.columns and column != "":
+                     raise ValueError(
+                         f"Col_space is defined for an unknown column: {column}"
+                     )
+             result = col_space
+         else:
+             if len(self.frame.columns) != len(col_space):
+                 raise ValueError(
+                     f"Col_space length({len(col_space)}) should match "
+                     f"DataFrame number of columns({len(self.frame.columns)})"
+                 )
+             result = dict(zip(self.frame.columns, col_space))
+         return result
+
+     def _calc_max_cols_fitted(self) -> int | None:
+         """Number of columns fitting the screen."""
+         if not self._is_in_terminal():
+             return self.max_cols
+
+         width, _ = get_terminal_size()
+         if self._is_screen_narrow(width):
+             return width
+         else:
+             return self.max_cols
+
+     def _calc_max_rows_fitted(self) -> int | None:
+         """Number of rows with data fitting the screen."""
+         max_rows: int | None
+
+         if self._is_in_terminal():
+             _, height = get_terminal_size()
+             if self.max_rows == 0:
+                 # rows available to fill with actual data
+                 return height - self._get_number_of_auxiliary_rows()
+
+             if self._is_screen_short(height):
+                 max_rows = height
+             else:
+                 max_rows = self.max_rows
+         else:
+             max_rows = self.max_rows
+
+         return self._adjust_max_rows(max_rows)
+
+     def _adjust_max_rows(self, max_rows: int | None) -> int | None:
+         """Adjust max_rows using display logic.
+
+         See description here:
+         https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
+
+         GH #37359
+         """
+         if max_rows:
+             if (len(self.frame) > max_rows) and self.min_rows:
+                 # if truncated, set max_rows showed to min_rows
+                 max_rows = min(self.min_rows, max_rows)
+         return max_rows
+
+     def _is_in_terminal(self) -> bool:
+         """Check if the output is to be shown in a terminal."""
+         return bool(self.max_cols == 0 or self.max_rows == 0)
+
+     def _is_screen_narrow(self, max_width) -> bool:
+         return bool(self.max_cols == 0 and len(self.frame.columns) > max_width)
+
+     def _is_screen_short(self, max_height) -> bool:
+         return bool(self.max_rows == 0 and len(self.frame) > max_height)
+
+     def _get_number_of_auxiliary_rows(self) -> int:
+         """Get number of rows occupied by prompt, dots and dimension info."""
+         dot_row = 1
+         prompt_row = 1
+         num_rows = dot_row + prompt_row
+
+         if self.show_dimensions:
+             num_rows += len(self.dimensions_info.splitlines())
+
+         if self.header:
+             num_rows += 1
+
+         return num_rows
+
+     def truncate(self) -> None:
+         """
+         Check whether the frame should be truncated. If so, slice the frame up.
+         """
+         if self.is_truncated_horizontally:
+             self._truncate_horizontally()
+
+         if self.is_truncated_vertically:
+             self._truncate_vertically()
+
+     def _truncate_horizontally(self) -> None:
+         """Remove columns that are not to be displayed, and adjust formatters.
+
+         Attributes affected:
+             - tr_frame
+             - formatters
+             - tr_col_num
+         """
+         assert self.max_cols_fitted is not None
+         col_num = self.max_cols_fitted // 2
+         if col_num >= 1:
+             left = self.tr_frame.iloc[:, :col_num]
+             right = self.tr_frame.iloc[:, -col_num:]
+             self.tr_frame = concat((left, right), axis=1)
+
+             # truncate formatter
+             if isinstance(self.formatters, (list, tuple)):
+                 self.formatters = [
+                     *self.formatters[:col_num],
+                     *self.formatters[-col_num:],
+                 ]
+         else:
+             col_num = cast(int, self.max_cols)
+             self.tr_frame = self.tr_frame.iloc[:, :col_num]
+         self.tr_col_num = col_num
+
+     def _truncate_vertically(self) -> None:
+         """Remove rows that are not to be displayed.
+
+         Attributes affected:
+             - tr_frame
+             - tr_row_num
+         """
+         assert self.max_rows_fitted is not None
+         row_num = self.max_rows_fitted // 2
+         if row_num >= 1:
+             _len = len(self.tr_frame)
+             _slice = np.hstack([np.arange(row_num), np.arange(_len - row_num, _len)])
+             self.tr_frame = self.tr_frame.iloc[_slice]
+         else:
+             row_num = cast(int, self.max_rows)
+             self.tr_frame = self.tr_frame.iloc[:row_num, :]
+         self.tr_row_num = row_num
+
+     def _get_strcols_without_index(self) -> list[list[str]]:
+         strcols: list[list[str]] = []
+
+         if not is_list_like(self.header) and not self.header:
+             for i, c in enumerate(self.tr_frame):
+                 fmt_values = self.format_col(i)
+                 fmt_values = _make_fixed_width(
+                     strings=fmt_values,
+                     justify=self.justify,
+                     minimum=int(self.col_space.get(c, 0)),
+                     adj=self.adj,
+                 )
+                 strcols.append(fmt_values)
+             return strcols
+
+         if is_list_like(self.header):
+             # cast here since can't be bool if is_list_like
+             self.header = cast(list[str], self.header)
+             if len(self.header) != len(self.columns):
+                 raise ValueError(
+                     f"Writing {len(self.columns)} cols "
+                     f"but got {len(self.header)} aliases"
+                 )
+             str_columns = [[label] for label in self.header]
+         else:
+             str_columns = self._get_formatted_column_labels(self.tr_frame)
+
+         if self.show_row_idx_names:
+             for x in str_columns:
+                 x.append("")
+
+         for i, c in enumerate(self.tr_frame):
+             cheader = str_columns[i]
+             header_colwidth = max(
+                 int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader)
+             )
+             fmt_values = self.format_col(i)
+             fmt_values = _make_fixed_width(
+                 fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
+             )
+
+             max_len = max(*(self.adj.len(x) for x in fmt_values), header_colwidth)
+             cheader = self.adj.justify(cheader, max_len, mode=self.justify)
+             strcols.append(cheader + fmt_values)
+
+         return strcols
+
+     def format_col(self, i: int) -> list[str]:
+         frame = self.tr_frame
+         formatter = self._get_formatter(i)
+         return format_array(
+             frame.iloc[:, i]._values,
+             formatter,
+             float_format=self.float_format,
+             na_rep=self.na_rep,
+             space=self.col_space.get(frame.columns[i]),
+             decimal=self.decimal,
+             leading_space=self.index,
+         )
+
+     def _get_formatter(self, i: str | int) -> Callable | None:
+         if isinstance(self.formatters, (list, tuple)):
+             if is_integer(i):
+                 i = cast(int, i)
+                 return self.formatters[i]
+             else:
+                 return None
+         else:
+             if is_integer(i) and i not in self.columns:
+                 i = self.columns[i]
+             return self.formatters.get(i, None)
+
+     def _get_formatted_column_labels(self, frame: DataFrame) -> list[list[str]]:
+         from pandas.core.indexes.multi import sparsify_labels
+
+         columns = frame.columns
+
+         if isinstance(columns, MultiIndex):
+             fmt_columns = columns._format_multi(sparsify=False, include_names=False)
+             fmt_columns = list(zip(*fmt_columns))
+             dtypes = self.frame.dtypes._values
+
+             # if we have a Float level, they don't use leading space at all
+             restrict_formatting = any(level.is_floating for level in columns.levels)
+             need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
+
+             def space_format(x, y):
+                 if (
+                     y not in self.formatters
+                     and need_leadsp[x]
+                     and not restrict_formatting
+                 ):
+                     return " " + y
+                 return y
+
+             str_columns_tuple = list(
+                 zip(*([space_format(x, y) for y in x] for x in fmt_columns))
+             )
+             if self.sparsify and len(str_columns_tuple):
+                 str_columns_tuple = sparsify_labels(str_columns_tuple)
+
+             str_columns = [list(x) for x in zip(*str_columns_tuple)]
+         else:
+             fmt_columns = columns._format_flat(include_name=False)
+             dtypes = self.frame.dtypes
+             need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
+             str_columns = [
+                 [" " + x if not self._get_formatter(i) and need_leadsp[x] else x]
+                 for i, x in enumerate(fmt_columns)
+             ]
+         # self.str_columns = str_columns
+         return str_columns
+
+     def _get_formatted_index(self, frame: DataFrame) -> list[str]:
+         # Note: this is only used by to_string() and to_latex(), not by
+         # to_html(). So it is safe to cast col_space here.
+         col_space = {k: cast(int, v) for k, v in self.col_space.items()}
+         index = frame.index
+         columns = frame.columns
+         fmt = self._get_formatter("__index__")
+
+         if isinstance(index, MultiIndex):
+             fmt_index = index._format_multi(
+                 sparsify=self.sparsify,
+                 include_names=self.show_row_idx_names,
+                 formatter=fmt,
+             )
+         else:
+             fmt_index = [
+                 index._format_flat(include_name=self.show_row_idx_names, formatter=fmt)
+             ]
+
+         fmt_index = [
+             tuple(
+                 _make_fixed_width(
+                     list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj
+                 )
+             )
+             for x in fmt_index
+         ]
+
+         adjoined = self.adj.adjoin(1, *fmt_index).split("\n")
+
+         # empty space for columns
+         if self.show_col_idx_names:
+             col_header = [str(x) for x in self._get_column_name_list()]
+         else:
+             col_header = [""] * columns.nlevels
+
+         if self.header:
+             return col_header + adjoined
+         else:
+             return adjoined
+
+     def _get_column_name_list(self) -> list[Hashable]:
+         names: list[Hashable] = []
+         columns = self.frame.columns
+         if isinstance(columns, MultiIndex):
+             names.extend("" if name is None else name for name in columns.names)
+         else:
+             names.append("" if columns.name is None else columns.name)
+         return names
+
+
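The interplay of max_rows and min_rows in _adjust_max_rows above can be observed through the public display options (a sketch, assuming the standard option names):

import pandas as pd

df = pd.DataFrame({"c": range(100)})
# 100 rows > max_rows, so the frame is truncated; min_rows then caps the
# displayed rows at 4 (2 head + 2 tail rows around the "..." marker).
with pd.option_context("display.max_rows", 10, "display.min_rows", 4):
    print(df)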
+ class DataFrameRenderer:
+     """Class for creating dataframe output in multiple formats.
+
+     Called in pandas.core.generic.NDFrame:
+         - to_csv
+         - to_latex
+
+     Called in pandas.core.frame.DataFrame:
+         - to_html
+         - to_string
+
+     Parameters
+     ----------
+     fmt : DataFrameFormatter
+         Formatter with the formatting options.
+     """
+
+     def __init__(self, fmt: DataFrameFormatter) -> None:
+         self.fmt = fmt
+
+     def to_html(
+         self,
+         buf: FilePath | WriteBuffer[str] | None = None,
+         encoding: str | None = None,
+         classes: str | list | tuple | None = None,
+         notebook: bool = False,
+         border: int | bool | None = None,
+         table_id: str | None = None,
+         render_links: bool = False,
+     ) -> str | None:
+         """
+         Render a DataFrame to an HTML table.
+
+         Parameters
+         ----------
+         buf : str, path object, file-like object, or None, default None
+             String, path object (implementing ``os.PathLike[str]``), or file-like
+             object implementing a string ``write()`` function. If None, the result is
+             returned as a string.
+         encoding : str, default "utf-8"
+             Set character encoding.
+         classes : str or list-like
+             classes to include in the `class` attribute of the opening
+             ``<table>`` tag, in addition to the default "dataframe".
+         notebook : {True, False}, optional, default False
+             Whether the generated HTML is for IPython Notebook.
+         border : int
+             A ``border=border`` attribute is included in the opening
+             ``<table>`` tag. Default ``pd.options.display.html.border``.
+         table_id : str, optional
+             A css id is included in the opening `<table>` tag if specified.
+         render_links : bool, default False
+             Convert URLs to HTML links.
+         """
+         from pandas.io.formats.html import (
+             HTMLFormatter,
+             NotebookFormatter,
+         )
+
+         Klass = NotebookFormatter if notebook else HTMLFormatter
+
+         html_formatter = Klass(
+             self.fmt,
+             classes=classes,
+             border=border,
+             table_id=table_id,
+             render_links=render_links,
+         )
+         string = html_formatter.to_string()
+         return save_to_buffer(string, buf=buf, encoding=encoding)
+
+     def to_string(
+         self,
+         buf: FilePath | WriteBuffer[str] | None = None,
+         encoding: str | None = None,
+         line_width: int | None = None,
+     ) -> str | None:
+         """
+         Render a DataFrame to a console-friendly tabular output.
+
+         Parameters
+         ----------
+         buf : str, path object, file-like object, or None, default None
+             String, path object (implementing ``os.PathLike[str]``), or file-like
+             object implementing a string ``write()`` function. If None, the result is
+             returned as a string.
+         encoding : str, default "utf-8"
+             Set character encoding.
+         line_width : int, optional
+             Width to wrap a line in characters.
+         """
+         from pandas.io.formats.string import StringFormatter
+
+         string_formatter = StringFormatter(self.fmt, line_width=line_width)
+         string = string_formatter.to_string()
+         return save_to_buffer(string, buf=buf, encoding=encoding)
+
+     def to_csv(
+         self,
+         path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
+         encoding: str | None = None,
+         sep: str = ",",
+         columns: Sequence[Hashable] | None = None,
+         index_label: IndexLabel | None = None,
+         mode: str = "w",
+         compression: CompressionOptions = "infer",
+         quoting: int | None = None,
+         quotechar: str = '"',
+         lineterminator: str | None = None,
+         chunksize: int | None = None,
+         date_format: str | None = None,
+         doublequote: bool = True,
+         escapechar: str | None = None,
+         errors: str = "strict",
+         storage_options: StorageOptions | None = None,
+     ) -> str | None:
+         """
+         Render a DataFrame as a comma-separated file.
+         """
+         from pandas.io.formats.csvs import CSVFormatter
+
+         if path_or_buf is None:
+             created_buffer = True
+             path_or_buf = StringIO()
+         else:
+             created_buffer = False
+
+         csv_formatter = CSVFormatter(
+             path_or_buf=path_or_buf,
+             lineterminator=lineterminator,
+             sep=sep,
+             encoding=encoding,
+             errors=errors,
+             compression=compression,
+             quoting=quoting,
+             cols=columns,
+             index_label=index_label,
+             mode=mode,
+             chunksize=chunksize,
+             quotechar=quotechar,
+             date_format=date_format,
+             doublequote=doublequote,
+             escapechar=escapechar,
+             storage_options=storage_options,
+             formatter=self.fmt,
+         )
+         csv_formatter.save()
+
+         if created_buffer:
+             assert isinstance(path_or_buf, StringIO)
+             content = path_or_buf.getvalue()
+             path_or_buf.close()
+             return content
+
+         return None
+
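A sketch of the renderer round-trip (assumption: these are internal classes; the public DataFrame.to_string takes this exact path):

import pandas as pd
from pandas.io.formats.format import DataFrameFormatter, DataFrameRenderer

df = pd.DataFrame({"x": [1.0, 2.5], "y": ["a", "b"]})
fmt = DataFrameFormatter(df)                # formatting options + truncation
text = DataFrameRenderer(fmt).to_string()   # buf=None, so a str is returned
assert text == df.to_string()               # should hold with default options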
+
+ def save_to_buffer(
+     string: str,
+     buf: FilePath | WriteBuffer[str] | None = None,
+     encoding: str | None = None,
+ ) -> str | None:
+     """
+     Perform serialization. Write to buf or return as string if buf is None.
+     """
+     with _get_buffer(buf, encoding=encoding) as fd:
+         fd.write(string)
+         if buf is None:
+             # error: "WriteBuffer[str]" has no attribute "getvalue"
+             return fd.getvalue()  # type: ignore[attr-defined]
+         return None
+
+
+ @contextmanager
+ def _get_buffer(
+     buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None
+ ) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]:
+     """
+     Context manager to open, yield and close buffer for filenames or Path-like
+     objects, otherwise yield buf unchanged.
+     """
+     if buf is not None:
+         buf = stringify_path(buf)
+     else:
+         buf = StringIO()
+
+     if encoding is None:
+         encoding = "utf-8"
+     elif not isinstance(buf, str):
+         raise ValueError("buf is not a file name and encoding is specified.")
+
+     if hasattr(buf, "write"):
+         # Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str],
+         # StringIO]", expected type "Union[WriteBuffer[str], StringIO]")
+         yield buf  # type: ignore[misc]
+     elif isinstance(buf, str):
+         check_parent_directory(str(buf))
+         with open(buf, "w", encoding=encoding, newline="") as f:
+             # GH#30034 open instead of codecs.open prevents a file leak
+             # if we have an invalid encoding argument.
+             # newline="" is needed to roundtrip correctly on
+             # windows test_to_latex_filename
+             yield f
+     else:
+         raise TypeError("buf is not a file name and it has no write method")
+
+
+ # ----------------------------------------------------------------------
+ # Array formatters
+
+
+ def format_array(
+     values: ArrayLike,
+     formatter: Callable | None,
+     float_format: FloatFormatType | None = None,
+     na_rep: str = "NaN",
+     digits: int | None = None,
+     space: str | int | None = None,
+     justify: str = "right",
+     decimal: str = ".",
+     leading_space: bool | None = True,
+     quoting: int | None = None,
+     fallback_formatter: Callable | None = None,
+ ) -> list[str]:
+     """
+     Format an array for printing.
+
+     Parameters
+     ----------
+     values : np.ndarray or ExtensionArray
+     formatter
+     float_format
+     na_rep
+     digits
+     space
+     justify
+     decimal
+     leading_space : bool, optional, default True
+         Whether the array should be formatted with a leading space.
+         When an array is a column of a Series or DataFrame, we do want
+         the leading space to pad between columns.
+
+         When formatting an Index subclass
+         (e.g. IntervalIndex._get_values_for_csv), we don't want the
+         leading space since it should be left-aligned.
+     fallback_formatter
+
+     Returns
+     -------
+     List[str]
+     """
+     fmt_klass: type[_GenericArrayFormatter]
+     if lib.is_np_dtype(values.dtype, "M"):
+         fmt_klass = _Datetime64Formatter
+         values = cast(DatetimeArray, values)
+     elif isinstance(values.dtype, DatetimeTZDtype):
+         fmt_klass = _Datetime64TZFormatter
+         values = cast(DatetimeArray, values)
+     elif lib.is_np_dtype(values.dtype, "m"):
+         fmt_klass = _Timedelta64Formatter
+         values = cast(TimedeltaArray, values)
+     elif isinstance(values.dtype, ExtensionDtype):
+         fmt_klass = _ExtensionArrayFormatter
+     elif lib.is_np_dtype(values.dtype, "fc"):
+         fmt_klass = FloatArrayFormatter
+     elif lib.is_np_dtype(values.dtype, "iu"):
+         fmt_klass = _IntArrayFormatter
+     else:
+         fmt_klass = _GenericArrayFormatter
+
+     if space is None:
+         space = 12
+
+     if float_format is None:
+         float_format = get_option("display.float_format")
+
+     if digits is None:
+         digits = get_option("display.precision")
+
+     fmt_obj = fmt_klass(
+         values,
+         digits=digits,
+         na_rep=na_rep,
+         float_format=float_format,
+         formatter=formatter,
+         space=space,
+         justify=justify,
+         decimal=decimal,
+         leading_space=leading_space,
+         quoting=quoting,
+         fallback_formatter=fallback_formatter,
+     )
+
+     return fmt_obj.get_result()
+
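For illustration, calling format_array directly on a float ndarray (internal helper; the exact padding depends on the active display options):

import numpy as np
from pandas.io.formats.format import format_array

vals = np.array([1.0, 123.456, np.nan])
# Dispatches to FloatArrayFormatter and pads to a common width.
print(format_array(vals, formatter=None, na_rep="NaN", digits=3))
# e.g. ['   1.000', ' 123.456', '     NaN']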
+ class _GenericArrayFormatter:
+     def __init__(
+         self,
+         values: ArrayLike,
+         digits: int = 7,
+         formatter: Callable | None = None,
+         na_rep: str = "NaN",
+         space: str | int = 12,
+         float_format: FloatFormatType | None = None,
+         justify: str = "right",
+         decimal: str = ".",
+         quoting: int | None = None,
+         fixed_width: bool = True,
+         leading_space: bool | None = True,
+         fallback_formatter: Callable | None = None,
+     ) -> None:
+         self.values = values
+         self.digits = digits
+         self.na_rep = na_rep
+         self.space = space
+         self.formatter = formatter
+         self.float_format = float_format
+         self.justify = justify
+         self.decimal = decimal
+         self.quoting = quoting
+         self.fixed_width = fixed_width
+         self.leading_space = leading_space
+         self.fallback_formatter = fallback_formatter
+
+     def get_result(self) -> list[str]:
+         fmt_values = self._format_strings()
+         return _make_fixed_width(fmt_values, self.justify)
+
+     def _format_strings(self) -> list[str]:
+         if self.float_format is None:
+             float_format = get_option("display.float_format")
+             if float_format is None:
+                 precision = get_option("display.precision")
+                 float_format = lambda x: _trim_zeros_single_float(
+                     f"{x: .{precision:d}f}"
+                 )
+         else:
+             float_format = self.float_format
+
+         if self.formatter is not None:
+             formatter = self.formatter
+         elif self.fallback_formatter is not None:
+             formatter = self.fallback_formatter
+         else:
+             quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE
+             formatter = partial(
+                 printing.pprint_thing,
+                 escape_chars=("\t", "\r", "\n"),
+                 quote_strings=quote_strings,
+             )
+
+         def _format(x):
+             if self.na_rep is not None and is_scalar(x) and isna(x):
+                 if x is None:
+                     return "None"
+                 elif x is NA:
+                     return str(NA)
+                 elif lib.is_float(x) and np.isinf(x):
+                     # TODO(3.0): this will be unreachable when use_inf_as_na
+                     # deprecation is enforced
+                     return str(x)
+                 elif x is NaT or isinstance(x, (np.datetime64, np.timedelta64)):
+                     return "NaT"
+                 return self.na_rep
+             elif isinstance(x, PandasObject):
+                 return str(x)
+             elif isinstance(x, StringDtype):
+                 return repr(x)
+             else:
+                 # object dtype
+                 return str(formatter(x))
+
+         vals = self.values
+         if not isinstance(vals, np.ndarray):
+             raise TypeError(
+                 "ExtensionArray formatting should use _ExtensionArrayFormatter"
+             )
+         inferred = lib.map_infer(vals, is_float)
+         is_float_type = (
+             inferred
+             # vals may have 2 or more dimensions
+             & np.all(notna(vals), axis=tuple(range(1, len(vals.shape))))
+         )
+         leading_space = self.leading_space
+         if leading_space is None:
+             leading_space = is_float_type.any()
+
+         fmt_values = []
+         for i, v in enumerate(vals):
+             if (not is_float_type[i] or self.formatter is not None) and leading_space:
+                 fmt_values.append(f" {_format(v)}")
+             elif is_float_type[i]:
+                 fmt_values.append(float_format(v))
+             else:
+                 if leading_space is False:
+                     # False specifically, so that the default is
+                     # to include a space if we get here.
+                     tpl = "{v}"
+                 else:
+                     tpl = " {v}"
+                 fmt_values.append(tpl.format(v=_format(v)))
+
+         return fmt_values
+
+
+ class FloatArrayFormatter(_GenericArrayFormatter):
+     def __init__(self, *args, **kwargs) -> None:
+         super().__init__(*args, **kwargs)
+
+         # float_format is expected to be a string
+         # formatter should be used to pass a function
+         if self.float_format is not None and self.formatter is None:
+             # GH21625, GH22270
+             self.fixed_width = False
+             if callable(self.float_format):
+                 self.formatter = self.float_format
+                 self.float_format = None
+
+     def _value_formatter(
+         self,
+         float_format: FloatFormatType | None = None,
+         threshold: float | None = None,
+     ) -> Callable:
+         """Returns a function to be applied on each value to format it"""
+         # the float_format parameter supersedes self.float_format
+         if float_format is None:
+             float_format = self.float_format
+
+         # we are going to compose different functions, to first convert to
+         # a string, then replace the decimal symbol, and finally chop according
+         # to the threshold
+
+         # when there is no float_format, we use str instead of '%g'
+         # because str(0.0) = '0.0' while '%g' % 0.0 = '0'
+         if float_format:
+
+             def base_formatter(v):
+                 assert float_format is not None  # for mypy
+                 # error: "str" not callable
+                 # error: Unexpected keyword argument "value" for "__call__" of
+                 # "EngFormatter"
+                 return (
+                     float_format(value=v)  # type: ignore[operator,call-arg]
+                     if notna(v)
+                     else self.na_rep
+                 )
+
+         else:
+
+             def base_formatter(v):
+                 return str(v) if notna(v) else self.na_rep
+
+         if self.decimal != ".":
+
+             def decimal_formatter(v):
+                 return base_formatter(v).replace(".", self.decimal, 1)
+
+         else:
+             decimal_formatter = base_formatter
+
+         if threshold is None:
+             return decimal_formatter
+
+         def formatter(value):
+             if notna(value):
+                 if abs(value) > threshold:
+                     return decimal_formatter(value)
+                 else:
+                     return decimal_formatter(0.0)
+             else:
+                 return self.na_rep
+
+         return formatter
+
+     def get_result_as_array(self) -> np.ndarray:
+         """
+         Returns the float values converted into strings using
+         the parameters given at initialisation, as a numpy array
+         """
+
+         def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str):
+             mask = isna(values)
+             formatted = np.array(
+                 [
+                     formatter(val) if not m else na_rep
+                     for val, m in zip(values.ravel(), mask.ravel())
+                 ]
+             ).reshape(values.shape)
+             return formatted
+
+         def format_complex_with_na_rep(
+             values: ArrayLike, formatter: Callable, na_rep: str
+         ):
+             real_values = np.real(values).ravel()  # type: ignore[arg-type]
+             imag_values = np.imag(values).ravel()  # type: ignore[arg-type]
+             real_mask, imag_mask = isna(real_values), isna(imag_values)
+             formatted_lst = []
+             for val, real_val, imag_val, re_isna, im_isna in zip(
+                 values.ravel(),
+                 real_values,
+                 imag_values,
+                 real_mask,
+                 imag_mask,
+             ):
+                 if not re_isna and not im_isna:
+                     formatted_lst.append(formatter(val))
+                 elif not re_isna:  # xxx+nanj
+                     formatted_lst.append(f"{formatter(real_val)}+{na_rep}j")
+                 elif not im_isna:  # nan[+/-]xxxj
+                     # The imaginary part may either start with a "-" or a space
+                     imag_formatted = formatter(imag_val).strip()
+                     if imag_formatted.startswith("-"):
+                         formatted_lst.append(f"{na_rep}{imag_formatted}j")
+                     else:
+                         formatted_lst.append(f"{na_rep}+{imag_formatted}j")
+                 else:  # nan+nanj
+                     formatted_lst.append(f"{na_rep}+{na_rep}j")
+             return np.array(formatted_lst).reshape(values.shape)
+
+         if self.formatter is not None:
+             return format_with_na_rep(self.values, self.formatter, self.na_rep)
+
+         if self.fixed_width:
+             threshold = get_option("display.chop_threshold")
+         else:
+             threshold = None
+
+         # if we have a fixed_width, we'll need to try different float_format
+         def format_values_with(float_format):
+             formatter = self._value_formatter(float_format, threshold)
+
+             # default formatter leaves a space to the left when formatting
+             # floats, must be consistent for left-justifying NaNs (GH #25061)
+             na_rep = " " + self.na_rep if self.justify == "left" else self.na_rep
+
+             # different formatting strategies for complex and non-complex data
+             # need to distinguish complex and float NaNs (GH #53762)
+             values = self.values
+             is_complex = is_complex_dtype(values)
+
+             # separate the wheat from the chaff
+             if is_complex:
+                 values = format_complex_with_na_rep(values, formatter, na_rep)
+             else:
+                 values = format_with_na_rep(values, formatter, na_rep)
+
+             if self.fixed_width:
+                 if is_complex:
+                     result = _trim_zeros_complex(values, self.decimal)
+                 else:
+                     result = _trim_zeros_float(values, self.decimal)
+                 return np.asarray(result, dtype="object")
+
+             return values
+
+         # There is a special default string when we are fixed-width
+         # The default is otherwise to use str instead of a formatting string
+         float_format: FloatFormatType | None
+         if self.float_format is None:
+             if self.fixed_width:
+                 if self.leading_space is True:
+                     fmt_str = "{value: .{digits:d}f}"
+                 else:
+                     fmt_str = "{value:.{digits:d}f}"
+                 float_format = partial(fmt_str.format, digits=self.digits)
+             else:
+                 float_format = self.float_format
+         else:
+             float_format = lambda value: self.float_format % value
+
+         formatted_values = format_values_with(float_format)
+
+         if not self.fixed_width:
+             return formatted_values
+
+         # we need to convert to engineering format if some values are too small
+         # and would appear as 0, or if some values are too big and take too
+         # much space
+
+         if len(formatted_values) > 0:
+             maxlen = max(len(x) for x in formatted_values)
+             too_long = maxlen > self.digits + 6
+         else:
+             too_long = False
+
+         abs_vals = np.abs(self.values)
+         # this is pretty arbitrary for now
+         # large values: more than 8 characters including decimal symbol
+         # and first digit, hence > 1e6
+         has_large_values = (abs_vals > 1e6).any()
+         has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any()
+
+         if has_small_values or (too_long and has_large_values):
+             if self.leading_space is True:
+                 fmt_str = "{value: .{digits:d}e}"
+             else:
+                 fmt_str = "{value:.{digits:d}e}"
+             float_format = partial(fmt_str.format, digits=self.digits)
+             formatted_values = format_values_with(float_format)
+
+         return formatted_values
+
+     def _format_strings(self) -> list[str]:
+         return list(self.get_result_as_array())
+
+
+ class _IntArrayFormatter(_GenericArrayFormatter):
+     def _format_strings(self) -> list[str]:
+         if self.leading_space is False:
+             formatter_str = lambda x: f"{x:d}"
+         else:
+             formatter_str = lambda x: f"{x: d}"
+         formatter = self.formatter or formatter_str
+         fmt_values = [formatter(x) for x in self.values]
+         return fmt_values
+
+
+ class _Datetime64Formatter(_GenericArrayFormatter):
+     values: DatetimeArray
+
+     def __init__(
+         self,
+         values: DatetimeArray,
+         nat_rep: str = "NaT",
+         date_format: None = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(values, **kwargs)
+         self.nat_rep = nat_rep
+         self.date_format = date_format
+
+     def _format_strings(self) -> list[str]:
+         """we by definition DO NOT have a TZ"""
+         values = self.values
+
+         if self.formatter is not None:
+             return [self.formatter(x) for x in values]
+
+         fmt_values = values._format_native_types(
+             na_rep=self.nat_rep, date_format=self.date_format
+         )
+         return fmt_values.tolist()
+
+
+ class _ExtensionArrayFormatter(_GenericArrayFormatter):
+     values: ExtensionArray
+
+     def _format_strings(self) -> list[str]:
+         values = self.values
+
+         formatter = self.formatter
+         fallback_formatter = None
+         if formatter is None:
+             fallback_formatter = values._formatter(boxed=True)
+
+         if isinstance(values, Categorical):
+             # Categorical is special for now, so that we can preserve tzinfo
+             array = values._internal_get_values()
+         else:
+             array = np.asarray(values, dtype=object)
+
+         fmt_values = format_array(
+             array,
+             formatter,
+             float_format=self.float_format,
+             na_rep=self.na_rep,
+             digits=self.digits,
+             space=self.space,
+             justify=self.justify,
+             decimal=self.decimal,
+             leading_space=self.leading_space,
+             quoting=self.quoting,
+             fallback_formatter=fallback_formatter,
+         )
+         return fmt_values
+
+
+ def format_percentiles(
+     percentiles: (np.ndarray | Sequence[float]),
+ ) -> list[str]:
+     """
+     Outputs rounded and formatted percentiles.
+
+     Parameters
+     ----------
+     percentiles : list-like, containing floats from interval [0,1]
+
+     Returns
+     -------
+     formatted : list of strings
+
+     Notes
+     -----
+     Rounding precision is chosen so that: (1) if any two elements of
+     ``percentiles`` differ, they remain different after rounding
+     (2) no entry is *rounded* to 0% or 100%.
+     Any non-integer is always rounded to at least 1 decimal place.
+
+     Examples
+     --------
+     Keeps all entries different after rounding:
+
+     >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
+     ['1.999%', '2.001%', '50%', '66.667%', '99.99%']
+
+     No element is rounded to 0% or 100% (unless already equal to it).
+     Duplicates are allowed:
+
+     >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
+     ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
+     """
+     percentiles = np.asarray(percentiles)
+
+     # It checks for np.nan as well
+     if (
+         not is_numeric_dtype(percentiles)
+         or not np.all(percentiles >= 0)
+         or not np.all(percentiles <= 1)
+     ):
+         raise ValueError("percentiles should all be in the interval [0,1]")
+
+     percentiles = 100 * percentiles
+     prec = get_precision(percentiles)
+     percentiles_round_type = percentiles.round(prec).astype(int)
+
+     int_idx = np.isclose(percentiles_round_type, percentiles)
+
+     if np.all(int_idx):
+         out = percentiles_round_type.astype(str)
+         return [i + "%" for i in out]
+
+     unique_pcts = np.unique(percentiles)
+     prec = get_precision(unique_pcts)
+     out = np.empty_like(percentiles, dtype=object)
+     out[int_idx] = percentiles[int_idx].round().astype(int).astype(str)
+
+     out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
+     return [i + "%" for i in out]
+
+
+ def get_precision(array: np.ndarray | Sequence[float]) -> int:
+     to_begin = array[0] if array[0] > 0 else None
+     to_end = 100 - array[-1] if array[-1] < 100 else None
+     diff = np.ediff1d(array, to_begin=to_begin, to_end=to_end)
+     diff = abs(diff)
+     prec = -np.floor(np.log10(np.min(diff))).astype(int)
+     prec = max(1, prec)
+     return prec
+
+
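A worked check of get_precision (values are percentages in [0, 100]; the precision is driven by the smallest gap, including the gaps to 0 and 100):

import numpy as np
from pandas.io.formats.format import get_precision

print(get_precision(np.array([25.0, 50.0, 75.0])))    # 1 (all gaps are wide)
# min gap is 2.001 - 1.999 = 0.002, so -floor(log10(0.002)) = 3 decimals
print(get_precision(np.array([1.999, 2.001, 50.0])))  # 3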
1619
def _format_datetime64(x: NaTType | Timestamp, nat_rep: str = "NaT") -> str:
    if x is NaT:
        return nat_rep

    # Timestamp.__str__ falls back to datetime.datetime.__str__ = isoformat(sep=' ')
    # so it already uses string formatting rather than strftime (faster).
    return str(x)


def _format_datetime64_dateonly(
    x: NaTType | Timestamp,
    nat_rep: str = "NaT",
    date_format: str | None = None,
) -> str:
    if isinstance(x, NaTType):
        return nat_rep

    if date_format:
        return x.strftime(date_format)
    else:
        # Timestamp._date_repr relies on string formatting (faster than strftime)
        return x._date_repr


def get_format_datetime64(
    is_dates_only: bool, nat_rep: str = "NaT", date_format: str | None = None
) -> Callable:
    """Return a formatter callable taking a datetime64 as input and providing
    a string as output"""

    if is_dates_only:
        return lambda x: _format_datetime64_dateonly(
            x, nat_rep=nat_rep, date_format=date_format
        )
    else:
        return lambda x: _format_datetime64(x, nat_rep=nat_rep)


class _Datetime64TZFormatter(_Datetime64Formatter):
    values: DatetimeArray

    def _format_strings(self) -> list[str]:
        """we by definition have a TZ"""
        ido = self.values._is_dates_only
        values = self.values.astype(object)
        formatter = self.formatter or get_format_datetime64(
            ido, date_format=self.date_format
        )
        fmt_values = [formatter(x) for x in values]

        return fmt_values


class _Timedelta64Formatter(_GenericArrayFormatter):
    values: TimedeltaArray

    def __init__(
        self,
        values: TimedeltaArray,
        nat_rep: str = "NaT",
        **kwargs,
    ) -> None:
        # TODO: nat_rep is never passed, na_rep is.
        super().__init__(values, **kwargs)
        self.nat_rep = nat_rep

    def _format_strings(self) -> list[str]:
        formatter = self.formatter or get_format_timedelta64(
            self.values, nat_rep=self.nat_rep, box=False
        )
        return [formatter(x) for x in self.values]


def get_format_timedelta64(
    values: TimedeltaArray,
    nat_rep: str | float = "NaT",
    box: bool = False,
) -> Callable:
    """
    Return a formatter function for a range of timedeltas.
    These will all have the same format argument.

    If box, then show the result in quotes.
    """
    even_days = values._is_dates_only

    if even_days:
        format = None
    else:
        format = "long"

    def _formatter(x):
        if x is None or (is_scalar(x) and isna(x)):
            return nat_rep

        if not isinstance(x, Timedelta):
            x = Timedelta(x)

        # Timedelta._repr_base uses string formatting (faster than strftime)
        result = x._repr_base(format=format)
        if box:
            result = f"'{result}'"
        return result

    return _formatter

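# --- Editor's illustration (not part of the original pandas file) ----------
# get_format_timedelta64 picks one format for the whole array: even-day
# ranges render short ("1 days"), mixed ranges render long
# ("1 days 00:00:00"), and box=True wraps the result in quotes. Sketch
# (hypothetical demo; assumes pandas is importable at runtime):
def _demo_timedelta_formatter() -> None:
    import pandas as pd

    tda = pd.to_timedelta(["1 days", "2 days 03:00:00"])._values
    fmt_fn = get_format_timedelta64(tda, box=True)
    # the 3-hour component forces the "long" format for every element
    assert fmt_fn(tda[0]).startswith("'1 days")

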
def _make_fixed_width(
    strings: list[str],
    justify: str = "right",
    minimum: int | None = None,
    adj: printing._TextAdjustment | None = None,
) -> list[str]:
    if len(strings) == 0 or justify == "all":
        return strings

    if adj is None:
        adjustment = printing.get_adjustment()
    else:
        adjustment = adj

    max_len = max(adjustment.len(x) for x in strings)

    if minimum is not None:
        max_len = max(minimum, max_len)

    conf_max = get_option("display.max_colwidth")
    if conf_max is not None and max_len > conf_max:
        max_len = conf_max

    def just(x: str) -> str:
        if conf_max is not None:
            if (conf_max > 3) & (adjustment.len(x) > max_len):
                x = x[: max_len - 3] + "..."
        return x

    strings = [just(x) for x in strings]
    result = adjustment.justify(strings, max_len, mode=justify)
    return result


def _trim_zeros_complex(str_complexes: ArrayLike, decimal: str = ".") -> list[str]:
    """
    Separates the real and imaginary parts from the complex number, and
    executes the _trim_zeros_float method on each of those.
    """
    real_part, imag_part = [], []
    for x in str_complexes:
        # Complex numbers are represented as "(-)xxx(+/-)xxxj"
        # The split will give [{"", "-"}, "xxx", "+/-", "xxx", "j", ""]
        # Therefore, the imaginary part is the 4th and 3rd last elements,
        # and the real part is everything before the imaginary part
        trimmed = re.split(r"([j+-])", x)
        real_part.append("".join(trimmed[:-4]))
        imag_part.append("".join(trimmed[-4:-2]))

    # We want to align the lengths of the real and imaginary parts of each complex
    # number, as well as the lengths of the real (resp. imaginary) parts of all
    # numbers in the array
    n = len(str_complexes)
    padded_parts = _trim_zeros_float(real_part + imag_part, decimal)
    if len(padded_parts) == 0:
        return []
    padded_length = max(len(part) for part in padded_parts) - 1
    padded = [
        real_pt  # real part, possibly NaN
        + imag_pt[0]  # +/-
        + f"{imag_pt[1:]:>{padded_length}}"  # complex part (no sign), possibly nan
        + "j"
        for real_pt, imag_pt in zip(padded_parts[:n], padded_parts[n:])
    ]
    return padded


def _trim_zeros_single_float(str_float: str) -> str:
    """
    Trims trailing zeros after a decimal point,
    leaving just one if necessary.
    """
    str_float = str_float.rstrip("0")
    if str_float.endswith("."):
        str_float += "0"

    return str_float


def _trim_zeros_float(
    str_floats: ArrayLike | list[str], decimal: str = "."
) -> list[str]:
    """
    Trims the maximum number of trailing zeros equally from
    all numbers containing decimals, leaving just one if
    necessary.
    """
    trimmed = str_floats
    number_regex = re.compile(rf"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$")

    def is_number_with_decimal(x) -> bool:
        return re.match(number_regex, x) is not None

    def should_trim(values: ArrayLike | list[str]) -> bool:
        """
        Determine if an array of strings should be trimmed.

        Returns True if all numbers containing decimals (defined by the
        above regular expression) within the array end in a zero, otherwise
        returns False.
        """
        numbers = [x for x in values if is_number_with_decimal(x)]
        return len(numbers) > 0 and all(x.endswith("0") for x in numbers)

    while should_trim(trimmed):
        trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed]

    # leave one 0 after the decimal points if need be.
    result = [
        x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x
        for x in trimmed
    ]
    return result

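# --- Editor's illustration (not part of the original pandas file) ----------
# _trim_zeros_float trims zeros batch-wise: a trailing zero goes only if
# every decimal string in the batch still ends in one, so the column keeps a
# uniform width. Sketch (hypothetical demo, safe to delete):
def _demo_trim_zeros_float() -> None:
    assert _trim_zeros_float(["1.500", "2.570"]) == ["1.50", "2.57"]
    # one non-zero final digit blocks trimming for the whole batch
    assert _trim_zeros_float(["1.500", "2.573"]) == ["1.500", "2.573"]

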
def _has_names(index: Index) -> bool:
    if isinstance(index, MultiIndex):
        return com.any_not_none(*index.names)
    else:
        return index.name is not None


class EngFormatter:
    """
    Formats float values according to engineering format.

    Based on matplotlib.ticker.EngFormatter
    """

    # The SI engineering prefixes
    ENG_PREFIXES = {
        -24: "y",
        -21: "z",
        -18: "a",
        -15: "f",
        -12: "p",
        -9: "n",
        -6: "u",
        -3: "m",
        0: "",
        3: "k",
        6: "M",
        9: "G",
        12: "T",
        15: "P",
        18: "E",
        21: "Z",
        24: "Y",
    }

    def __init__(
        self, accuracy: int | None = None, use_eng_prefix: bool = False
    ) -> None:
        self.accuracy = accuracy
        self.use_eng_prefix = use_eng_prefix

    def __call__(self, num: float) -> str:
        """
        Formats a number in engineering notation, appending a letter
        representing the power of 1000 of the original number. Some examples:
        >>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True)
        >>> format_eng(0)
        ' 0'
        >>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True)
        >>> format_eng(1_000_000)
        ' 1.0M'
        >>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False)
        >>> format_eng("-1e-6")
        '-1.00E-06'

        @param num: the value to represent
        @type num: either a numeric value or a string that can be converted to
                   a numeric value (as per decimal.Decimal constructor)

        @return: engineering formatted string
        """
        dnum = Decimal(str(num))

        if Decimal.is_nan(dnum):
            return "NaN"

        if Decimal.is_infinite(dnum):
            return "inf"

        sign = 1

        if dnum < 0:  # pragma: no cover
            sign = -1
            dnum = -dnum

        if dnum != 0:
            pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3))
        else:
            pow10 = Decimal(0)

        pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
        pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
        int_pow10 = int(pow10)

        if self.use_eng_prefix:
            prefix = self.ENG_PREFIXES[int_pow10]
        elif int_pow10 < 0:
            prefix = f"E-{-int_pow10:02d}"
        else:
            prefix = f"E+{int_pow10:02d}"

        mant = sign * dnum / (10**pow10)

        if self.accuracy is None:  # pragma: no cover
            format_str = "{mant: g}{prefix}"
        else:
            format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"

        formatted = format_str.format(mant=mant, prefix=prefix)

        return formatted

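# --- Editor's illustration (not part of the original pandas file) ----------
# EngFormatter snaps the exponent to a multiple of 3 and renders either an
# SI letter (use_eng_prefix=True) or an explicit E+xx suffix. Sketch
# (hypothetical demo, safe to delete):
def _demo_eng_formatter() -> None:
    assert EngFormatter(accuracy=1, use_eng_prefix=True)(1_500_000) == " 1.5M"
    assert EngFormatter(accuracy=1, use_eng_prefix=False)(0.0015) == " 1.5E-03"

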
def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:
    """
    Format float representation in DataFrame with SI notation.

    Parameters
    ----------
    accuracy : int, default 3
        Number of decimal digits after the floating point.
    use_eng_prefix : bool, default False
        Whether to represent a value with SI prefixes.

    Returns
    -------
    None

    Examples
    --------
    >>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6])
    >>> df
                  0
    0  1.000000e-09
    1  1.000000e-03
    2  1.000000e+00
    3  1.000000e+03
    4  1.000000e+06

    >>> pd.set_eng_float_format(accuracy=1)
    >>> df
             0
    0  1.0E-09
    1  1.0E-03
    2  1.0E+00
    3  1.0E+03
    4  1.0E+06

    >>> pd.set_eng_float_format(use_eng_prefix=True)
    >>> df
            0
    0  1.000n
    1  1.000m
    2   1.000
    3  1.000k
    4  1.000M

    >>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)
    >>> df
          0
    0  1.0n
    1  1.0m
    2   1.0
    3  1.0k
    4  1.0M

    >>> pd.set_option("display.float_format", None)  # unset option
    """
    set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))


def get_level_lengths(
    levels: Any, sentinel: bool | object | str = ""
) -> list[dict[int, int]]:
    """
    For each index in each level the function returns lengths of indexes.

    Parameters
    ----------
    levels : list of lists
        List of values for each level.
    sentinel : string, optional
        Value which states that no new index starts there.

    Returns
    -------
    Returns list of maps. For each level returns map of indexes (key is index
    in row and value is length of index).
    """
    if len(levels) == 0:
        return []

    control = [True] * len(levels[0])

    result = []
    for level in levels:
        last_index = 0

        lengths = {}
        for i, key in enumerate(level):
            if control[i] and key == sentinel:
                pass
            else:
                control[i] = False
                lengths[last_index] = i - last_index
                last_index = i

        lengths[last_index] = len(level) - last_index

        result.append(lengths)

    return result

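# --- Editor's illustration (not part of the original pandas file) ----------
# get_level_lengths turns sparsified level values into {start: span} maps,
# which the HTML writer converts into rowspan/colspan attributes. Worked
# example (hypothetical demo, safe to delete):
def _demo_get_level_lengths() -> None:
    levels = [["a", "", "", "b"], ["x", "y", "x", "y"]]
    # "a" spans rows 0-2 ("" is the sentinel), "b" spans row 3; the inner
    # level never repeats, so every entry has span 1
    assert get_level_lengths(levels) == [{0: 3, 3: 1}, {0: 1, 1: 1, 2: 1, 3: 1}]

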
def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None:
    """
    Appends lines to a buffer.

    Parameters
    ----------
    buf
        The buffer to write to
    lines
        The lines to append.
    """
    if any(isinstance(x, str) for x in lines):
        lines = [str(x) for x in lines]
    buf.write("\n".join(lines))

venv/lib/python3.10/site-packages/pandas/io/formats/html.py ADDED
@@ -0,0 +1,646 @@
+ """
2
+ Module for formatting output data in HTML.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from textwrap import dedent
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ Any,
10
+ Final,
11
+ cast,
12
+ )
13
+
14
+ from pandas._config import get_option
15
+
16
+ from pandas._libs import lib
17
+
18
+ from pandas import (
19
+ MultiIndex,
20
+ option_context,
21
+ )
22
+
23
+ from pandas.io.common import is_url
24
+ from pandas.io.formats.format import (
25
+ DataFrameFormatter,
26
+ get_level_lengths,
27
+ )
28
+ from pandas.io.formats.printing import pprint_thing
29
+
30
+ if TYPE_CHECKING:
31
+ from collections.abc import (
32
+ Hashable,
33
+ Iterable,
34
+ Mapping,
35
+ )
36
+
37
+
38
+ class HTMLFormatter:
39
+ """
40
+ Internal class for formatting output data in html.
41
+ This class is intended for shared functionality between
42
+ DataFrame.to_html() and DataFrame._repr_html_().
43
+ Any logic in common with other output formatting methods
44
+ should ideally be inherited from classes in format.py
45
+ and this class responsible for only producing html markup.
46
+ """
47
+
48
+ indent_delta: Final = 2
49
+
50
+ def __init__(
51
+ self,
52
+ formatter: DataFrameFormatter,
53
+ classes: str | list[str] | tuple[str, ...] | None = None,
54
+ border: int | bool | None = None,
55
+ table_id: str | None = None,
56
+ render_links: bool = False,
57
+ ) -> None:
58
+ self.fmt = formatter
59
+ self.classes = classes
60
+
61
+ self.frame = self.fmt.frame
62
+ self.columns = self.fmt.tr_frame.columns
63
+ self.elements: list[str] = []
64
+ self.bold_rows = self.fmt.bold_rows
65
+ self.escape = self.fmt.escape
66
+ self.show_dimensions = self.fmt.show_dimensions
67
+ if border is None or border is True:
68
+ border = cast(int, get_option("display.html.border"))
69
+ elif not border:
70
+ border = None
71
+
72
+ self.border = border
73
+ self.table_id = table_id
74
+ self.render_links = render_links
75
+
76
+ self.col_space = {}
77
+ is_multi_index = isinstance(self.columns, MultiIndex)
78
+ for column, value in self.fmt.col_space.items():
79
+ col_space_value = f"{value}px" if isinstance(value, int) else value
80
+ self.col_space[column] = col_space_value
81
+ # GH 53885: Handling case where column is index
82
+ # Flatten the data in the multi index and add in the map
83
+ if is_multi_index and isinstance(column, tuple):
84
+ for column_index in column:
85
+ self.col_space[str(column_index)] = col_space_value
86
+
87
+ def to_string(self) -> str:
88
+ lines = self.render()
89
+ if any(isinstance(x, str) for x in lines):
90
+ lines = [str(x) for x in lines]
91
+ return "\n".join(lines)
92
+
93
+ def render(self) -> list[str]:
94
+ self._write_table()
95
+
96
+ if self.should_show_dimensions:
97
+ by = chr(215) # × # noqa: RUF003
98
+ self.write(
99
+ f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
100
+ )
101
+
102
+ return self.elements
103
+
104
+ @property
105
+ def should_show_dimensions(self) -> bool:
106
+ return self.fmt.should_show_dimensions
107
+
108
+ @property
109
+ def show_row_idx_names(self) -> bool:
110
+ return self.fmt.show_row_idx_names
111
+
112
+ @property
113
+ def show_col_idx_names(self) -> bool:
114
+ return self.fmt.show_col_idx_names
115
+
116
+ @property
117
+ def row_levels(self) -> int:
118
+ if self.fmt.index:
119
+ # showing (row) index
120
+ return self.frame.index.nlevels
121
+ elif self.show_col_idx_names:
122
+ # see gh-22579
123
+ # Column misalignment also occurs for
124
+ # a standard index when the columns index is named.
125
+ # If the row index is not displayed a column of
126
+ # blank cells need to be included before the DataFrame values.
127
+ return 1
128
+ # not showing (row) index
129
+ return 0
130
+
131
+ def _get_columns_formatted_values(self) -> Iterable:
132
+ return self.columns
133
+
134
+ @property
135
+ def is_truncated(self) -> bool:
136
+ return self.fmt.is_truncated
137
+
138
+ @property
139
+ def ncols(self) -> int:
140
+ return len(self.fmt.tr_frame.columns)
141
+
142
+ def write(self, s: Any, indent: int = 0) -> None:
143
+ rs = pprint_thing(s)
144
+ self.elements.append(" " * indent + rs)
145
+
146
+ def write_th(
147
+ self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None
148
+ ) -> None:
149
+ """
150
+ Method for writing a formatted <th> cell.
151
+
152
+ If col_space is set on the formatter then that is used for
153
+ the value of min-width.
154
+
155
+ Parameters
156
+ ----------
157
+ s : object
158
+ The data to be written inside the cell.
159
+ header : bool, default False
160
+ Set to True if the <th> is for use inside <thead>. This will
161
+ cause min-width to be set if there is one.
162
+ indent : int, default 0
163
+ The indentation level of the cell.
164
+ tags : str, default None
165
+ Tags to include in the cell.
166
+
167
+ Returns
168
+ -------
169
+ A written <th> cell.
170
+ """
171
+ col_space = self.col_space.get(s, None)
172
+
173
+ if header and col_space is not None:
174
+ tags = tags or ""
175
+ tags += f'style="min-width: {col_space};"'
176
+
177
+ self._write_cell(s, kind="th", indent=indent, tags=tags)
178
+
179
+ def write_td(self, s: Any, indent: int = 0, tags: str | None = None) -> None:
180
+ self._write_cell(s, kind="td", indent=indent, tags=tags)
181
+
182
+ def _write_cell(
183
+ self, s: Any, kind: str = "td", indent: int = 0, tags: str | None = None
184
+ ) -> None:
185
+ if tags is not None:
186
+ start_tag = f"<{kind} {tags}>"
187
+ else:
188
+ start_tag = f"<{kind}>"
189
+
190
+ if self.escape:
191
+ # escape & first to prevent double escaping of &
192
+ esc = {"&": r"&amp;", "<": r"&lt;", ">": r"&gt;"}
193
+ else:
194
+ esc = {}
195
+
196
+ rs = pprint_thing(s, escape_chars=esc).strip()
197
+
198
+ if self.render_links and is_url(rs):
199
+ rs_unescaped = pprint_thing(s, escape_chars={}).strip()
200
+ start_tag += f'<a href="{rs_unescaped}" target="_blank">'
201
+ end_a = "</a>"
202
+ else:
203
+ end_a = ""
204
+
205
+ self.write(f"{start_tag}{rs}{end_a}</{kind}>", indent)
206
+
207
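    # --- Editor's illustration (not part of the original pandas file) ------
    # _write_cell escapes &, < and > (unless escape=False) and, with
    # render_links=True, wraps URL-shaped text in an anchor tag. Observable
    # through DataFrame.to_html (hypothetical demo; assumes pandas at runtime):
    @staticmethod
    def _demo_cell_escaping() -> None:
        import pandas as pd

        df = pd.DataFrame({"col": ["a < b", "https://pandas.pydata.org"]})
        html = df.to_html(render_links=True)
        assert "a &lt; b" in html
        assert 'target="_blank"' in html
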
    def write_tr(
        self,
        line: Iterable,
        indent: int = 0,
        indent_delta: int = 0,
        header: bool = False,
        align: str | None = None,
        tags: dict[int, str] | None = None,
        nindex_levels: int = 0,
    ) -> None:
        if tags is None:
            tags = {}

        if align is None:
            self.write("<tr>", indent)
        else:
            self.write(f'<tr style="text-align: {align};">', indent)
        indent += indent_delta

        for i, s in enumerate(line):
            val_tag = tags.get(i, None)
            if header or (self.bold_rows and i < nindex_levels):
                self.write_th(s, indent=indent, header=header, tags=val_tag)
            else:
                self.write_td(s, indent, tags=val_tag)

        indent -= indent_delta
        self.write("</tr>", indent)

    def _write_table(self, indent: int = 0) -> None:
        _classes = ["dataframe"]  # Default class.
        use_mathjax = get_option("display.html.use_mathjax")
        if not use_mathjax:
            _classes.append("tex2jax_ignore")
        if self.classes is not None:
            if isinstance(self.classes, str):
                self.classes = self.classes.split()
            if not isinstance(self.classes, (list, tuple)):
                raise TypeError(
                    "classes must be a string, list, "
                    f"or tuple, not {type(self.classes)}"
                )
            _classes.extend(self.classes)

        if self.table_id is None:
            id_section = ""
        else:
            id_section = f' id="{self.table_id}"'

        if self.border is None:
            border_attr = ""
        else:
            border_attr = f' border="{self.border}"'

        self.write(
            f'<table{border_attr} class="{" ".join(_classes)}"{id_section}>',
            indent,
        )

        if self.fmt.header or self.show_row_idx_names:
            self._write_header(indent + self.indent_delta)

        self._write_body(indent + self.indent_delta)

        self.write("</table>", indent)

    def _write_col_header(self, indent: int) -> None:
        row: list[Hashable]
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        if isinstance(self.columns, MultiIndex):
            template = 'colspan="{span:d}" halign="left"'

            sentinel: lib.NoDefault | bool
            if self.fmt.sparsify:
                # GH3547
                sentinel = lib.no_default
            else:
                sentinel = False
            levels = self.columns._format_multi(sparsify=sentinel, include_names=False)
            level_lengths = get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            for lnum, (records, values) in enumerate(zip(level_lengths, levels)):
                if is_truncated_horizontally:
                    # modify the header lines
                    ins_col = self.fmt.tr_col_num
                    if self.fmt.sparsify:
                        recs_new = {}
                        # Increment tags after ... col.
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            elif tag + span > ins_col:
                                recs_new[tag] = span + 1
                                if lnum == inner_lvl:
                                    values = (
                                        values[:ins_col] + ("...",) + values[ins_col:]
                                    )
                                else:
                                    # sparse col headers do not receive a ...
                                    values = (
                                        values[:ins_col]
                                        + (values[ins_col - 1],)
                                        + values[ins_col:]
                                    )
                            else:
                                recs_new[tag] = span
                            # if ins_col lies between tags, all col headers
                            # get ...
                            if tag + span == ins_col:
                                recs_new[ins_col] = 1
                                values = values[:ins_col] + ("...",) + values[ins_col:]
                        records = recs_new
                        inner_lvl = len(level_lengths) - 1
                        if lnum == inner_lvl:
                            records[ins_col] = 1
                    else:
                        recs_new = {}
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            else:
                                recs_new[tag] = span
                        recs_new[ins_col] = 1
                        records = recs_new
                        values = values[:ins_col] + ["..."] + values[ins_col:]

                # see gh-22579
                # Column Offset Bug with to_html(index=False) with
                # MultiIndex Columns and Index.
                # Initially fill row with blank cells before column names.
                # TODO: Refactor to remove code duplication with code
                # block below for standard columns index.
                row = [""] * (self.row_levels - 1)
                if self.fmt.index or self.show_col_idx_names:
                    # see gh-22747
                    # If to_html(index_names=False) do not show columns
                    # index names.
                    # TODO: Refactor to use _get_column_name_list from
                    # DataFrameFormatter class and create a
                    # _get_formatted_column_labels function for code
                    # parity with DataFrameFormatter class.
                    if self.fmt.show_index_names:
                        name = self.columns.names[lnum]
                        row.append(pprint_thing(name or ""))
                    else:
                        row.append("")

                tags = {}
                j = len(row)
                for i, v in enumerate(values):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template.format(span=records[i])
                    else:
                        continue
                    j += 1
                    row.append(v)
                self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)
        else:
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # Initially fill row with blank cells before column names.
            # TODO: Refactor to remove code duplication with code block
            # above for columns MultiIndex.
            row = [""] * (self.row_levels - 1)
            if self.fmt.index or self.show_col_idx_names:
                # see gh-22747
                # If to_html(index_names=False) do not show columns
                # index names.
                # TODO: Refactor to use _get_column_name_list from
                # DataFrameFormatter class.
                if self.fmt.show_index_names:
                    row.append(self.columns.name or "")
                else:
                    row.append("")
            row.extend(self._get_columns_formatted_values())
            align = self.fmt.justify

            if is_truncated_horizontally:
                ins_col = self.row_levels + self.fmt.tr_col_num
                row.insert(ins_col, "...")

            self.write_tr(row, indent, self.indent_delta, header=True, align=align)

    def _write_row_header(self, indent: int) -> None:
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (
            self.ncols + (1 if is_truncated_horizontally else 0)
        )
        self.write_tr(row, indent, self.indent_delta, header=True)

    def _write_header(self, indent: int) -> None:
        self.write("<thead>", indent)

        if self.fmt.header:
            self._write_col_header(indent + self.indent_delta)

        if self.show_row_idx_names:
            self._write_row_header(indent + self.indent_delta)

        self.write("</thead>", indent)

    def _get_formatted_values(self) -> dict[int, list[str]]:
        with option_context("display.max_colwidth", None):
            fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)}
        return fmt_values

    def _write_body(self, indent: int) -> None:
        self.write("<tbody>", indent)
        fmt_values = self._get_formatted_values()

        # write values
        if self.fmt.index and isinstance(self.frame.index, MultiIndex):
            self._write_hierarchical_rows(fmt_values, indent + self.indent_delta)
        else:
            self._write_regular_rows(fmt_values, indent + self.indent_delta)

        self.write("</tbody>", indent)

    def _write_regular_rows(
        self, fmt_values: Mapping[int, list[str]], indent: int
    ) -> None:
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        is_truncated_vertically = self.fmt.is_truncated_vertically

        nrows = len(self.fmt.tr_frame)

        if self.fmt.index:
            fmt = self.fmt._get_formatter("__index__")
            if fmt is not None:
                index_values = self.fmt.tr_frame.index.map(fmt)
            else:
                # only reached with non-Multi index
                index_values = self.fmt.tr_frame.index._format_flat(include_name=False)

        row: list[str] = []
        for i in range(nrows):
            if is_truncated_vertically and i == (self.fmt.tr_row_num):
                str_sep_row = ["..."] * len(row)
                self.write_tr(
                    str_sep_row,
                    indent,
                    self.indent_delta,
                    tags=None,
                    nindex_levels=self.row_levels,
                )

            row = []
            if self.fmt.index:
                row.append(index_values[i])
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # Add blank cell before data cells.
            elif self.show_col_idx_names:
                row.append("")
            row.extend(fmt_values[j][i] for j in range(self.ncols))

            if is_truncated_horizontally:
                dot_col_ix = self.fmt.tr_col_num + self.row_levels
                row.insert(dot_col_ix, "...")
            self.write_tr(
                row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels
            )

    def _write_hierarchical_rows(
        self, fmt_values: Mapping[int, list[str]], indent: int
    ) -> None:
        template = 'rowspan="{span}" valign="top"'

        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        is_truncated_vertically = self.fmt.is_truncated_vertically
        frame = self.fmt.tr_frame
        nrows = len(frame)

        assert isinstance(frame.index, MultiIndex)
        idx_values = frame.index._format_multi(sparsify=False, include_names=False)
        idx_values = list(zip(*idx_values))

        if self.fmt.sparsify:
            # GH3547
            sentinel = lib.no_default
            levels = frame.index._format_multi(sparsify=sentinel, include_names=False)

            level_lengths = get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            if is_truncated_vertically:
                # Insert ... row and adjust idx_values and
                # level_lengths to take this into account.
                ins_row = self.fmt.tr_row_num
                inserted = False
                for lnum, records in enumerate(level_lengths):
                    rec_new = {}
                    for tag, span in list(records.items()):
                        if tag >= ins_row:
                            rec_new[tag + 1] = span
                        elif tag + span > ins_row:
                            rec_new[tag] = span + 1

                            # GH 14882 - Make sure insertion done once
                            if not inserted:
                                dot_row = list(idx_values[ins_row - 1])
                                dot_row[-1] = "..."
                                idx_values.insert(ins_row, tuple(dot_row))
                                inserted = True
                            else:
                                dot_row = list(idx_values[ins_row])
                                dot_row[inner_lvl - lnum] = "..."
                                idx_values[ins_row] = tuple(dot_row)
                        else:
                            rec_new[tag] = span
                        # If ins_row lies between tags, all cols idx cols
                        # receive ...
                        if tag + span == ins_row:
                            rec_new[ins_row] = 1
                            if lnum == 0:
                                idx_values.insert(
                                    ins_row, tuple(["..."] * len(level_lengths))
                                )

                            # GH 14882 - Place ... in correct level
                            elif inserted:
                                dot_row = list(idx_values[ins_row])
                                dot_row[inner_lvl - lnum] = "..."
                                idx_values[ins_row] = tuple(dot_row)
                    level_lengths[lnum] = rec_new

                level_lengths[inner_lvl][ins_row] = 1
                for ix_col in fmt_values:
                    fmt_values[ix_col].insert(ins_row, "...")
                nrows += 1

            for i in range(nrows):
                row = []
                tags = {}

                sparse_offset = 0
                j = 0
                for records, v in zip(level_lengths, idx_values[i]):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template.format(span=records[i])
                    else:
                        sparse_offset += 1
                        continue

                    j += 1
                    row.append(v)

                row.extend(fmt_values[j][i] for j in range(self.ncols))
                if is_truncated_horizontally:
                    row.insert(
                        self.row_levels - sparse_offset + self.fmt.tr_col_num, "..."
                    )
                self.write_tr(
                    row,
                    indent,
                    self.indent_delta,
                    tags=tags,
                    nindex_levels=len(levels) - sparse_offset,
                )
        else:
            row = []
            for i in range(len(frame)):
                if is_truncated_vertically and i == (self.fmt.tr_row_num):
                    str_sep_row = ["..."] * len(row)
                    self.write_tr(
                        str_sep_row,
                        indent,
                        self.indent_delta,
                        tags=None,
                        nindex_levels=self.row_levels,
                    )

                idx_values = list(
                    zip(*frame.index._format_multi(sparsify=False, include_names=False))
                )
                row = []
                row.extend(idx_values[i])
                row.extend(fmt_values[j][i] for j in range(self.ncols))
                if is_truncated_horizontally:
                    row.insert(self.row_levels + self.fmt.tr_col_num, "...")
                self.write_tr(
                    row,
                    indent,
                    self.indent_delta,
                    tags=None,
                    nindex_levels=frame.index.nlevels,
                )


class NotebookFormatter(HTMLFormatter):
    """
    Internal class for formatting output data in html for display in Jupyter
    Notebooks. This class is intended for functionality specific to
    DataFrame._repr_html_() and DataFrame.to_html(notebook=True)
    """

    def _get_formatted_values(self) -> dict[int, list[str]]:
        return {i: self.fmt.format_col(i) for i in range(self.ncols)}

    def _get_columns_formatted_values(self) -> list[str]:
        # only reached with non-Multi Index
        return self.columns._format_flat(include_name=False)

    def write_style(self) -> None:
        # We use the "scoped" attribute here so that the desired
        # style properties for the data frame are not then applied
        # throughout the entire notebook.
        template_first = """\
            <style scoped>"""
        template_last = """\
            </style>"""
        template_select = """\
                .dataframe %s {
                    %s: %s;
                }"""
        element_props = [
            ("tbody tr th:only-of-type", "vertical-align", "middle"),
            ("tbody tr th", "vertical-align", "top"),
        ]
        if isinstance(self.columns, MultiIndex):
            element_props.append(("thead tr th", "text-align", "left"))
            if self.show_row_idx_names:
                element_props.append(
                    ("thead tr:last-of-type th", "text-align", "right")
                )
        else:
            element_props.append(("thead th", "text-align", "right"))
        template_mid = "\n\n".join(template_select % t for t in element_props)
        template = dedent(f"{template_first}\n{template_mid}\n{template_last}")
        self.write(template)

    def render(self) -> list[str]:
        self.write("<div>")
        self.write_style()
        super().render()
        self.write("</div>")
        return self.elements
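

# --- Editor's illustration (not part of the original pandas file) ----------
# These classes are driven by DataFrameFormatter; the public entry points are
# DataFrame.to_html() and DataFrame._repr_html_(). Sketch (hypothetical demo;
# assumes pandas is importable at runtime):
def _demo_html_formatter() -> None:
    import pandas as pd

    df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
    assert df.to_html().startswith("<table")
    # notebook=True routes through NotebookFormatter and adds the scoped CSS
    assert "<style scoped>" in df.to_html(notebook=True)
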
venv/lib/python3.10/site-packages/pandas/io/formats/info.py ADDED
@@ -0,0 +1,1101 @@
from __future__ import annotations

from abc import (
    ABC,
    abstractmethod,
)
import sys
from textwrap import dedent
from typing import TYPE_CHECKING

from pandas._config import get_option

from pandas.io.formats import format as fmt
from pandas.io.formats.printing import pprint_thing

if TYPE_CHECKING:
    from collections.abc import (
        Iterable,
        Iterator,
        Mapping,
        Sequence,
    )

    from pandas._typing import (
        Dtype,
        WriteBuffer,
    )

    from pandas import (
        DataFrame,
        Index,
        Series,
    )


frame_max_cols_sub = dedent(
    """\
    max_cols : int, optional
        When to switch from the verbose to the truncated output. If the
        DataFrame has more than `max_cols` columns, the truncated output
        is used. By default, the setting in
        ``pandas.options.display.max_info_columns`` is used."""
)


show_counts_sub = dedent(
    """\
    show_counts : bool, optional
        Whether to show the non-null counts. By default, this is shown
        only if the DataFrame is smaller than
        ``pandas.options.display.max_info_rows`` and
        ``pandas.options.display.max_info_columns``. A value of True always
        shows the counts, and False never shows the counts."""
)


frame_examples_sub = dedent(
    """\
    >>> int_values = [1, 2, 3, 4, 5]
    >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
    >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
    >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
    ...                    "float_col": float_values})
    >>> df
       int_col text_col  float_col
    0        1    alpha       0.00
    1        2     beta       0.25
    2        3    gamma       0.50
    3        4    delta       0.75
    4        5  epsilon       1.00

    Prints information of all columns:

    >>> df.info(verbose=True)
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 5 entries, 0 to 4
    Data columns (total 3 columns):
     #   Column     Non-Null Count  Dtype
    ---  ------     --------------  -----
     0   int_col    5 non-null      int64
     1   text_col   5 non-null      object
     2   float_col  5 non-null      float64
    dtypes: float64(1), int64(1), object(1)
    memory usage: 248.0+ bytes

    Prints a summary of columns count and its dtypes but not per column
    information:

    >>> df.info(verbose=False)
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 5 entries, 0 to 4
    Columns: 3 entries, int_col to float_col
    dtypes: float64(1), int64(1), object(1)
    memory usage: 248.0+ bytes

    Pipe output of DataFrame.info to a buffer instead of sys.stdout, get the
    buffer content and write it to a text file:

    >>> import io
    >>> buffer = io.StringIO()
    >>> df.info(buf=buffer)
    >>> s = buffer.getvalue()
    >>> with open("df_info.txt", "w",
    ...           encoding="utf-8") as f:  # doctest: +SKIP
    ...     f.write(s)
    260

    The `memory_usage` parameter allows deep introspection mode, especially
    useful for big DataFrames and for fine-tuning memory optimization:

    >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
    >>> df = pd.DataFrame({
    ...     'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
    ...     'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
    ...     'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
    ... })
    >>> df.info()
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 1000000 entries, 0 to 999999
    Data columns (total 3 columns):
     #   Column    Non-Null Count    Dtype
    ---  ------    --------------    -----
     0   column_1  1000000 non-null  object
     1   column_2  1000000 non-null  object
     2   column_3  1000000 non-null  object
    dtypes: object(3)
    memory usage: 22.9+ MB

    >>> df.info(memory_usage='deep')
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 1000000 entries, 0 to 999999
    Data columns (total 3 columns):
     #   Column    Non-Null Count    Dtype
    ---  ------    --------------    -----
     0   column_1  1000000 non-null  object
     1   column_2  1000000 non-null  object
     2   column_3  1000000 non-null  object
    dtypes: object(3)
    memory usage: 165.9 MB"""
)


frame_see_also_sub = dedent(
    """\
    DataFrame.describe: Generate descriptive statistics of DataFrame
        columns.
    DataFrame.memory_usage: Memory usage of DataFrame columns."""
)


frame_sub_kwargs = {
    "klass": "DataFrame",
    "type_sub": " and columns",
    "max_cols_sub": frame_max_cols_sub,
    "show_counts_sub": show_counts_sub,
    "examples_sub": frame_examples_sub,
    "see_also_sub": frame_see_also_sub,
    "version_added_sub": "",
}


series_examples_sub = dedent(
    """\
    >>> int_values = [1, 2, 3, 4, 5]
    >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
    >>> s = pd.Series(text_values, index=int_values)
    >>> s.info()
    <class 'pandas.core.series.Series'>
    Index: 5 entries, 1 to 5
    Series name: None
    Non-Null Count  Dtype
    --------------  -----
    5 non-null      object
    dtypes: object(1)
    memory usage: 80.0+ bytes

    Prints a summary excluding information about its values:

    >>> s.info(verbose=False)
    <class 'pandas.core.series.Series'>
    Index: 5 entries, 1 to 5
    dtypes: object(1)
    memory usage: 80.0+ bytes

    Pipe output of Series.info to a buffer instead of sys.stdout, get the
    buffer content and write it to a text file:

    >>> import io
    >>> buffer = io.StringIO()
    >>> s.info(buf=buffer)
    >>> s = buffer.getvalue()
    >>> with open("df_info.txt", "w",
    ...           encoding="utf-8") as f:  # doctest: +SKIP
    ...     f.write(s)
    260

    The `memory_usage` parameter allows deep introspection mode, especially
    useful for big Series and for fine-tuning memory optimization:

    >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
    >>> s = pd.Series(np.random.choice(['a', 'b', 'c'], 10 ** 6))
    >>> s.info()
    <class 'pandas.core.series.Series'>
    RangeIndex: 1000000 entries, 0 to 999999
    Series name: None
    Non-Null Count    Dtype
    --------------    -----
    1000000 non-null  object
    dtypes: object(1)
    memory usage: 7.6+ MB

    >>> s.info(memory_usage='deep')
    <class 'pandas.core.series.Series'>
    RangeIndex: 1000000 entries, 0 to 999999
    Series name: None
    Non-Null Count    Dtype
    --------------    -----
    1000000 non-null  object
    dtypes: object(1)
    memory usage: 55.3 MB"""
)


series_see_also_sub = dedent(
    """\
    Series.describe: Generate descriptive statistics of Series.
    Series.memory_usage: Memory usage of Series."""
)


series_sub_kwargs = {
    "klass": "Series",
    "type_sub": "",
    "max_cols_sub": "",
    "show_counts_sub": show_counts_sub,
    "examples_sub": series_examples_sub,
    "see_also_sub": series_see_also_sub,
    "version_added_sub": "\n.. versionadded:: 1.4.0\n",
}


INFO_DOCSTRING = dedent(
    """
    Print a concise summary of a {klass}.

    This method prints information about a {klass} including
    the index dtype{type_sub}, non-null values and memory usage.
    {version_added_sub}\

    Parameters
    ----------
    verbose : bool, optional
        Whether to print the full summary. By default, the setting in
        ``pandas.options.display.max_info_columns`` is followed.
    buf : writable buffer, defaults to sys.stdout
        Where to send the output. By default, the output is printed to
        sys.stdout. Pass a writable buffer if you need to further process
        the output.
    {max_cols_sub}
    memory_usage : bool, str, optional
        Specifies whether total memory usage of the {klass}
        elements (including the index) should be displayed. By default,
        this follows the ``pandas.options.display.memory_usage`` setting.

        True always shows memory usage. False never shows memory usage.
        A value of 'deep' is equivalent to "True with deep introspection".
        Memory usage is shown in human-readable units (base-2
        representation). Without deep introspection a memory estimation is
        made based on column dtype and number of rows assuming values
        consume the same memory amount for corresponding dtypes. With deep
        memory introspection, a real memory usage calculation is performed
        at the cost of computational resources. See the
        :ref:`Frequently Asked Questions <df-memory-usage>` for more
        details.
    {show_counts_sub}

    Returns
    -------
    None
        This method prints a summary of a {klass} and returns None.

    See Also
    --------
    {see_also_sub}

    Examples
    --------
    {examples_sub}
    """
)

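# --- Editor's illustration (not part of the original pandas file) ----------
# INFO_DOCSTRING is a template: pandas substitutes frame_sub_kwargs /
# series_sub_kwargs into it to build the DataFrame.info and Series.info
# docstrings. A minimal rendition of that substitution (hypothetical demo):
def _demo_info_docstring() -> None:
    rendered = INFO_DOCSTRING.format(**frame_sub_kwargs)
    assert "Print a concise summary of a DataFrame." in rendered

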
def _put_str(s: str | Dtype, space: int) -> str:
    """
    Make string of specified length, padding to the right if necessary.

    Parameters
    ----------
    s : Union[str, Dtype]
        String to be formatted.
    space : int
        Length to force the string to be.

    Returns
    -------
    str
        String coerced to given length.

    Examples
    --------
    >>> pd.io.formats.info._put_str("panda", 6)
    'panda '
    >>> pd.io.formats.info._put_str("panda", 4)
    'pand'
    """
    return str(s)[:space].ljust(space)


def _sizeof_fmt(num: float, size_qualifier: str) -> str:
    """
    Return size in human readable format.

    Parameters
    ----------
    num : int
        Size in bytes.
    size_qualifier : str
        Either empty, or '+' (if lower bound).

    Returns
    -------
    str
        Size in human readable format.

    Examples
    --------
    >>> _sizeof_fmt(23028, '')
    '22.5 KB'

    >>> _sizeof_fmt(23028, '+')
    '22.5+ KB'
    """
    for x in ["bytes", "KB", "MB", "GB", "TB"]:
        if num < 1024.0:
            return f"{num:3.1f}{size_qualifier} {x}"
        num /= 1024.0
    return f"{num:3.1f}{size_qualifier} PB"


def _initialize_memory_usage(
    memory_usage: bool | str | None = None,
) -> bool | str:
    """Get memory usage based on inputs and display options."""
    if memory_usage is None:
        memory_usage = get_option("display.memory_usage")
    return memory_usage


class _BaseInfo(ABC):
    """
    Base class for DataFrameInfo and SeriesInfo.

    Parameters
    ----------
    data : DataFrame or Series
        Either dataframe or series.
    memory_usage : bool or str, optional
        If "deep", introspect the data deeply by interrogating object dtypes
        for system-level memory consumption, and include it in the returned
        values.
    """

    data: DataFrame | Series
    memory_usage: bool | str

    @property
    @abstractmethod
    def dtypes(self) -> Iterable[Dtype]:
        """
        Dtypes.

        Returns
        -------
        dtypes : sequence
            Dtype of each of the DataFrame's columns (or one series column).
        """

    @property
    @abstractmethod
    def dtype_counts(self) -> Mapping[str, int]:
        """Mapping of dtype to the number of columns with that dtype."""

    @property
    @abstractmethod
    def non_null_counts(self) -> Sequence[int]:
        """Sequence of non-null counts for all columns or column (if series)."""

    @property
    @abstractmethod
    def memory_usage_bytes(self) -> int:
        """
        Memory usage in bytes.

        Returns
        -------
        memory_usage_bytes : int
            Object's total memory usage in bytes.
        """

    @property
    def memory_usage_string(self) -> str:
        """Memory usage as a human-readable string."""
        return f"{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n"

    @property
    def size_qualifier(self) -> str:
        size_qualifier = ""
        if self.memory_usage:
            if self.memory_usage != "deep":
                # size_qualifier is just a best effort; not guaranteed to catch
                # all cases (e.g., it misses categorical data even with object
                # categories)
                if (
                    "object" in self.dtype_counts
                    or self.data.index._is_memory_usage_qualified()
                ):
                    size_qualifier = "+"
        return size_qualifier

    @abstractmethod
    def render(
        self,
        *,
        buf: WriteBuffer[str] | None,
        max_cols: int | None,
        verbose: bool | None,
        show_counts: bool | None,
    ) -> None:
        pass


class DataFrameInfo(_BaseInfo):
    """
    Class storing dataframe-specific info.
    """

    def __init__(
        self,
        data: DataFrame,
        memory_usage: bool | str | None = None,
    ) -> None:
        self.data: DataFrame = data
        self.memory_usage = _initialize_memory_usage(memory_usage)

    @property
    def dtype_counts(self) -> Mapping[str, int]:
        return _get_dataframe_dtype_counts(self.data)

    @property
    def dtypes(self) -> Iterable[Dtype]:
        """
        Dtypes.

        Returns
        -------
        dtypes
            Dtype of each of the DataFrame's columns.
        """
        return self.data.dtypes

    @property
    def ids(self) -> Index:
        """
        Column names.

        Returns
        -------
        ids : Index
            DataFrame's column names.
        """
        return self.data.columns

    @property
    def col_count(self) -> int:
        """Number of columns to be summarized."""
        return len(self.ids)

    @property
    def non_null_counts(self) -> Sequence[int]:
        """Sequence of non-null counts for all columns or column (if series)."""
        return self.data.count()

    @property
    def memory_usage_bytes(self) -> int:
        deep = self.memory_usage == "deep"
        return self.data.memory_usage(index=True, deep=deep).sum()

    def render(
        self,
        *,
        buf: WriteBuffer[str] | None,
        max_cols: int | None,
        verbose: bool | None,
        show_counts: bool | None,
    ) -> None:
        printer = _DataFrameInfoPrinter(
            info=self,
            max_cols=max_cols,
            verbose=verbose,
            show_counts=show_counts,
        )
        printer.to_buffer(buf)

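# --- Editor's illustration (not part of the original pandas file) ----------
# DataFrameInfo is the object behind DataFrame.info(); render() writes the
# familiar summary into any text buffer. Sketch (hypothetical demo; assumes
# pandas is importable at runtime):
def _demo_dataframe_info() -> None:
    import io

    import pandas as pd

    buf = io.StringIO()
    DataFrameInfo(data=pd.DataFrame({"a": [1, 2]})).render(
        buf=buf, max_cols=None, verbose=None, show_counts=None
    )
    assert "RangeIndex: 2 entries, 0 to 1" in buf.getvalue()

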
+ class SeriesInfo(_BaseInfo):
516
+ """
517
+ Class storing series-specific info.
518
+ """
519
+
520
+ def __init__(
521
+ self,
522
+ data: Series,
523
+ memory_usage: bool | str | None = None,
524
+ ) -> None:
525
+ self.data: Series = data
526
+ self.memory_usage = _initialize_memory_usage(memory_usage)
527
+
528
+ def render(
529
+ self,
530
+ *,
531
+ buf: WriteBuffer[str] | None = None,
532
+ max_cols: int | None = None,
533
+ verbose: bool | None = None,
534
+ show_counts: bool | None = None,
535
+ ) -> None:
536
+ if max_cols is not None:
537
+ raise ValueError(
538
+ "Argument `max_cols` can only be passed "
539
+ "in DataFrame.info, not Series.info"
540
+ )
541
+ printer = _SeriesInfoPrinter(
542
+ info=self,
543
+ verbose=verbose,
544
+ show_counts=show_counts,
545
+ )
546
+ printer.to_buffer(buf)
547
+
548
+ @property
549
+ def non_null_counts(self) -> Sequence[int]:
550
+ return [self.data.count()]
551
+
552
+ @property
553
+ def dtypes(self) -> Iterable[Dtype]:
554
+ return [self.data.dtypes]
555
+
556
+ @property
557
+ def dtype_counts(self) -> Mapping[str, int]:
558
+ from pandas.core.frame import DataFrame
559
+
560
+ return _get_dataframe_dtype_counts(DataFrame(self.data))
561
+
562
+ @property
563
+ def memory_usage_bytes(self) -> int:
564
+ """Memory usage in bytes.
565
+
566
+ Returns
567
+ -------
568
+ memory_usage_bytes : int
569
+ Object's total memory usage in bytes.
570
+ """
571
+ deep = self.memory_usage == "deep"
572
+ return self.data.memory_usage(index=True, deep=deep)
573
+
574
+
575
+ class _InfoPrinterAbstract:
576
+ """
577
+ Class for printing dataframe or series info.
578
+ """
579
+
580
+ def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None:
581
+ """Save dataframe info into buffer."""
582
+ table_builder = self._create_table_builder()
583
+ lines = table_builder.get_lines()
584
+ if buf is None: # pragma: no cover
585
+ buf = sys.stdout
586
+ fmt.buffer_put_lines(buf, lines)
587
+
588
+ @abstractmethod
589
+ def _create_table_builder(self) -> _TableBuilderAbstract:
590
+ """Create instance of table builder."""
591
+
592
+
593
+ class _DataFrameInfoPrinter(_InfoPrinterAbstract):
594
+ """
595
+ Class for printing dataframe info.
596
+
597
+ Parameters
598
+ ----------
599
+ info : DataFrameInfo
600
+ Instance of DataFrameInfo.
601
+ max_cols : int, optional
602
+ When to switch from the verbose to the truncated output.
603
+ verbose : bool, optional
604
+ Whether to print the full summary.
605
+ show_counts : bool, optional
606
+ Whether to show the non-null counts.
607
+ """
608
+
609
+ def __init__(
610
+ self,
611
+ info: DataFrameInfo,
612
+ max_cols: int | None = None,
613
+ verbose: bool | None = None,
614
+ show_counts: bool | None = None,
615
+ ) -> None:
616
+ self.info = info
617
+ self.data = info.data
618
+ self.verbose = verbose
619
+ self.max_cols = self._initialize_max_cols(max_cols)
620
+ self.show_counts = self._initialize_show_counts(show_counts)
621
+
622
+ @property
623
+ def max_rows(self) -> int:
624
+ """Maximum info rows to be displayed."""
625
+ return get_option("display.max_info_rows", len(self.data) + 1)
626
+
627
+ @property
628
+ def exceeds_info_cols(self) -> bool:
629
+ """Check if number of columns to be summarized does not exceed maximum."""
630
+ return bool(self.col_count > self.max_cols)
631
+
632
+ @property
633
+ def exceeds_info_rows(self) -> bool:
634
+ """Check if number of rows to be summarized does not exceed maximum."""
635
+ return bool(len(self.data) > self.max_rows)
636
+
637
+ @property
638
+ def col_count(self) -> int:
639
+ """Number of columns to be summarized."""
640
+ return self.info.col_count
641
+
642
+ def _initialize_max_cols(self, max_cols: int | None) -> int:
643
+ if max_cols is None:
644
+ return get_option("display.max_info_columns", self.col_count + 1)
645
+ return max_cols
646
+
647
+ def _initialize_show_counts(self, show_counts: bool | None) -> bool:
648
+ if show_counts is None:
649
+ return bool(not self.exceeds_info_cols and not self.exceeds_info_rows)
650
+ else:
651
+ return show_counts
652
+
653
+ def _create_table_builder(self) -> _DataFrameTableBuilder:
654
+ """
655
+ Create instance of table builder based on verbosity and display settings.
656
+ """
657
+ if self.verbose:
658
+ return _DataFrameTableBuilderVerbose(
659
+ info=self.info,
660
+ with_counts=self.show_counts,
661
+ )
662
+ elif self.verbose is False: # specifically set to False, not necessarily None
663
+ return _DataFrameTableBuilderNonVerbose(info=self.info)
664
+ elif self.exceeds_info_cols:
665
+ return _DataFrameTableBuilderNonVerbose(info=self.info)
666
+ else:
667
+ return _DataFrameTableBuilderVerbose(
668
+ info=self.info,
669
+ with_counts=self.show_counts,
670
+ )
671
+
672
+
673
+ class _SeriesInfoPrinter(_InfoPrinterAbstract):
674
+ """Class for printing series info.
675
+
676
+ Parameters
677
+ ----------
678
+ info : SeriesInfo
679
+ Instance of SeriesInfo.
680
+ verbose : bool, optional
681
+ Whether to print the full summary.
682
+ show_counts : bool, optional
683
+ Whether to show the non-null counts.
684
+ """
685
+
686
+ def __init__(
687
+ self,
688
+ info: SeriesInfo,
689
+ verbose: bool | None = None,
690
+ show_counts: bool | None = None,
691
+ ) -> None:
692
+ self.info = info
693
+ self.data = info.data
694
+ self.verbose = verbose
695
+ self.show_counts = self._initialize_show_counts(show_counts)
696
+
697
+ def _create_table_builder(self) -> _SeriesTableBuilder:
698
+ """
699
+ Create instance of table builder based on verbosity.
700
+ """
701
+ if self.verbose or self.verbose is None:
702
+ return _SeriesTableBuilderVerbose(
703
+ info=self.info,
704
+ with_counts=self.show_counts,
705
+ )
706
+ else:
707
+ return _SeriesTableBuilderNonVerbose(info=self.info)
708
+
709
+ def _initialize_show_counts(self, show_counts: bool | None) -> bool:
710
+ if show_counts is None:
711
+ return True
712
+ else:
713
+ return show_counts
714
+
715
+
716
+ class _TableBuilderAbstract(ABC):
717
+ """
718
+ Abstract builder for info table.
719
+ """
720
+
721
+ _lines: list[str]
722
+ info: _BaseInfo
723
+
724
+ @abstractmethod
725
+ def get_lines(self) -> list[str]:
726
+ """Product in a form of list of lines (strings)."""
727
+
728
+ @property
729
+ def data(self) -> DataFrame | Series:
730
+ return self.info.data
731
+
732
+ @property
733
+ def dtypes(self) -> Iterable[Dtype]:
734
+ """Dtypes of each of the DataFrame's columns."""
735
+ return self.info.dtypes
736
+
737
+ @property
738
+ def dtype_counts(self) -> Mapping[str, int]:
739
+ """Mapping dtype - number of counts."""
740
+ return self.info.dtype_counts
741
+
742
+ @property
743
+ def display_memory_usage(self) -> bool:
744
+ """Whether to display memory usage."""
745
+ return bool(self.info.memory_usage)
746
+
747
+ @property
748
+ def memory_usage_string(self) -> str:
749
+ """Memory usage string with proper size qualifier."""
750
+ return self.info.memory_usage_string
751
+
752
+ @property
753
+ def non_null_counts(self) -> Sequence[int]:
754
+ return self.info.non_null_counts
755
+
756
+ def add_object_type_line(self) -> None:
757
+ """Add line with string representation of dataframe to the table."""
758
+ self._lines.append(str(type(self.data)))
759
+
760
+ def add_index_range_line(self) -> None:
761
+ """Add line with range of indices to the table."""
762
+ self._lines.append(self.data.index._summary())
763
+
764
+ def add_dtypes_line(self) -> None:
765
+ """Add summary line with dtypes present in dataframe."""
766
+ collected_dtypes = [
767
+ f"{key}({val:d})" for key, val in sorted(self.dtype_counts.items())
768
+ ]
769
+ self._lines.append(f"dtypes: {', '.join(collected_dtypes)}")
770
+
771
+
772
+ class _DataFrameTableBuilder(_TableBuilderAbstract):
773
+ """
774
+ Abstract builder for dataframe info table.
775
+
776
+ Parameters
777
+ ----------
778
+ info : DataFrameInfo
779
+ Instance of DataFrameInfo.
780
+ """
781
+
782
+ def __init__(self, *, info: DataFrameInfo) -> None:
783
+ self.info: DataFrameInfo = info
784
+
785
+ def get_lines(self) -> list[str]:
786
+ self._lines = []
787
+ if self.col_count == 0:
788
+ self._fill_empty_info()
789
+ else:
790
+ self._fill_non_empty_info()
791
+ return self._lines
792
+
793
+ def _fill_empty_info(self) -> None:
794
+ """Add lines to the info table, pertaining to empty dataframe."""
795
+ self.add_object_type_line()
796
+ self.add_index_range_line()
797
+ self._lines.append(f"Empty {type(self.data).__name__}\n")
798
+
799
+ @abstractmethod
800
+ def _fill_non_empty_info(self) -> None:
801
+ """Add lines to the info table, pertaining to non-empty dataframe."""
802
+
803
+ @property
804
+ def data(self) -> DataFrame:
805
+ """DataFrame."""
806
+ return self.info.data
807
+
808
+ @property
809
+ def ids(self) -> Index:
810
+ """Dataframe columns."""
811
+ return self.info.ids
812
+
813
+ @property
814
+ def col_count(self) -> int:
815
+ """Number of dataframe columns to be summarized."""
816
+ return self.info.col_count
817
+
818
+ def add_memory_usage_line(self) -> None:
819
+ """Add line containing memory usage."""
820
+ self._lines.append(f"memory usage: {self.memory_usage_string}")
821
+
822
+
823
+ class _DataFrameTableBuilderNonVerbose(_DataFrameTableBuilder):
824
+ """
825
+ Dataframe info table builder for non-verbose output.
826
+ """
827
+
828
+ def _fill_non_empty_info(self) -> None:
829
+ """Add lines to the info table, pertaining to non-empty dataframe."""
830
+ self.add_object_type_line()
831
+ self.add_index_range_line()
832
+ self.add_columns_summary_line()
833
+ self.add_dtypes_line()
834
+ if self.display_memory_usage:
835
+ self.add_memory_usage_line()
836
+
837
+ def add_columns_summary_line(self) -> None:
838
+ self._lines.append(self.ids._summary(name="Columns"))
839
+
840
+
841
+ class _TableBuilderVerboseMixin(_TableBuilderAbstract):
842
+ """
843
+ Mixin for verbose info output.
844
+ """
845
+
846
+ SPACING: str = " " * 2
847
+ strrows: Sequence[Sequence[str]]
848
+ gross_column_widths: Sequence[int]
849
+ with_counts: bool
850
+
851
+ @property
852
+ @abstractmethod
853
+ def headers(self) -> Sequence[str]:
854
+ """Headers names of the columns in verbose table."""
855
+
856
+ @property
857
+ def header_column_widths(self) -> Sequence[int]:
858
+ """Widths of header columns (only titles)."""
859
+ return [len(col) for col in self.headers]
860
+
861
+ def _get_gross_column_widths(self) -> Sequence[int]:
862
+ """Get widths of columns containing both headers and actual content."""
863
+ body_column_widths = self._get_body_column_widths()
864
+ return [
865
+ max(*widths)
866
+ for widths in zip(self.header_column_widths, body_column_widths)
867
+ ]
868
+
869
+ def _get_body_column_widths(self) -> Sequence[int]:
870
+ """Get widths of table content columns."""
871
+ strcols: Sequence[Sequence[str]] = list(zip(*self.strrows))
872
+ return [max(len(x) for x in col) for col in strcols]
873
+
874
+ def _gen_rows(self) -> Iterator[Sequence[str]]:
875
+ """
876
+ Generator function yielding rows content.
877
+
878
+ Each element represents a row comprising a sequence of strings.
879
+ """
880
+ if self.with_counts:
881
+ return self._gen_rows_with_counts()
882
+ else:
883
+ return self._gen_rows_without_counts()
884
+
885
+ @abstractmethod
886
+ def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
887
+ """Iterator with string representation of body data with counts."""
888
+
889
+ @abstractmethod
890
+ def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
891
+ """Iterator with string representation of body data without counts."""
892
+
893
+ def add_header_line(self) -> None:
894
+ header_line = self.SPACING.join(
895
+ [
896
+ _put_str(header, col_width)
897
+ for header, col_width in zip(self.headers, self.gross_column_widths)
898
+ ]
899
+ )
900
+ self._lines.append(header_line)
901
+
902
+ def add_separator_line(self) -> None:
903
+ separator_line = self.SPACING.join(
904
+ [
905
+ _put_str("-" * header_colwidth, gross_colwidth)
906
+ for header_colwidth, gross_colwidth in zip(
907
+ self.header_column_widths, self.gross_column_widths
908
+ )
909
+ ]
910
+ )
911
+ self._lines.append(separator_line)
912
+
913
+ def add_body_lines(self) -> None:
914
+ for row in self.strrows:
915
+ body_line = self.SPACING.join(
916
+ [
917
+ _put_str(col, gross_colwidth)
918
+ for col, gross_colwidth in zip(row, self.gross_column_widths)
919
+ ]
920
+ )
921
+ self._lines.append(body_line)
922
+
923
+ def _gen_non_null_counts(self) -> Iterator[str]:
924
+ """Iterator with string representation of non-null counts."""
925
+ for count in self.non_null_counts:
926
+ yield f"{count} non-null"
927
+
928
+ def _gen_dtypes(self) -> Iterator[str]:
929
+ """Iterator with string representation of column dtypes."""
930
+ for dtype in self.dtypes:
931
+ yield pprint_thing(dtype)
932
+
933
+
934
+ class _DataFrameTableBuilderVerbose(_DataFrameTableBuilder, _TableBuilderVerboseMixin):
935
+ """
936
+ Dataframe info table builder for verbose output.
937
+ """
938
+
939
+ def __init__(
940
+ self,
941
+ *,
942
+ info: DataFrameInfo,
943
+ with_counts: bool,
944
+ ) -> None:
945
+ self.info = info
946
+ self.with_counts = with_counts
947
+ self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())
948
+ self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()
949
+
950
+ def _fill_non_empty_info(self) -> None:
951
+ """Add lines to the info table, pertaining to non-empty dataframe."""
952
+ self.add_object_type_line()
953
+ self.add_index_range_line()
954
+ self.add_columns_summary_line()
955
+ self.add_header_line()
956
+ self.add_separator_line()
957
+ self.add_body_lines()
958
+ self.add_dtypes_line()
959
+ if self.display_memory_usage:
960
+ self.add_memory_usage_line()
961
+
962
+ @property
963
+ def headers(self) -> Sequence[str]:
964
+ """Headers names of the columns in verbose table."""
965
+ if self.with_counts:
966
+ return [" # ", "Column", "Non-Null Count", "Dtype"]
967
+ return [" # ", "Column", "Dtype"]
968
+
969
+ def add_columns_summary_line(self) -> None:
970
+ self._lines.append(f"Data columns (total {self.col_count} columns):")
971
+
972
+ def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
973
+ """Iterator with string representation of body data without counts."""
974
+ yield from zip(
975
+ self._gen_line_numbers(),
976
+ self._gen_columns(),
977
+ self._gen_dtypes(),
978
+ )
979
+
980
+ def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
981
+ """Iterator with string representation of body data with counts."""
982
+ yield from zip(
983
+ self._gen_line_numbers(),
984
+ self._gen_columns(),
985
+ self._gen_non_null_counts(),
986
+ self._gen_dtypes(),
987
+ )
988
+
989
+ def _gen_line_numbers(self) -> Iterator[str]:
990
+ """Iterator with string representation of column numbers."""
991
+ for i, _ in enumerate(self.ids):
992
+ yield f" {i}"
993
+
994
+ def _gen_columns(self) -> Iterator[str]:
995
+ """Iterator with string representation of column names."""
996
+ for col in self.ids:
997
+ yield pprint_thing(col)
998
+
999
+
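
To ground the verbose builder above, here is a hedged sketch of the table it produces via DataFrame.info(); the exact spacing and the memory figure are illustrative, not asserted:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, None], "b": ["x", "y", "z"]})
df.info()
# <class 'pandas.core.frame.DataFrame'>
# RangeIndex: 3 entries, 0 to 2
# Data columns (total 2 columns):
#  #   Column  Non-Null Count  Dtype
# ---  ------  --------------  -----
#  0   a       2 non-null      float64
#  1   b       3 non-null      object
# dtypes: float64(1), object(1)
# memory usage: ... (platform dependent)
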
1000
+ class _SeriesTableBuilder(_TableBuilderAbstract):
1001
+ """
1002
+ Abstract builder for series info table.
1003
+
1004
+ Parameters
1005
+ ----------
1006
+ info : SeriesInfo
1007
+ Instance of SeriesInfo.
1008
+ """
1009
+
1010
+ def __init__(self, *, info: SeriesInfo) -> None:
1011
+ self.info: SeriesInfo = info
1012
+
1013
+ def get_lines(self) -> list[str]:
1014
+ self._lines = []
1015
+ self._fill_non_empty_info()
1016
+ return self._lines
1017
+
1018
+ @property
1019
+ def data(self) -> Series:
1020
+ """Series."""
1021
+ return self.info.data
1022
+
1023
+ def add_memory_usage_line(self) -> None:
1024
+ """Add line containing memory usage."""
1025
+ self._lines.append(f"memory usage: {self.memory_usage_string}")
1026
+
1027
+ @abstractmethod
1028
+ def _fill_non_empty_info(self) -> None:
1029
+ """Add lines to the info table, pertaining to non-empty series."""
1030
+
1031
+
1032
+ class _SeriesTableBuilderNonVerbose(_SeriesTableBuilder):
1033
+ """
1034
+ Series info table builder for non-verbose output.
1035
+ """
1036
+
1037
+ def _fill_non_empty_info(self) -> None:
1038
+ """Add lines to the info table, pertaining to non-empty series."""
1039
+ self.add_object_type_line()
1040
+ self.add_index_range_line()
1041
+ self.add_dtypes_line()
1042
+ if self.display_memory_usage:
1043
+ self.add_memory_usage_line()
1044
+
1045
+
1046
+ class _SeriesTableBuilderVerbose(_SeriesTableBuilder, _TableBuilderVerboseMixin):
1047
+ """
1048
+ Series info table builder for verbose output.
1049
+ """
1050
+
1051
+ def __init__(
1052
+ self,
1053
+ *,
1054
+ info: SeriesInfo,
1055
+ with_counts: bool,
1056
+ ) -> None:
1057
+ self.info = info
1058
+ self.with_counts = with_counts
1059
+ self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())
1060
+ self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()
1061
+
1062
+ def _fill_non_empty_info(self) -> None:
1063
+ """Add lines to the info table, pertaining to non-empty series."""
1064
+ self.add_object_type_line()
1065
+ self.add_index_range_line()
1066
+ self.add_series_name_line()
1067
+ self.add_header_line()
1068
+ self.add_separator_line()
1069
+ self.add_body_lines()
1070
+ self.add_dtypes_line()
1071
+ if self.display_memory_usage:
1072
+ self.add_memory_usage_line()
1073
+
1074
+ def add_series_name_line(self) -> None:
1075
+ self._lines.append(f"Series name: {self.data.name}")
1076
+
1077
+ @property
1078
+ def headers(self) -> Sequence[str]:
1079
+ """Headers names of the columns in verbose table."""
1080
+ if self.with_counts:
1081
+ return ["Non-Null Count", "Dtype"]
1082
+ return ["Dtype"]
1083
+
1084
+ def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
1085
+ """Iterator with string representation of body data without counts."""
1086
+ yield from self._gen_dtypes()
1087
+
1088
+ def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
1089
+ """Iterator with string representation of body data with counts."""
1090
+ yield from zip(
1091
+ self._gen_non_null_counts(),
1092
+ self._gen_dtypes(),
1093
+ )
1094
+
1095
+
1096
+ def _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]:
1097
+ """
1098
+ Create mapping between datatypes and their number of occurrences.
1099
+ """
1100
+ # groupby dtype.name to collect e.g. Categorical columns
1101
+ return df.dtypes.value_counts().groupby(lambda x: x.name).sum()
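
A minimal sketch of the dtype-count collapsing performed by _get_dataframe_dtype_counts above; the helper is private, so the sketch reproduces its expression directly (output shown is indicative):

import pandas as pd

df = pd.DataFrame(
    {
        "a": [1, 2, 3],
        "b": [0.1, 0.2, 0.3],
        "c": pd.Categorical(["x", "y", "x"]),
        "d": pd.Categorical(["p", "q", "p"]),
    }
)

# value_counts() keys on dtype objects; grouping on dtype.name merges the
# two distinct Categorical dtypes into a single "category" count.
print(df.dtypes.value_counts().groupby(lambda x: x.name).sum())
# category    2
# float64     1
# int64       1
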
venv/lib/python3.10/site-packages/pandas/io/formats/printing.py ADDED
@@ -0,0 +1,572 @@
1
+ """
2
+ Printing tools.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from collections.abc import (
7
+ Iterable,
8
+ Mapping,
9
+ Sequence,
10
+ )
11
+ import sys
12
+ from typing import (
13
+ Any,
14
+ Callable,
15
+ TypeVar,
16
+ Union,
17
+ )
18
+ from unicodedata import east_asian_width
19
+
20
+ from pandas._config import get_option
21
+
22
+ from pandas.core.dtypes.inference import is_sequence
23
+
24
+ from pandas.io.formats.console import get_console_size
25
+
26
+ EscapeChars = Union[Mapping[str, str], Iterable[str]]
27
+ _KT = TypeVar("_KT")
28
+ _VT = TypeVar("_VT")
29
+
30
+
31
+ def adjoin(space: int, *lists: list[str], **kwargs) -> str:
32
+ """
33
+ Glues together two sets of strings using the amount of space requested.
34
+ The idea is to prettify.
35
+
36
+ Parameters
+ ----------
37
+ space : int
38
+ number of spaces for padding
39
+ lists : list of str
40
+ lists of strings which are being joined
41
+ strlen : callable
42
+ function used to calculate the length of each str. Needed for unicode
43
+ handling.
44
+ justfunc : callable
45
+ function used to justify str. Needed for unicode handling.
46
+ """
47
+ strlen = kwargs.pop("strlen", len)
48
+ justfunc = kwargs.pop("justfunc", _adj_justify)
49
+
50
+ newLists = []
51
+ lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
52
+ # not the last one
53
+ lengths.append(max(map(len, lists[-1])))
54
+ maxLen = max(map(len, lists))
55
+ for i, lst in enumerate(lists):
56
+ nl = justfunc(lst, lengths[i], mode="left")
57
+ nl = ([" " * lengths[i]] * (maxLen - len(lst))) + nl
58
+ newLists.append(nl)
59
+ toJoin = zip(*newLists)
60
+ return "\n".join("".join(lines) for lines in toJoin)
61
+
62
+
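
For orientation, a small usage sketch of adjoin; importing from this private module is shown only for illustration, and the spacing in the comments is indicative:

from pandas.io.formats.printing import adjoin

# Each column except the last is left-justified to its own width plus the
# requested spacing, then rows are glued together.
print(adjoin(2, ["a", "bb"], ["ccc", "d"]))
# a   ccc
# bb  d
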
63
+ def _adj_justify(texts: Iterable[str], max_len: int, mode: str = "right") -> list[str]:
64
+ """
65
+ Perform ljust, center, rjust against string or list-like
66
+ """
67
+ if mode == "left":
68
+ return [x.ljust(max_len) for x in texts]
69
+ elif mode == "center":
70
+ return [x.center(max_len) for x in texts]
71
+ else:
72
+ return [x.rjust(max_len) for x in texts]
73
+
74
+
75
+ # Unicode consolidation
76
+ # ---------------------
77
+ #
78
+ # pprinting utility functions for generating Unicode text or
80
+ # bytes representations of objects.
80
+ # Try to use these as much as possible rather than rolling your own.
81
+ #
82
+ # When to use
83
+ # -----------
84
+ #
85
+ # 1) If you're writing code internal to pandas (no I/O directly involved),
86
+ # use pprint_thing().
87
+ #
88
+ # It will always return unicode text which can be handled by other
89
+ # parts of the package without breakage.
90
+ #
91
+ # 2) if you need to write something out to file, use
92
+ # pprint_thing_encoded(encoding).
93
+ #
94
+ # If no encoding is specified, it defaults to utf-8. Since encoding pure
95
+ # ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
96
+ # working with straight ascii.
97
+
98
+
99
+ def _pprint_seq(
100
+ seq: Sequence, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds
101
+ ) -> str:
102
+ """
103
+ internal. pprinter for iterables. you should probably use pprint_thing()
104
+ rather than calling this directly.
105
+
106
+ bounds length of printed sequence, depending on options
107
+ """
108
+ if isinstance(seq, set):
109
+ fmt = "{{{body}}}"
110
+ else:
111
+ fmt = "[{body}]" if hasattr(seq, "__setitem__") else "({body})"
112
+
113
+ if max_seq_items is False:
114
+ nitems = len(seq)
115
+ else:
116
+ nitems = max_seq_items or get_option("max_seq_items") or len(seq)
117
+
118
+ s = iter(seq)
119
+ # handle sets, no slicing
120
+ r = [
121
+ pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
122
+ for i in range(min(nitems, len(seq)))
123
+ ]
124
+ body = ", ".join(r)
125
+
126
+ if nitems < len(seq):
127
+ body += ", ..."
128
+ elif isinstance(seq, tuple) and len(seq) == 1:
129
+ body += ","
130
+
131
+ return fmt.format(body=body)
132
+
133
+
134
+ def _pprint_dict(
135
+ seq: Mapping, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds
136
+ ) -> str:
137
+ """
138
+ internal. pprinter for mappings. you should probably use pprint_thing()
139
+ rather than calling this directly.
140
+ """
141
+ fmt = "{{{things}}}"
142
+ pairs = []
143
+
144
+ pfmt = "{key}: {val}"
145
+
146
+ if max_seq_items is False:
147
+ nitems = len(seq)
148
+ else:
149
+ nitems = max_seq_items or get_option("max_seq_items") or len(seq)
150
+
151
+ for k, v in list(seq.items())[:nitems]:
152
+ pairs.append(
153
+ pfmt.format(
154
+ key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
155
+ val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
156
+ )
157
+ )
158
+
159
+ if nitems < len(seq):
160
+ return fmt.format(things=", ".join(pairs) + ", ...")
161
+ else:
162
+ return fmt.format(things=", ".join(pairs))
163
+
164
+
165
+ def pprint_thing(
166
+ thing: Any,
167
+ _nest_lvl: int = 0,
168
+ escape_chars: EscapeChars | None = None,
169
+ default_escapes: bool = False,
170
+ quote_strings: bool = False,
171
+ max_seq_items: int | None = None,
172
+ ) -> str:
173
+ """
174
+ This function is the sanctioned way of converting objects
175
+ to a string representation and properly handles nested sequences.
176
+
177
+ Parameters
178
+ ----------
179
+ thing : anything to be formatted
180
+ _nest_lvl : internal use only. pprint_thing() is mutually-recursive
181
+ with _pprint_seq, this argument is used to keep track of the
182
+ current nesting level, and limit it.
183
+ escape_chars : list or dict, optional
184
+ Characters to escape. If a dict is passed the values are the
185
+ replacements
186
+ default_escapes : bool, default False
187
+ Whether the input escape characters replace or add to the defaults
188
+ max_seq_items : int or None, default None
189
+ Pass through to other pretty printers to limit sequence printing
190
+
191
+ Returns
192
+ -------
193
+ str
194
+ """
195
+
196
+ def as_escaped_string(
197
+ thing: Any, escape_chars: EscapeChars | None = escape_chars
198
+ ) -> str:
199
+ translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"}
200
+ if isinstance(escape_chars, dict):
201
+ if default_escapes:
202
+ translate.update(escape_chars)
203
+ else:
204
+ translate = escape_chars
205
+ escape_chars = list(escape_chars.keys())
206
+ else:
207
+ escape_chars = escape_chars or ()
208
+
209
+ result = str(thing)
210
+ for c in escape_chars:
211
+ result = result.replace(c, translate[c])
212
+ return result
213
+
214
+ if hasattr(thing, "__next__"):
215
+ return str(thing)
216
+ elif isinstance(thing, dict) and _nest_lvl < get_option(
217
+ "display.pprint_nest_depth"
218
+ ):
219
+ result = _pprint_dict(
220
+ thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items
221
+ )
222
+ elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"):
223
+ result = _pprint_seq(
224
+ thing,
225
+ _nest_lvl,
226
+ escape_chars=escape_chars,
227
+ quote_strings=quote_strings,
228
+ max_seq_items=max_seq_items,
229
+ )
230
+ elif isinstance(thing, str) and quote_strings:
231
+ result = f"'{as_escaped_string(thing)}'"
232
+ else:
233
+ result = as_escaped_string(thing)
234
+
235
+ return result
236
+
237
+
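
A hedged sketch of pprint_thing on nested containers and escaped control characters (outputs in the comments are indicative):

from pandas.io.formats.printing import pprint_thing

# Nested sequences are pretty-printed up to display.pprint_nest_depth;
# dict keys and values inside are quoted.
print(pprint_thing([1, (2, 3), {"a": 4}]))   # [1, (2, 3), {'a': 4}]

# Control characters listed in escape_chars are replaced by their
# backslash-escaped forms.
print(pprint_thing("tab\there", escape_chars=["\t"]))  # tab\there

# quote_strings wraps plain strings in single quotes.
print(pprint_thing("x", quote_strings=True))  # 'x'
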
238
+ def pprint_thing_encoded(
239
+ object, encoding: str = "utf-8", errors: str = "replace"
240
+ ) -> bytes:
241
+ value = pprint_thing(object) # get unicode representation of object
242
+ return value.encode(encoding, errors)
243
+
244
+
245
+ def enable_data_resource_formatter(enable: bool) -> None:
246
+ if "IPython" not in sys.modules:
247
+ # definitely not in IPython
248
+ return
249
+ from IPython import get_ipython
250
+
251
+ ip = get_ipython()
252
+ if ip is None:
253
+ # still not in IPython
254
+ return
255
+
256
+ formatters = ip.display_formatter.formatters
257
+ mimetype = "application/vnd.dataresource+json"
258
+
259
+ if enable:
260
+ if mimetype not in formatters:
261
+ # define tableschema formatter
262
+ from IPython.core.formatters import BaseFormatter
263
+ from traitlets import ObjectName
264
+
265
+ class TableSchemaFormatter(BaseFormatter):
266
+ print_method = ObjectName("_repr_data_resource_")
267
+ _return_type = (dict,)
268
+
269
+ # register it:
270
+ formatters[mimetype] = TableSchemaFormatter()
271
+ # enable it if it's been disabled:
272
+ formatters[mimetype].enabled = True
273
+ # unregister tableschema mime-type
274
+ elif mimetype in formatters:
275
+ formatters[mimetype].enabled = False
276
+
277
+
278
+ def default_pprint(thing: Any, max_seq_items: int | None = None) -> str:
279
+ return pprint_thing(
280
+ thing,
281
+ escape_chars=("\t", "\r", "\n"),
282
+ quote_strings=True,
283
+ max_seq_items=max_seq_items,
284
+ )
285
+
286
+
287
+ def format_object_summary(
288
+ obj,
289
+ formatter: Callable,
290
+ is_justify: bool = True,
291
+ name: str | None = None,
292
+ indent_for_name: bool = True,
293
+ line_break_each_value: bool = False,
294
+ ) -> str:
295
+ """
296
+ Return the formatted obj as a unicode string
297
+
298
+ Parameters
299
+ ----------
300
+ obj : object
301
+ must be iterable and support __getitem__
302
+ formatter : callable
303
+ string formatter for an element
304
+ is_justify : bool
305
+ should justify the display
306
+ name : name, optional
307
+ defaults to the class name of the obj
308
+ indent_for_name : bool, default True
309
+ Whether subsequent lines should be indented to
310
+ align with the name.
311
+ line_break_each_value : bool, default False
312
+ If True, inserts a line break for each value of ``obj``.
313
+ If False, only break lines when a line of values gets wider
314
+ than the display width.
315
+
316
+ Returns
317
+ -------
318
+ summary string
319
+ """
320
+ display_width, _ = get_console_size()
321
+ if display_width is None:
322
+ display_width = get_option("display.width") or 80
323
+ if name is None:
324
+ name = type(obj).__name__
325
+
326
+ if indent_for_name:
327
+ name_len = len(name)
328
+ space1 = f'\n{(" " * (name_len + 1))}'
329
+ space2 = f'\n{(" " * (name_len + 2))}'
330
+ else:
331
+ space1 = "\n"
332
+ space2 = "\n " # space for the opening '['
333
+
334
+ n = len(obj)
335
+ if line_break_each_value:
336
+ # If we want to vertically align on each value of obj, we need to
337
+ # separate values by a line break and indent the values
338
+ sep = ",\n " + " " * len(name)
339
+ else:
340
+ sep = ","
341
+ max_seq_items = get_option("display.max_seq_items") or n
342
+
343
+ # are we a truncated display
344
+ is_truncated = n > max_seq_items
345
+
346
+ # adj can optionally handle unicode eastern asian width
347
+ adj = get_adjustment()
348
+
349
+ def _extend_line(
350
+ s: str, line: str, value: str, display_width: int, next_line_prefix: str
351
+ ) -> tuple[str, str]:
352
+ if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width:
353
+ s += line.rstrip()
354
+ line = next_line_prefix
355
+ line += value
356
+ return s, line
357
+
358
+ def best_len(values: list[str]) -> int:
359
+ if values:
360
+ return max(adj.len(x) for x in values)
361
+ else:
362
+ return 0
363
+
364
+ close = ", "
365
+
366
+ if n == 0:
367
+ summary = f"[]{close}"
368
+ elif n == 1 and not line_break_each_value:
369
+ first = formatter(obj[0])
370
+ summary = f"[{first}]{close}"
371
+ elif n == 2 and not line_break_each_value:
372
+ first = formatter(obj[0])
373
+ last = formatter(obj[-1])
374
+ summary = f"[{first}, {last}]{close}"
375
+ else:
376
+ if max_seq_items == 1:
377
+ # If max_seq_items=1 show only last element
378
+ head = []
379
+ tail = [formatter(x) for x in obj[-1:]]
380
+ elif n > max_seq_items:
381
+ n = min(max_seq_items // 2, 10)
382
+ head = [formatter(x) for x in obj[:n]]
383
+ tail = [formatter(x) for x in obj[-n:]]
384
+ else:
385
+ head = []
386
+ tail = [formatter(x) for x in obj]
387
+
388
+ # adjust all values to max length if needed
389
+ if is_justify:
390
+ if line_break_each_value:
391
+ # Justify each string in the values of head and tail, so the
392
+ # strings will right align when head and tail are stacked
393
+ # vertically.
394
+ head, tail = _justify(head, tail)
395
+ elif is_truncated or not (
396
+ len(", ".join(head)) < display_width
397
+ and len(", ".join(tail)) < display_width
398
+ ):
399
+ # Each string in head and tail should align with each other
400
+ max_length = max(best_len(head), best_len(tail))
401
+ head = [x.rjust(max_length) for x in head]
402
+ tail = [x.rjust(max_length) for x in tail]
403
+ # If we are not truncated and we are only a single
404
+ # line, then don't justify
405
+
406
+ if line_break_each_value:
407
+ # Now head and tail are of type List[Tuple[str]]. Below we
408
+ # convert them into List[str], so there will be one string per
409
+ # value. Also truncate items horizontally if wider than
410
+ # max_space
411
+ max_space = display_width - len(space2)
412
+ value = tail[0]
413
+ max_items = 1
414
+ for num_items in reversed(range(1, len(value) + 1)):
415
+ pprinted_seq = _pprint_seq(value, max_seq_items=num_items)
416
+ if len(pprinted_seq) < max_space:
417
+ max_items = num_items
418
+ break
419
+ head = [_pprint_seq(x, max_seq_items=max_items) for x in head]
420
+ tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail]
421
+
422
+ summary = ""
423
+ line = space2
424
+
425
+ for head_value in head:
426
+ word = head_value + sep + " "
427
+ summary, line = _extend_line(summary, line, word, display_width, space2)
428
+
429
+ if is_truncated:
430
+ # remove trailing space of last line
431
+ summary += line.rstrip() + space2 + "..."
432
+ line = space2
433
+
434
+ for tail_item in tail[:-1]:
435
+ word = tail_item + sep + " "
436
+ summary, line = _extend_line(summary, line, word, display_width, space2)
437
+
438
+ # last value: no sep added + 1 space of width used for trailing ','
439
+ summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2)
440
+ summary += line
441
+
442
+ # right now close is either '' or ', '
443
+ # Now we want to include the ']', but not the maybe space.
444
+ close = "]" + close.rstrip(" ")
445
+ summary += close
446
+
447
+ if len(summary) > (display_width) or line_break_each_value:
448
+ summary += space1
449
+ else: # one row
450
+ summary += " "
451
+
452
+ # remove initial space
453
+ summary = "[" + summary[len(space2) :]
454
+
455
+ return summary
456
+
457
+
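
A usage sketch for format_object_summary, under the assumption that any object supporting len() and __getitem__ is acceptable (as the docstring states); the exact wrapping and truncation depend on the console width and display.max_seq_items:

from pandas.io.formats.printing import format_object_summary

summary = format_object_summary(list(range(300)), formatter=str, name="demo")
print(summary)
# Something like "[0, 1, 2, ..., 297, 298, 299], " wrapped to the display
# width (illustrative; head/tail lengths follow max_seq_items).
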
458
+ def _justify(
459
+ head: list[Sequence[str]], tail: list[Sequence[str]]
460
+ ) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]:
461
+ """
462
+ Justify items in head and tail, so they are right-aligned when stacked.
463
+
464
+ Parameters
465
+ ----------
466
+ head : list-like of list-likes of strings
467
+ tail : list-like of list-likes of strings
468
+
469
+ Returns
470
+ -------
471
+ tuple of list of tuples of strings
472
+ Same as head and tail, but items are right aligned when stacked
473
+ vertically.
474
+
475
+ Examples
476
+ --------
477
+ >>> _justify([['a', 'b']], [['abc', 'abcd']])
478
+ ([(' a', ' b')], [('abc', 'abcd')])
479
+ """
480
+ combined = head + tail
481
+
482
+ # For each position for the sequences in ``combined``,
483
+ # find the length of the largest string.
484
+ max_length = [0] * len(combined[0])
485
+ for inner_seq in combined:
486
+ length = [len(item) for item in inner_seq]
487
+ max_length = [max(x, y) for x, y in zip(max_length, length)]
488
+
489
+ # justify each item in each list-like in head and tail using max_length
490
+ head_tuples = [
491
+ tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head
492
+ ]
493
+ tail_tuples = [
494
+ tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail
495
+ ]
496
+ return head_tuples, tail_tuples
497
+
498
+
499
+ class PrettyDict(dict[_KT, _VT]):
500
+ """Dict extension to support abbreviated __repr__"""
501
+
502
+ def __repr__(self) -> str:
503
+ return pprint_thing(self)
504
+
505
+
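
A one-line sketch of PrettyDict, which only reroutes __repr__ through pprint_thing so long mappings are elided per the max_seq_items option:

from pandas.io.formats.printing import PrettyDict

d = PrettyDict({i: i * i for i in range(5)})
print(repr(d))  # {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}
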
506
+ class _TextAdjustment:
507
+ def __init__(self) -> None:
508
+ self.encoding = get_option("display.encoding")
509
+
510
+ def len(self, text: str) -> int:
511
+ return len(text)
512
+
513
+ def justify(self, texts: Any, max_len: int, mode: str = "right") -> list[str]:
514
+ """
515
+ Perform ljust, center, rjust against string or list-like
516
+ """
517
+ if mode == "left":
518
+ return [x.ljust(max_len) for x in texts]
519
+ elif mode == "center":
520
+ return [x.center(max_len) for x in texts]
521
+ else:
522
+ return [x.rjust(max_len) for x in texts]
523
+
524
+ def adjoin(self, space: int, *lists, **kwargs) -> str:
525
+ return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs)
526
+
527
+
528
+ class _EastAsianTextAdjustment(_TextAdjustment):
529
+ def __init__(self) -> None:
530
+ super().__init__()
531
+ if get_option("display.unicode.ambiguous_as_wide"):
532
+ self.ambiguous_width = 2
533
+ else:
534
+ self.ambiguous_width = 1
535
+
536
+ # Definition of East Asian Width
537
+ # https://unicode.org/reports/tr11/
538
+ # Ambiguous width can be changed by option
539
+ self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}
540
+
541
+ def len(self, text: str) -> int:
542
+ """
543
+ Calculate display width considering unicode East Asian Width
544
+ """
545
+ if not isinstance(text, str):
546
+ return len(text)
547
+
548
+ return sum(
549
+ self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text
550
+ )
551
+
552
+ def justify(
553
+ self, texts: Iterable[str], max_len: int, mode: str = "right"
554
+ ) -> list[str]:
555
+ # re-calculate padding space per str considering East Asian Width
556
+ def _get_pad(t):
557
+ return max_len - self.len(t) + len(t)
558
+
559
+ if mode == "left":
560
+ return [x.ljust(_get_pad(x)) for x in texts]
561
+ elif mode == "center":
562
+ return [x.center(_get_pad(x)) for x in texts]
563
+ else:
564
+ return [x.rjust(_get_pad(x)) for x in texts]
565
+
566
+
567
+ def get_adjustment() -> _TextAdjustment:
568
+ use_east_asian_width = get_option("display.unicode.east_asian_width")
569
+ if use_east_asian_width:
570
+ return _EastAsianTextAdjustment()
571
+ else:
572
+ return _TextAdjustment()
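
A hedged sketch of the adjustment machinery; the widths below assume the display.unicode.east_asian_width option behaves as documented:

import pandas as pd

from pandas.io.formats.printing import get_adjustment

pd.set_option("display.unicode.east_asian_width", False)
print(get_adjustment().len("パンダ"))  # 3: plain len(), one per character

pd.set_option("display.unicode.east_asian_width", True)
print(get_adjustment().len("パンダ"))  # 6: each katakana is Wide (width 2)
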
venv/lib/python3.10/site-packages/pandas/io/formats/string.py ADDED
@@ -0,0 +1,206 @@
1
+ """
2
+ Module for formatting output data in console (to string).
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from shutil import get_terminal_size
7
+ from typing import TYPE_CHECKING
8
+
9
+ import numpy as np
10
+
11
+ from pandas.io.formats.printing import pprint_thing
12
+
13
+ if TYPE_CHECKING:
14
+ from collections.abc import Iterable
15
+
16
+ from pandas.io.formats.format import DataFrameFormatter
17
+
18
+
19
+ class StringFormatter:
20
+ """Formatter for string representation of a dataframe."""
21
+
22
+ def __init__(self, fmt: DataFrameFormatter, line_width: int | None = None) -> None:
23
+ self.fmt = fmt
24
+ self.adj = fmt.adj
25
+ self.frame = fmt.frame
26
+ self.line_width = line_width
27
+
28
+ def to_string(self) -> str:
29
+ text = self._get_string_representation()
30
+ if self.fmt.should_show_dimensions:
31
+ text = f"{text}{self.fmt.dimensions_info}"
32
+ return text
33
+
34
+ def _get_strcols(self) -> list[list[str]]:
35
+ strcols = self.fmt.get_strcols()
36
+ if self.fmt.is_truncated:
37
+ strcols = self._insert_dot_separators(strcols)
38
+ return strcols
39
+
40
+ def _get_string_representation(self) -> str:
41
+ if self.fmt.frame.empty:
42
+ return self._empty_info_line
43
+
44
+ strcols = self._get_strcols()
45
+
46
+ if self.line_width is None:
47
+ # no need to wrap around, just print the whole frame
48
+ return self.adj.adjoin(1, *strcols)
49
+
50
+ if self._need_to_wrap_around:
51
+ return self._join_multiline(strcols)
52
+
53
+ return self._fit_strcols_to_terminal_width(strcols)
54
+
55
+ @property
56
+ def _empty_info_line(self) -> str:
57
+ return (
58
+ f"Empty {type(self.frame).__name__}\n"
59
+ f"Columns: {pprint_thing(self.frame.columns)}\n"
60
+ f"Index: {pprint_thing(self.frame.index)}"
61
+ )
62
+
63
+ @property
64
+ def _need_to_wrap_around(self) -> bool:
65
+ return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0)
66
+
67
+ def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]:
68
+ str_index = self.fmt._get_formatted_index(self.fmt.tr_frame)
69
+ index_length = len(str_index)
70
+
71
+ if self.fmt.is_truncated_horizontally:
72
+ strcols = self._insert_dot_separator_horizontal(strcols, index_length)
73
+
74
+ if self.fmt.is_truncated_vertically:
75
+ strcols = self._insert_dot_separator_vertical(strcols, index_length)
76
+
77
+ return strcols
78
+
79
+ @property
80
+ def _adjusted_tr_col_num(self) -> int:
81
+ return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num
82
+
83
+ def _insert_dot_separator_horizontal(
84
+ self, strcols: list[list[str]], index_length: int
85
+ ) -> list[list[str]]:
86
+ strcols.insert(self._adjusted_tr_col_num, [" ..."] * index_length)
87
+ return strcols
88
+
89
+ def _insert_dot_separator_vertical(
90
+ self, strcols: list[list[str]], index_length: int
91
+ ) -> list[list[str]]:
92
+ n_header_rows = index_length - len(self.fmt.tr_frame)
93
+ row_num = self.fmt.tr_row_num
94
+ for ix, col in enumerate(strcols):
95
+ cwidth = self.adj.len(col[row_num])
96
+
97
+ if self.fmt.is_truncated_horizontally:
98
+ is_dot_col = ix == self._adjusted_tr_col_num
99
+ else:
100
+ is_dot_col = False
101
+
102
+ if cwidth > 3 or is_dot_col:
103
+ dots = "..."
104
+ else:
105
+ dots = ".."
106
+
107
+ if ix == 0 and self.fmt.index:
108
+ dot_mode = "left"
109
+ elif is_dot_col:
110
+ cwidth = 4
111
+ dot_mode = "right"
112
+ else:
113
+ dot_mode = "right"
114
+
115
+ dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0]
116
+ col.insert(row_num + n_header_rows, dot_str)
117
+ return strcols
118
+
119
+ def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str:
120
+ lwidth = self.line_width
121
+ adjoin_width = 1
122
+ strcols = list(strcols_input)
123
+
124
+ if self.fmt.index:
125
+ idx = strcols.pop(0)
126
+ lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
127
+
128
+ col_widths = [
129
+ np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
130
+ for col in strcols
131
+ ]
132
+
133
+ assert lwidth is not None
134
+ col_bins = _binify(col_widths, lwidth)
135
+ nbins = len(col_bins)
136
+
137
+ str_lst = []
138
+ start = 0
139
+ for i, end in enumerate(col_bins):
140
+ row = strcols[start:end]
141
+ if self.fmt.index:
142
+ row.insert(0, idx)
143
+ if nbins > 1:
144
+ nrows = len(row[-1])
145
+ if end <= len(strcols) and i < nbins - 1:
146
+ row.append([" \\"] + [" "] * (nrows - 1))
147
+ else:
148
+ row.append([" "] * nrows)
149
+ str_lst.append(self.adj.adjoin(adjoin_width, *row))
150
+ start = end
151
+ return "\n\n".join(str_lst)
152
+
153
+ def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str:
154
+ from pandas import Series
155
+
156
+ lines = self.adj.adjoin(1, *strcols).split("\n")
157
+ max_len = Series(lines).str.len().max()
158
+ # plus truncate dot col
159
+ width, _ = get_terminal_size()
160
+ dif = max_len - width
161
+ # '+ 1' to avoid too wide repr (GH PR #17023)
162
+ adj_dif = dif + 1
163
+ col_lens = Series([Series(ele).str.len().max() for ele in strcols])
164
+ n_cols = len(col_lens)
165
+ counter = 0
166
+ while adj_dif > 0 and n_cols > 1:
167
+ counter += 1
168
+ mid = round(n_cols / 2)
169
+ mid_ix = col_lens.index[mid]
170
+ col_len = col_lens[mid_ix]
171
+ # adjoin adds one
172
+ adj_dif -= col_len + 1
173
+ col_lens = col_lens.drop(mid_ix)
174
+ n_cols = len(col_lens)
175
+
176
+ # subtract index column
177
+ max_cols_fitted = n_cols - self.fmt.index
178
+ # GH-21180. Ensure that we print at least two.
179
+ max_cols_fitted = max(max_cols_fitted, 2)
180
+ self.fmt.max_cols_fitted = max_cols_fitted
181
+
182
+ # Call again _truncate to cut frame appropriately
183
+ # and then generate string representation
184
+ self.fmt.truncate()
185
+ strcols = self._get_strcols()
186
+ return self.adj.adjoin(1, *strcols)
187
+
188
+
189
+ def _binify(cols: list[int], line_width: int) -> list[int]:
190
+ adjoin_width = 1
191
+ bins = []
192
+ curr_width = 0
193
+ i_last_column = len(cols) - 1
194
+ for i, w in enumerate(cols):
195
+ w_adjoined = w + adjoin_width
196
+ curr_width += w_adjoined
197
+ if i_last_column == i:
198
+ wrap = curr_width + 1 > line_width and i > 0
199
+ else:
200
+ wrap = curr_width + 2 > line_width and i > 0
201
+ if wrap:
202
+ bins.append(i)
203
+ curr_width = w_adjoined
204
+
205
+ bins.append(len(cols))
206
+ return bins
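
A hedged sketch of _binify: given per-column display widths and a target line width, it returns the exclusive end index of each wrapped block (each column costs its width plus one adjoin gap):

from pandas.io.formats.string import _binify

# Four columns of width 10 at line_width=25 wrap two per block:
# 10+1 + 10+1 = 22 fits, but a third column would not.
print(_binify([10, 10, 10, 10], 25))  # [2, 4]
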
venv/lib/python3.10/site-packages/pandas/io/formats/style.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pandas/io/formats/style_render.py ADDED
@@ -0,0 +1,2497 @@
1
+ from __future__ import annotations
2
+
3
+ from collections import defaultdict
4
+ from collections.abc import Sequence
5
+ from functools import partial
6
+ import re
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ Any,
10
+ Callable,
11
+ DefaultDict,
12
+ Optional,
13
+ TypedDict,
14
+ Union,
15
+ )
16
+ from uuid import uuid4
17
+
18
+ import numpy as np
19
+
20
+ from pandas._config import get_option
21
+
22
+ from pandas._libs import lib
23
+ from pandas.compat._optional import import_optional_dependency
24
+
25
+ from pandas.core.dtypes.common import (
26
+ is_complex,
27
+ is_float,
28
+ is_integer,
29
+ )
30
+ from pandas.core.dtypes.generic import ABCSeries
31
+
32
+ from pandas import (
33
+ DataFrame,
34
+ Index,
35
+ IndexSlice,
36
+ MultiIndex,
37
+ Series,
38
+ isna,
39
+ )
40
+ from pandas.api.types import is_list_like
41
+ import pandas.core.common as com
42
+
43
+ if TYPE_CHECKING:
44
+ from pandas._typing import (
45
+ Axis,
46
+ Level,
47
+ )
48
+ jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.")
49
+ from markupsafe import escape as escape_html # markupsafe is a jinja2 dependency
50
+
51
+ BaseFormatter = Union[str, Callable]
52
+ ExtFormatter = Union[BaseFormatter, dict[Any, Optional[BaseFormatter]]]
53
+ CSSPair = tuple[str, Union[str, float]]
54
+ CSSList = list[CSSPair]
55
+ CSSProperties = Union[str, CSSList]
56
+
57
+
58
+ class CSSDict(TypedDict):
59
+ selector: str
60
+ props: CSSProperties
61
+
62
+
63
+ CSSStyles = list[CSSDict]
64
+ Subset = Union[slice, Sequence, Index]
65
+
66
+
67
+ class StylerRenderer:
68
+ """
69
+ Base class to process rendering a Styler with a specified jinja2 template.
70
+ """
71
+
72
+ loader = jinja2.PackageLoader("pandas", "io/formats/templates")
73
+ env = jinja2.Environment(loader=loader, trim_blocks=True)
74
+ template_html = env.get_template("html.tpl")
75
+ template_html_table = env.get_template("html_table.tpl")
76
+ template_html_style = env.get_template("html_style.tpl")
77
+ template_latex = env.get_template("latex.tpl")
78
+ template_string = env.get_template("string.tpl")
79
+
80
+ def __init__(
81
+ self,
82
+ data: DataFrame | Series,
83
+ uuid: str | None = None,
84
+ uuid_len: int = 5,
85
+ table_styles: CSSStyles | None = None,
86
+ table_attributes: str | None = None,
87
+ caption: str | tuple | list | None = None,
88
+ cell_ids: bool = True,
89
+ precision: int | None = None,
90
+ ) -> None:
91
+ # validate ordered args
92
+ if isinstance(data, Series):
93
+ data = data.to_frame()
94
+ if not isinstance(data, DataFrame):
95
+ raise TypeError("``data`` must be a Series or DataFrame")
96
+ self.data: DataFrame = data
97
+ self.index: Index = data.index
98
+ self.columns: Index = data.columns
99
+ if not isinstance(uuid_len, int) or uuid_len < 0:
100
+ raise TypeError("``uuid_len`` must be an integer in range [0, 32].")
101
+ self.uuid = uuid or uuid4().hex[: min(32, uuid_len)]
102
+ self.uuid_len = len(self.uuid)
103
+ self.table_styles = table_styles
104
+ self.table_attributes = table_attributes
105
+ self.caption = caption
106
+ self.cell_ids = cell_ids
107
+ self.css = {
108
+ "row_heading": "row_heading",
109
+ "col_heading": "col_heading",
110
+ "index_name": "index_name",
111
+ "col": "col",
112
+ "row": "row",
113
+ "col_trim": "col_trim",
114
+ "row_trim": "row_trim",
115
+ "level": "level",
116
+ "data": "data",
117
+ "blank": "blank",
118
+ "foot": "foot",
119
+ }
120
+ self.concatenated: list[StylerRenderer] = []
121
+ # add rendering variables
122
+ self.hide_index_names: bool = False
123
+ self.hide_column_names: bool = False
124
+ self.hide_index_: list = [False] * self.index.nlevels
125
+ self.hide_columns_: list = [False] * self.columns.nlevels
126
+ self.hidden_rows: Sequence[int] = [] # sequence for specific hidden rows/cols
127
+ self.hidden_columns: Sequence[int] = []
128
+ self.ctx: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)
129
+ self.ctx_index: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)
130
+ self.ctx_columns: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)
131
+ self.cell_context: DefaultDict[tuple[int, int], str] = defaultdict(str)
132
+ self._todo: list[tuple[Callable, tuple, dict]] = []
133
+ self.tooltips: Tooltips | None = None
134
+ precision = (
135
+ get_option("styler.format.precision") if precision is None else precision
136
+ )
137
+ self._display_funcs: DefaultDict[ # maps (row, col) -> format func
138
+ tuple[int, int], Callable[[Any], str]
139
+ ] = defaultdict(lambda: partial(_default_formatter, precision=precision))
140
+ self._display_funcs_index: DefaultDict[ # maps (row, level) -> format func
141
+ tuple[int, int], Callable[[Any], str]
142
+ ] = defaultdict(lambda: partial(_default_formatter, precision=precision))
143
+ self._display_funcs_columns: DefaultDict[ # maps (level, col) -> format func
144
+ tuple[int, int], Callable[[Any], str]
145
+ ] = defaultdict(lambda: partial(_default_formatter, precision=precision))
146
+
147
+ def _render(
148
+ self,
149
+ sparse_index: bool,
150
+ sparse_columns: bool,
151
+ max_rows: int | None = None,
152
+ max_cols: int | None = None,
153
+ blank: str = "",
154
+ ):
155
+ """
156
+ Computes and applies styles and then generates the general render dicts.
157
+
158
+ Also extends the `ctx` and `ctx_index` attributes with those of concatenated
159
+ stylers for use within `_translate_latex`
160
+ """
161
+ self._compute()
162
+ dxs = []
163
+ ctx_len = len(self.index)
164
+ for i, concatenated in enumerate(self.concatenated):
165
+ concatenated.hide_index_ = self.hide_index_
166
+ concatenated.hidden_columns = self.hidden_columns
167
+ foot = f"{self.css['foot']}{i}"
168
+ concatenated.css = {
169
+ **self.css,
170
+ "data": f"{foot}_data",
171
+ "row_heading": f"{foot}_row_heading",
172
+ "row": f"{foot}_row",
173
+ "foot": f"{foot}_foot",
174
+ }
175
+ dx = concatenated._render(
176
+ sparse_index, sparse_columns, max_rows, max_cols, blank
177
+ )
178
+ dxs.append(dx)
179
+
180
+ for (r, c), v in concatenated.ctx.items():
181
+ self.ctx[(r + ctx_len, c)] = v
182
+ for (r, c), v in concatenated.ctx_index.items():
183
+ self.ctx_index[(r + ctx_len, c)] = v
184
+
185
+ ctx_len += len(concatenated.index)
186
+
187
+ d = self._translate(
188
+ sparse_index, sparse_columns, max_rows, max_cols, blank, dxs
189
+ )
190
+ return d
191
+
192
+ def _render_html(
193
+ self,
194
+ sparse_index: bool,
195
+ sparse_columns: bool,
196
+ max_rows: int | None = None,
197
+ max_cols: int | None = None,
198
+ **kwargs,
199
+ ) -> str:
200
+ """
201
+ Renders the ``Styler`` including all applied styles to HTML.
202
+ Generates a dict with necessary kwargs passed to jinja2 template.
203
+ """
204
+ d = self._render(sparse_index, sparse_columns, max_rows, max_cols, "&nbsp;")
205
+ d.update(kwargs)
206
+ return self.template_html.render(
207
+ **d,
208
+ html_table_tpl=self.template_html_table,
209
+ html_style_tpl=self.template_html_style,
210
+ )
211
+
212
+ def _render_latex(
213
+ self, sparse_index: bool, sparse_columns: bool, clines: str | None, **kwargs
214
+ ) -> str:
215
+ """
216
+ Render a Styler in latex format
217
+ """
218
+ d = self._render(sparse_index, sparse_columns, None, None)
219
+ self._translate_latex(d, clines=clines)
220
+ self.template_latex.globals["parse_wrap"] = _parse_latex_table_wrapping
221
+ self.template_latex.globals["parse_table"] = _parse_latex_table_styles
222
+ self.template_latex.globals["parse_cell"] = _parse_latex_cell_styles
223
+ self.template_latex.globals["parse_header"] = _parse_latex_header_span
224
+ d.update(kwargs)
225
+ return self.template_latex.render(**d)
226
+
227
+ def _render_string(
228
+ self,
229
+ sparse_index: bool,
230
+ sparse_columns: bool,
231
+ max_rows: int | None = None,
232
+ max_cols: int | None = None,
233
+ **kwargs,
234
+ ) -> str:
235
+ """
236
+ Render a Styler in string format
237
+ """
238
+ d = self._render(sparse_index, sparse_columns, max_rows, max_cols)
239
+ d.update(kwargs)
240
+ return self.template_string.render(**d)
241
+
242
+ def _compute(self):
243
+ """
244
+ Execute the style functions built up in `self._todo`.
245
+
246
+ Relies on the conventions that all style functions go through
247
+ .apply or .map, which append the styles to apply as tuples of
248
+
249
+ (application method, *args, **kwargs)
250
+ """
251
+ self.ctx.clear()
252
+ self.ctx_index.clear()
253
+ self.ctx_columns.clear()
254
+ r = self
255
+ for func, args, kwargs in self._todo:
256
+ r = func(self)(*args, **kwargs)
257
+ return r
258
+
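
For context, a hedged sketch of the _todo convention _compute relies on: Styler.apply queues the styling function and _compute replays the queue lazily at render time. Accessing _todo directly is for illustration only, and this assumes jinja2 is installed since DataFrame.style requires it:

import pandas as pd

styler = pd.DataFrame({"x": [1, 2]}).style
styler.apply(lambda s: ["color: red"] * len(s), axis=0)

# Each queued entry is (method-getter, args, kwargs); _compute calls
# method_getter(self)(*args, **kwargs) for every entry.
print(len(styler._todo))  # 1
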
259
+ def _translate(
260
+ self,
261
+ sparse_index: bool,
262
+ sparse_cols: bool,
263
+ max_rows: int | None = None,
264
+ max_cols: int | None = None,
265
+ blank: str = "&nbsp;",
266
+ dxs: list[dict] | None = None,
267
+ ):
268
+ """
269
+ Process Styler data and settings into a dict for template rendering.
270
+
271
+ Convert data and settings from ``Styler`` attributes such as ``self.data``,
272
+ ``self.tooltips`` including applying any methods in ``self._todo``.
273
+
274
+ Parameters
275
+ ----------
276
+ sparse_index : bool
277
+ Whether to sparsify the index or print all hierarchical index elements.
278
+ Upstream callers typically default this to `pandas.options.styler.sparse.index`.
279
+ sparse_cols : bool
280
+ Whether to sparsify the columns or print all hierarchical column elements.
281
+ Upstream callers typically default this to `pandas.options.styler.sparse.columns`.
282
+ max_rows, max_cols : int, optional
283
+ Specific max rows and cols. max_elements always takes precedence in render.
284
+ blank : str
285
+ Entry to top-left blank cells.
286
+ dxs : list[dict]
287
+ The render dicts of the concatenated Stylers.
288
+
289
+ Returns
290
+ -------
291
+ d : dict
292
+ The following structure: {uuid, table_styles, caption, head, body,
293
+ cellstyle, table_attributes}
294
+ """
295
+ if dxs is None:
296
+ dxs = []
297
+ self.css["blank_value"] = blank
298
+
299
+ # construct render dict
300
+ d = {
301
+ "uuid": self.uuid,
302
+ "table_styles": format_table_styles(self.table_styles or []),
303
+ "caption": self.caption,
304
+ }
305
+
306
+ max_elements = get_option("styler.render.max_elements")
307
+ max_rows = max_rows if max_rows else get_option("styler.render.max_rows")
308
+ max_cols = max_cols if max_cols else get_option("styler.render.max_columns")
309
+ max_rows, max_cols = _get_trimming_maximums(
310
+ len(self.data.index),
311
+ len(self.data.columns),
312
+ max_elements,
313
+ max_rows,
314
+ max_cols,
315
+ )
316
+
317
+ self.cellstyle_map_columns: DefaultDict[
318
+ tuple[CSSPair, ...], list[str]
319
+ ] = defaultdict(list)
320
+ head = self._translate_header(sparse_cols, max_cols)
321
+ d.update({"head": head})
322
+
323
+ # for sparsifying a MultiIndex and for use with latex clines
324
+ idx_lengths = _get_level_lengths(
325
+ self.index, sparse_index, max_rows, self.hidden_rows
326
+ )
327
+ d.update({"index_lengths": idx_lengths})
328
+
329
+ self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(
330
+ list
331
+ )
332
+ self.cellstyle_map_index: DefaultDict[
333
+ tuple[CSSPair, ...], list[str]
334
+ ] = defaultdict(list)
335
+ body: list = self._translate_body(idx_lengths, max_rows, max_cols)
336
+ d.update({"body": body})
337
+
338
+ ctx_maps = {
339
+ "cellstyle": "cellstyle_map",
340
+ "cellstyle_index": "cellstyle_map_index",
341
+ "cellstyle_columns": "cellstyle_map_columns",
342
+ } # add the cell_ids styles map to the render dictionary in the right format
343
+ for k, attr in ctx_maps.items():
344
+ map = [
345
+ {"props": list(props), "selectors": selectors}
346
+ for props, selectors in getattr(self, attr).items()
347
+ ]
348
+ d.update({k: map})
349
+
350
+ for dx in dxs: # self.concatenated is not empty
351
+ d["body"].extend(dx["body"]) # type: ignore[union-attr]
352
+ d["cellstyle"].extend(dx["cellstyle"]) # type: ignore[union-attr]
353
+ d["cellstyle_index"].extend( # type: ignore[union-attr]
354
+ dx["cellstyle_index"]
355
+ )
356
+
357
+ table_attr = self.table_attributes
358
+ if not get_option("styler.html.mathjax"):
359
+ table_attr = table_attr or ""
360
+ if 'class="' in table_attr:
361
+ table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ')
362
+ else:
363
+ table_attr += ' class="tex2jax_ignore"'
364
+ d.update({"table_attributes": table_attr})
365
+
366
+ if self.tooltips:
367
+ d = self.tooltips._translate(self, d)
368
+
369
+ return d
370
+
371
+    def _translate_header(self, sparsify_cols: bool, max_cols: int):
+        """
+        Build each <tr> within table <head> as a list
+
+        Using the structure:
+             +----------------------------+---------------+---------------------------+
+             |  index_blanks ...          | column_name_0 |  column_headers (level_0) |
+          1) |       ..                   |       ..      |             ..            |
+             |  index_blanks ...          | column_name_n |  column_headers (level_n) |
+             +----------------------------+---------------+---------------------------+
+          2) |  index_names (level_0 to level_n) ...      | column_blanks ...         |
+             +----------------------------+---------------+---------------------------+
+
+        Parameters
+        ----------
+        sparsify_cols : bool
+            Whether column_headers section will add colspan attributes (>1) to elements.
+        max_cols : int
+            Maximum number of columns to render. If exceeded will contain `...` filler.
+
+        Returns
+        -------
+        head : list
+            The associated HTML elements needed for template rendering.
+        """
+        # for sparsifying a MultiIndex
+        col_lengths = _get_level_lengths(
+            self.columns, sparsify_cols, max_cols, self.hidden_columns
+        )
+
+        clabels = self.data.columns.tolist()
+        if self.data.columns.nlevels == 1:
+            clabels = [[x] for x in clabels]
+        clabels = list(zip(*clabels))
+
+        head = []
+        # 1) column headers
+        for r, hide in enumerate(self.hide_columns_):
+            if hide or not clabels:
+                continue
+
+            header_row = self._generate_col_header_row(
+                (r, clabels), max_cols, col_lengths
+            )
+            head.append(header_row)
+
+        # 2) index names
+        if (
+            self.data.index.names
+            and com.any_not_none(*self.data.index.names)
+            and not all(self.hide_index_)
+            and not self.hide_index_names
+        ):
+            index_names_row = self._generate_index_names_row(
+                clabels, max_cols, col_lengths
+            )
+            head.append(index_names_row)
+
+        return head
+
+    def _generate_col_header_row(
+        self, iter: Sequence, max_cols: int, col_lengths: dict
+    ):
+        """
+        Generate the row containing column headers:
+
+        +----------------------------+---------------+---------------------------+
+        |  index_blanks ...          | column_name_i |  column_headers (level_i) |
+        +----------------------------+---------------+---------------------------+
+
+        Parameters
+        ----------
+        iter : tuple
+            Looping variables from outer scope
+        max_cols : int
+            Permissible number of columns
+        col_lengths : dict
+            A map of the sparsification structure of the columns
+
+        Returns
+        -------
+        list of elements
+        """
+
+        r, clabels = iter
+
+        # number of index blanks is governed by number of hidden index levels
+        index_blanks = [
+            _element("th", self.css["blank"], self.css["blank_value"], True)
+        ] * (self.index.nlevels - sum(self.hide_index_) - 1)
+
+        name = self.data.columns.names[r]
+        column_name = [
+            _element(
+                "th",
+                (
+                    f"{self.css['blank']} {self.css['level']}{r}"
+                    if name is None
+                    else f"{self.css['index_name']} {self.css['level']}{r}"
+                ),
+                name
+                if (name is not None and not self.hide_column_names)
+                else self.css["blank_value"],
+                not all(self.hide_index_),
+            )
+        ]
+
+        column_headers: list = []
+        visible_col_count: int = 0
+        for c, value in enumerate(clabels[r]):
+            header_element_visible = _is_visible(c, r, col_lengths)
+            if header_element_visible:
+                visible_col_count += col_lengths.get((r, c), 0)
+            if self._check_trim(
+                visible_col_count,
+                max_cols,
+                column_headers,
+                "th",
+                f"{self.css['col_heading']} {self.css['level']}{r} "
+                f"{self.css['col_trim']}",
+            ):
+                break
+
+            header_element = _element(
+                "th",
+                (
+                    f"{self.css['col_heading']} {self.css['level']}{r} "
+                    f"{self.css['col']}{c}"
+                ),
+                value,
+                header_element_visible,
+                display_value=self._display_funcs_columns[(r, c)](value),
+                attributes=(
+                    f'colspan="{col_lengths.get((r, c), 0)}"'
+                    if col_lengths.get((r, c), 0) > 1
+                    else ""
+                ),
+            )
+
+            if self.cell_ids:
+                header_element["id"] = f"{self.css['level']}{r}_{self.css['col']}{c}"
+            if (
+                header_element_visible
+                and (r, c) in self.ctx_columns
+                and self.ctx_columns[r, c]
+            ):
+                header_element["id"] = f"{self.css['level']}{r}_{self.css['col']}{c}"
+                self.cellstyle_map_columns[tuple(self.ctx_columns[r, c])].append(
+                    f"{self.css['level']}{r}_{self.css['col']}{c}"
+                )
+
+            column_headers.append(header_element)
+
+        return index_blanks + column_name + column_headers
+
+    def _generate_index_names_row(
+        self, iter: Sequence, max_cols: int, col_lengths: dict
+    ):
+        """
+        Generate the row containing index names
+
+        +----------------------------+---------------+---------------------------+
+        |  index_names (level_0 to level_n) ...      | column_blanks ...         |
+        +----------------------------+---------------+---------------------------+
+
+        Parameters
+        ----------
+        iter : tuple
+            Looping variables from outer scope
+        max_cols : int
+            Permissible number of columns
+        col_lengths : dict
+            A map of the sparsification structure of the columns
+
+        Returns
+        -------
+        list of elements
+        """
+
+        clabels = iter
+
+        index_names = [
+            _element(
+                "th",
+                f"{self.css['index_name']} {self.css['level']}{c}",
+                self.css["blank_value"] if name is None else name,
+                not self.hide_index_[c],
+            )
+            for c, name in enumerate(self.data.index.names)
+        ]
+
+        column_blanks: list = []
+        visible_col_count: int = 0
+        if clabels:
+            last_level = self.columns.nlevels - 1  # use last level: never sparsified
+            for c, value in enumerate(clabels[last_level]):
+                header_element_visible = _is_visible(c, last_level, col_lengths)
+                if header_element_visible:
+                    visible_col_count += 1
+                if self._check_trim(
+                    visible_col_count,
+                    max_cols,
+                    column_blanks,
+                    "th",
+                    f"{self.css['blank']} {self.css['col']}{c} {self.css['col_trim']}",
+                    self.css["blank_value"],
+                ):
+                    break
+
+                column_blanks.append(
+                    _element(
+                        "th",
+                        f"{self.css['blank']} {self.css['col']}{c}",
+                        self.css["blank_value"],
+                        c not in self.hidden_columns,
+                    )
+                )
+
+        return index_names + column_blanks
+
+    def _translate_body(self, idx_lengths: dict, max_rows: int, max_cols: int):
+        """
+        Build each <tr> within table <body> as a list
+
+        Use the following structure:
+        +--------------------------------------------+---------------------------+
+        |  index_header_0    ...    index_header_n   |  data_by_column   ...     |
+        +--------------------------------------------+---------------------------+
+
+        Also add elements to the cellstyle_map for more efficient grouped elements in
+        <style></style> block
+
+        Parameters
+        ----------
+        idx_lengths : dict
+            A map of the sparsification structure of the index
+        max_rows : int
+            Maximum number of rows to render. If exceeded will contain `...` filler.
+        max_cols : int
+            Maximum number of columns to render. If exceeded will contain `...` filler.
+
+        Returns
+        -------
+        body : list
+            The associated HTML elements needed for template rendering.
+        """
+        rlabels = self.data.index.tolist()
+        if not isinstance(self.data.index, MultiIndex):
+            rlabels = [[x] for x in rlabels]
+
+        body: list = []
+        visible_row_count: int = 0
+        for r, row_tup in [
+            z for z in enumerate(self.data.itertuples()) if z[0] not in self.hidden_rows
+        ]:
+            visible_row_count += 1
+            if self._check_trim(
+                visible_row_count,
+                max_rows,
+                body,
+                "row",
+            ):
+                break
+
+            body_row = self._generate_body_row(
+                (r, row_tup, rlabels), max_cols, idx_lengths
+            )
+            body.append(body_row)
+        return body
+
+    def _check_trim(
+        self,
+        count: int,
+        max: int,
+        obj: list,
+        element: str,
+        css: str | None = None,
+        value: str = "...",
+    ) -> bool:
+        """
+        Indicates whether to break render loops and append a trimming indicator
+
+        Parameters
+        ----------
+        count : int
+            The loop count of previous visible items.
+        max : int
+            The allowable rendered items in the loop.
+        obj : list
+            The current render collection of the rendered items.
+        element : str
+            The type of element to append in the case a trimming indicator is needed.
+        css : str, optional
+            The css to add to the trimming indicator element.
+        value : str, optional
+            The value of the elements display if necessary.
+
+        Returns
+        -------
+        result : bool
+            Whether a trimming element was required and appended.
+        """
+        if count > max:
+            if element == "row":
+                obj.append(self._generate_trimmed_row(max))
+            else:
+                obj.append(_element(element, css, value, True, attributes=""))
+            return True
+        return False
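+    # A minimal usage sketch (illustrative, not part of the pandas API): with
+    # max_rows=2, the third visible row makes ``count > max`` true, so
+    # _check_trim appends a "..." trim row via _generate_trimmed_row and
+    # returns True, signalling the render loop in _translate_body to break.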
+
+    def _generate_trimmed_row(self, max_cols: int) -> list:
+        """
+        When a render has too many rows we generate a trimming row containing "..."
+
+        Parameters
+        ----------
+        max_cols : int
+            Number of permissible columns
+
+        Returns
+        -------
+        list of elements
+        """
+        index_headers = [
+            _element(
+                "th",
+                (
+                    f"{self.css['row_heading']} {self.css['level']}{c} "
+                    f"{self.css['row_trim']}"
+                ),
+                "...",
+                not self.hide_index_[c],
+                attributes="",
+            )
+            for c in range(self.data.index.nlevels)
+        ]
+
+        data: list = []
+        visible_col_count: int = 0
+        for c, _ in enumerate(self.columns):
+            data_element_visible = c not in self.hidden_columns
+            if data_element_visible:
+                visible_col_count += 1
+            if self._check_trim(
+                visible_col_count,
+                max_cols,
+                data,
+                "td",
+                f"{self.css['data']} {self.css['row_trim']} {self.css['col_trim']}",
+            ):
+                break
+
+            data.append(
+                _element(
+                    "td",
+                    f"{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}",
+                    "...",
+                    data_element_visible,
+                    attributes="",
+                )
+            )
+
+        return index_headers + data
+
+    def _generate_body_row(
+        self,
+        iter: tuple,
+        max_cols: int,
+        idx_lengths: dict,
+    ):
+        """
+        Generate a regular row for the body section of appropriate format.
+
+        +--------------------------------------------+---------------------------+
+        |  index_header_0    ...    index_header_n   |  data_by_column   ...     |
+        +--------------------------------------------+---------------------------+
+
+        Parameters
+        ----------
+        iter : tuple
+            Iterable from outer scope: row number, row data tuple, row index labels.
+        max_cols : int
+            Number of permissible columns.
+        idx_lengths : dict
+            A map of the sparsification structure of the index
+
+        Returns
+        -------
+        list of elements
+        """
+        r, row_tup, rlabels = iter
+
+        index_headers = []
+        for c, value in enumerate(rlabels[r]):
+            header_element_visible = (
+                _is_visible(r, c, idx_lengths) and not self.hide_index_[c]
+            )
+            header_element = _element(
+                "th",
+                (
+                    f"{self.css['row_heading']} {self.css['level']}{c} "
+                    f"{self.css['row']}{r}"
+                ),
+                value,
+                header_element_visible,
+                display_value=self._display_funcs_index[(r, c)](value),
+                attributes=(
+                    f'rowspan="{idx_lengths.get((c, r), 0)}"'
+                    if idx_lengths.get((c, r), 0) > 1
+                    else ""
+                ),
+            )
+
+            if self.cell_ids:
+                header_element[
+                    "id"
+                ] = f"{self.css['level']}{c}_{self.css['row']}{r}"  # id is given
+            if (
+                header_element_visible
+                and (r, c) in self.ctx_index
+                and self.ctx_index[r, c]
+            ):
+                # always add id if a style is specified
+                header_element["id"] = f"{self.css['level']}{c}_{self.css['row']}{r}"
+                self.cellstyle_map_index[tuple(self.ctx_index[r, c])].append(
+                    f"{self.css['level']}{c}_{self.css['row']}{r}"
+                )
+
+            index_headers.append(header_element)
+
+        data: list = []
+        visible_col_count: int = 0
+        for c, value in enumerate(row_tup[1:]):
+            data_element_visible = (
+                c not in self.hidden_columns and r not in self.hidden_rows
+            )
+            if data_element_visible:
+                visible_col_count += 1
+            if self._check_trim(
+                visible_col_count,
+                max_cols,
+                data,
+                "td",
+                f"{self.css['data']} {self.css['row']}{r} {self.css['col_trim']}",
+            ):
+                break
+
+            # add custom classes from cell context
+            cls = ""
+            if (r, c) in self.cell_context:
+                cls = " " + self.cell_context[r, c]
+
+            data_element = _element(
+                "td",
+                (
+                    f"{self.css['data']} {self.css['row']}{r} "
+                    f"{self.css['col']}{c}{cls}"
+                ),
+                value,
+                data_element_visible,
+                attributes="",
+                display_value=self._display_funcs[(r, c)](value),
+            )
+
+            if self.cell_ids:
+                data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}"
+            if data_element_visible and (r, c) in self.ctx and self.ctx[r, c]:
+                # always add id if needed due to specified style
+                data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}"
+                self.cellstyle_map[tuple(self.ctx[r, c])].append(
+                    f"{self.css['row']}{r}_{self.css['col']}{c}"
+                )
+
+            data.append(data_element)
+
+        return index_headers + data
+
+    def _translate_latex(self, d: dict, clines: str | None) -> None:
+        r"""
+        Post-process the default render dict for the LaTeX template format.
+
+        Processing items included are:
+          - Remove hidden columns from the non-headers part of the body.
+          - Place cellstyles directly in td cells rather than use cellstyle_map.
+          - Remove hidden indexes or reinsert missing th elements if part of multiindex
+            or multirow sparsification (so that \multirow and \multicol work correctly).
+        """
+        index_levels = self.index.nlevels
+        visible_index_level_n = index_levels - sum(self.hide_index_)
+        d["head"] = [
+            [
+                {**col, "cellstyle": self.ctx_columns[r, c - visible_index_level_n]}
+                for c, col in enumerate(row)
+                if col["is_visible"]
+            ]
+            for r, row in enumerate(d["head"])
+        ]
+
+        def _concatenated_visible_rows(obj, n, row_indices):
+            """
+            Extract all visible row indices recursively from concatenated stylers.
+            """
+            row_indices.extend(
+                [r + n for r in range(len(obj.index)) if r not in obj.hidden_rows]
+            )
+            n += len(obj.index)
+            for concatenated in obj.concatenated:
+                n = _concatenated_visible_rows(concatenated, n, row_indices)
+            return n
+
+        def concatenated_visible_rows(obj):
+            row_indices: list[int] = []
+            _concatenated_visible_rows(obj, 0, row_indices)
+            # TODO try to consolidate the concat visible rows
+            # methods to a single function / recursion for simplicity
+            return row_indices
+
+        body = []
+        for r, row in zip(concatenated_visible_rows(self), d["body"]):
+            # note: cannot enumerate d["body"] because rows were dropped if hidden
+            # during _translate_body so must zip to acquire the true r-index associated
+            # with the ctx obj which contains the cell styles.
+            if all(self.hide_index_):
+                row_body_headers = []
+            else:
+                row_body_headers = [
+                    {
+                        **col,
+                        "display_value": col["display_value"]
+                        if col["is_visible"]
+                        else "",
+                        "cellstyle": self.ctx_index[r, c],
+                    }
+                    for c, col in enumerate(row[:index_levels])
+                    if (col["type"] == "th" and not self.hide_index_[c])
+                ]
+
+            row_body_cells = [
+                {**col, "cellstyle": self.ctx[r, c]}
+                for c, col in enumerate(row[index_levels:])
+                if (col["is_visible"] and col["type"] == "td")
+            ]
+
+            body.append(row_body_headers + row_body_cells)
+        d["body"] = body
+
+        # clines are determined from info on index_lengths and hidden_rows and input
+        # to a dict defining which row clines should be added in the template.
+        if clines not in [
+            None,
+            "all;data",
+            "all;index",
+            "skip-last;data",
+            "skip-last;index",
+        ]:
+            raise ValueError(
+                f"`clines` value of {clines} is invalid. Should either be None or one "
+                "of 'all;data', 'all;index', 'skip-last;data', 'skip-last;index'."
+            )
+        if clines is not None:
+            data_len = len(row_body_cells) if "data" in clines and d["body"] else 0
+
+            d["clines"] = defaultdict(list)
+            visible_row_indexes: list[int] = [
+                r for r in range(len(self.data.index)) if r not in self.hidden_rows
+            ]
+            visible_index_levels: list[int] = [
+                i for i in range(index_levels) if not self.hide_index_[i]
+            ]
+            for rn, r in enumerate(visible_row_indexes):
+                for lvln, lvl in enumerate(visible_index_levels):
+                    if lvl == index_levels - 1 and "skip-last" in clines:
+                        continue
+                    idx_len = d["index_lengths"].get((lvl, r), None)
+                    if idx_len is not None:  # i.e. not a sparsified entry
+                        d["clines"][rn + idx_len].append(
+                            f"\\cline{{{lvln+1}-{len(visible_index_levels)+data_len}}}"
+                        )
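+    # Illustrative sketch of the clines output (hypothetical spans, not pandas
+    # API): for a two-level index with index_lengths {(0, 0): 2, (1, 0): 1,
+    # (1, 1): 1} and clines="all;index", the loop above yields
+    # d["clines"] == {1: ["\\cline{2-2}"], 2: ["\\cline{1-2}", "\\cline{2-2}"]},
+    # i.e. a map from visible row number to the LaTeX \cline commands drawn
+    # after that row.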
+
+    def format(
+        self,
+        formatter: ExtFormatter | None = None,
+        subset: Subset | None = None,
+        na_rep: str | None = None,
+        precision: int | None = None,
+        decimal: str = ".",
+        thousands: str | None = None,
+        escape: str | None = None,
+        hyperlinks: str | None = None,
+    ) -> StylerRenderer:
+        r"""
+        Format the text display value of cells.
+
+        Parameters
+        ----------
+        formatter : str, callable, dict or None
+            Object to define how values are displayed. See notes.
+        subset : label, array-like, IndexSlice, optional
+            A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
+            or single key, to `DataFrame.loc[:, <subset>]` where the columns are
+            prioritised, to limit ``data`` to *before* applying the function.
+        na_rep : str, optional
+            Representation for missing values.
+            If ``na_rep`` is None, no special formatting is applied.
+        precision : int, optional
+            Floating point precision to use for display purposes, if not determined by
+            the specified ``formatter``.
+
+            .. versionadded:: 1.3.0
+
+        decimal : str, default "."
+            Character used as decimal separator for floats, complex and integers.
+
+            .. versionadded:: 1.3.0
+
+        thousands : str, optional, default None
+            Character used as thousands separator for floats, complex and integers.
+
+            .. versionadded:: 1.3.0
+
+        escape : str, optional
+            Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
+            in cell display string with HTML-safe sequences.
+            Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
+            ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
+            LaTeX-safe sequences.
+            Use 'latex-math' to replace the characters the same way as in 'latex' mode,
+            except for math substrings, which either are surrounded
+            by two characters ``$`` or start with the character ``\(`` and
+            end with ``\)``. Escaping is done before ``formatter``.
+
+            .. versionadded:: 1.3.0
+
+        hyperlinks : {"html", "latex"}, optional
+            Convert string patterns containing https://, http://, ftp:// or www. to
+            HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href
+            commands if "latex".
+
+            .. versionadded:: 1.4.0
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.format_index: Format the text display value of index labels.
+
+        Notes
+        -----
+        This method assigns a formatting function, ``formatter``, to each cell in the
+        DataFrame. If ``formatter`` is ``None``, then the default formatter is used.
+        If a callable then that function should take a data value as input and return
+        a displayable representation, such as a string. If ``formatter`` is
+        given as a string this is assumed to be a valid Python format specification
+        and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given,
+        keys should correspond to column names, and values should be string or
+        callable, as above.
+
+        The default formatter currently expresses floats and complex numbers with the
+        pandas display precision unless using the ``precision`` argument here. The
+        default formatter does not adjust the representation of missing values unless
+        the ``na_rep`` argument is used.
+
+        The ``subset`` argument defines which region to apply the formatting function
+        to. If the ``formatter`` argument is given in dict form but does not include
+        all columns within the subset then these columns will have the default formatter
+        applied. Any columns in the formatter dict excluded from the subset will
+        be ignored.
+
+        When using a ``formatter`` string the dtypes must be compatible, otherwise a
+        `ValueError` will be raised.
+
+        When instantiating a Styler, default formatting can be applied by setting the
+        ``pandas.options``:
+
+        - ``styler.format.formatter``: default None.
+        - ``styler.format.na_rep``: default None.
+        - ``styler.format.precision``: default 6.
+        - ``styler.format.decimal``: default ".".
+        - ``styler.format.thousands``: default None.
+        - ``styler.format.escape``: default None.
+
+        .. warning::
+           `Styler.format` is ignored when using the output format `Styler.to_excel`,
+           since Excel and Python have inherently different formatting structures.
+           However, it is possible to use the `number-format` pseudo CSS attribute
+           to force Excel permissible formatting. See examples.
+
+        Examples
+        --------
+        Using ``na_rep`` and ``precision`` with the default ``formatter``
+
+        >>> df = pd.DataFrame([[np.nan, 1.0, 'A'], [2.0, np.nan, 3.0]])
+        >>> df.style.format(na_rep='MISS', precision=3)  # doctest: +SKIP
+                0       1       2
+        0    MISS   1.000       A
+        1   2.000    MISS   3.000
+
+        Using a ``formatter`` specification on consistent column dtypes
+
+        >>> df.style.format('{:.2f}', na_rep='MISS', subset=[0,1])  # doctest: +SKIP
+                0      1          2
+        0    MISS   1.00          A
+        1    2.00   MISS   3.000000
+
+        Using the default ``formatter`` for unspecified columns
+
+        >>> df.style.format({0: '{:.2f}', 1: '£ {:.1f}'}, na_rep='MISS', precision=1)
+        ...  # doctest: +SKIP
+                0      1    2
+        0    MISS  £ 1.0    A
+        1    2.00   MISS  3.0
+
+        Multiple ``na_rep`` or ``precision`` specifications under the default
+        ``formatter``.
+
+        >>> (df.style.format(na_rep='MISS', precision=1, subset=[0])
+        ... .format(na_rep='PASS', precision=2, subset=[1, 2]))  # doctest: +SKIP
+                0      1     2
+        0    MISS   1.00     A
+        1     2.0   PASS  3.00
+
+        Using a callable ``formatter`` function.
+
+        >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'
+        >>> df.style.format({0: '{:.1f}', 2: func}, precision=4, na_rep='MISS')
+        ...  # doctest: +SKIP
+                0        1       2
+        0    MISS   1.0000  STRING
+        1     2.0     MISS   FLOAT
+
+        Using a ``formatter`` with HTML ``escape`` and ``na_rep``.
+
+        >>> df = pd.DataFrame([['<div></div>', '"A&B"', None]])
+        >>> s = df.style.format(
+        ...     '<a href="a.com/{0}">{0}</a>', escape="html", na_rep="NA"
+        ... )
+        >>> s.to_html()  # doctest: +SKIP
+        ...
+        <td .. ><a href="a.com/&lt;div&gt;&lt;/div&gt;">&lt;div&gt;&lt;/div&gt;</a></td>
+        <td .. ><a href="a.com/&#34;A&amp;B&#34;">&#34;A&amp;B&#34;</a></td>
+        <td .. >NA</td>
+        ...
+
+        Using a ``formatter`` with ``escape`` in 'latex' mode.
+
+        >>> df = pd.DataFrame([["123"], ["~ ^"], ["$%#"]])
+        >>> df.style.format("\\textbf{{{}}}", escape="latex").to_latex()
+        ...  # doctest: +SKIP
+        \begin{tabular}{ll}
+         & 0 \\
+        0 & \textbf{123} \\
+        1 & \textbf{\textasciitilde \space \textasciicircum } \\
+        2 & \textbf{\$\%\#} \\
+        \end{tabular}
+
+        Applying ``escape`` in 'latex-math' mode. In the example below
+        we enter math mode using the character ``$``.
+
+        >>> df = pd.DataFrame([[r"$\sum_{i=1}^{10} a_i$ a~b $\alpha \
+        ... = \frac{\beta}{\zeta^2}$"], ["%#^ $ \$x^2 $"]])
+        >>> df.style.format(escape="latex-math").to_latex()
+        ...  # doctest: +SKIP
+        \begin{tabular}{ll}
+         & 0 \\
+        0 & $\sum_{i=1}^{10} a_i$ a\textasciitilde b $\alpha = \frac{\beta}{\zeta^2}$ \\
+        1 & \%\#\textasciicircum \space $ \$x^2 $ \\
+        \end{tabular}
+
+        We can use the character ``\(`` to enter math mode and the character ``\)``
+        to close math mode.
+
+        >>> df = pd.DataFrame([[r"\(\sum_{i=1}^{10} a_i\) a~b \(\alpha \
+        ... = \frac{\beta}{\zeta^2}\)"], ["%#^ \( \$x^2 \)"]])
+        >>> df.style.format(escape="latex-math").to_latex()
+        ...  # doctest: +SKIP
+        \begin{tabular}{ll}
+         & 0 \\
+        0 & \(\sum_{i=1}^{10} a_i\) a\textasciitilde b \(\alpha
+        = \frac{\beta}{\zeta^2}\) \\
+        1 & \%\#\textasciicircum \space \( \$x^2 \) \\
+        \end{tabular}
+
+        If we have in one DataFrame cell a combination of both shorthands
+        for math formulas, the shorthand with the sign ``$`` will be applied.
+
+        >>> df = pd.DataFrame([[r"\( x^2 \) $x^2$"], \
+        ... [r"$\frac{\beta}{\zeta}$ \(\frac{\beta}{\zeta}\)"]])
+        >>> df.style.format(escape="latex-math").to_latex()
+        ...  # doctest: +SKIP
+        \begin{tabular}{ll}
+         & 0 \\
+        0 & \textbackslash ( x\textasciicircum 2 \textbackslash ) $x^2$ \\
+        1 & $\frac{\beta}{\zeta}$ \textbackslash (\textbackslash
+        frac\{\textbackslash beta\}\{\textbackslash zeta\}\textbackslash ) \\
+        \end{tabular}
+
+        Pandas defines a `number-format` pseudo CSS attribute instead of the `.format`
+        method to create `to_excel` permissible formatting. Note that semi-colons are
+        CSS protected characters but used as separators in Excel's format string.
+        Replace semi-colons with the section separator character (ASCII-245) when
+        defining the formatting here.
+
+        >>> df = pd.DataFrame({"A": [1, 0, -1]})
+        >>> pseudo_css = "number-format: 0§[Red](0)§-§@;"
+        >>> filename = "formatted_file.xlsx"
+        >>> df.style.map(lambda v: pseudo_css).to_excel(filename)  # doctest: +SKIP
+
+        .. figure:: ../../_static/style/format_excel_css.png
+        """
+        if all(
+            (
+                formatter is None,
+                subset is None,
+                precision is None,
+                decimal == ".",
+                thousands is None,
+                na_rep is None,
+                escape is None,
+                hyperlinks is None,
+            )
+        ):
+            self._display_funcs.clear()
+            return self  # clear the formatter / revert to default and avoid looping
+
+        subset = slice(None) if subset is None else subset
+        subset = non_reducing_slice(subset)
+        data = self.data.loc[subset]
+
+        if not isinstance(formatter, dict):
+            formatter = {col: formatter for col in data.columns}
+
+        cis = self.columns.get_indexer_for(data.columns)
+        ris = self.index.get_indexer_for(data.index)
+        for ci in cis:
+            format_func = _maybe_wrap_formatter(
+                formatter.get(self.columns[ci]),
+                na_rep=na_rep,
+                precision=precision,
+                decimal=decimal,
+                thousands=thousands,
+                escape=escape,
+                hyperlinks=hyperlinks,
+            )
+            for ri in ris:
+                self._display_funcs[(ri, ci)] = format_func
+
+        return self
+
+    def format_index(
+        self,
+        formatter: ExtFormatter | None = None,
+        axis: Axis = 0,
+        level: Level | list[Level] | None = None,
+        na_rep: str | None = None,
+        precision: int | None = None,
+        decimal: str = ".",
+        thousands: str | None = None,
+        escape: str | None = None,
+        hyperlinks: str | None = None,
+    ) -> StylerRenderer:
+        r"""
+        Format the text display value of index labels or column headers.
+
+        .. versionadded:: 1.4.0
+
+        Parameters
+        ----------
+        formatter : str, callable, dict or None
+            Object to define how values are displayed. See notes.
+        axis : {0, "index", 1, "columns"}
+            Whether to apply the formatter to the index or column headers.
+        level : int, str, list
+            The level(s) over which to apply the generic formatter.
+        na_rep : str, optional
+            Representation for missing values.
+            If ``na_rep`` is None, no special formatting is applied.
+        precision : int, optional
+            Floating point precision to use for display purposes, if not determined by
+            the specified ``formatter``.
+        decimal : str, default "."
+            Character used as decimal separator for floats, complex and integers.
+        thousands : str, optional, default None
+            Character used as thousands separator for floats, complex and integers.
+        escape : str, optional
+            Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
+            in cell display string with HTML-safe sequences.
+            Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
+            ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
+            LaTeX-safe sequences.
+            Escaping is done before ``formatter``.
+        hyperlinks : {"html", "latex"}, optional
+            Convert string patterns containing https://, http://, ftp:// or www. to
+            HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href
+            commands if "latex".
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.format: Format the text display value of data cells.
+
+        Notes
+        -----
+        This method assigns a formatting function, ``formatter``, to each level label
+        in the DataFrame's index or column headers. If ``formatter`` is ``None``,
+        then the default formatter is used.
+        If a callable then that function should take a label value as input and return
+        a displayable representation, such as a string. If ``formatter`` is
+        given as a string this is assumed to be a valid Python format specification
+        and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given,
+        keys should correspond to MultiIndex level numbers or names, and values should
+        be string or callable, as above.
+
+        The default formatter currently expresses floats and complex numbers with the
+        pandas display precision unless using the ``precision`` argument here. The
+        default formatter does not adjust the representation of missing values unless
+        the ``na_rep`` argument is used.
+
+        The ``level`` argument defines which levels of a MultiIndex to apply the
+        method to. If the ``formatter`` argument is given in dict form but does
+        not include all levels within the level argument then these unspecified levels
+        will have the default formatter applied. Any levels in the formatter dict
+        specifically excluded from the level argument will be ignored.
+
+        When using a ``formatter`` string the dtypes must be compatible, otherwise a
+        `ValueError` will be raised.
+
+        .. warning::
+           `Styler.format_index` is ignored when using the output format
+           `Styler.to_excel`, since Excel and Python have inherently different
+           formatting structures.
+           However, it is possible to use the `number-format` pseudo CSS attribute
+           to force Excel permissible formatting. See documentation for `Styler.format`.
+
+        Examples
+        --------
+        Using ``na_rep`` and ``precision`` with the default ``formatter``
+
+        >>> df = pd.DataFrame([[1, 2, 3]], columns=[2.0, np.nan, 4.0])
+        >>> df.style.format_index(axis=1, na_rep='MISS', precision=3)  # doctest: +SKIP
+            2.000    MISS    4.000
+        0       1       2        3
+
+        Using a ``formatter`` specification on consistent dtypes in a level
+
+        >>> df.style.format_index('{:.2f}', axis=1, na_rep='MISS')  # doctest: +SKIP
+             2.00    MISS    4.00
+        0       1       2       3
+
+        Using the default ``formatter`` for unspecified levels
+
+        >>> df = pd.DataFrame([[1, 2, 3]],
+        ...     columns=pd.MultiIndex.from_arrays([["a", "a", "b"],[2, np.nan, 4]]))
+        >>> df.style.format_index({0: lambda v: v.upper()}, axis=1, precision=1)
+        ...  # doctest: +SKIP
+                       A       B
+             2.0     nan     4.0
+        0      1       2       3
+
+        Using a callable ``formatter`` function.
+
+        >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'
+        >>> df.style.format_index(func, axis=1, na_rep='MISS')
+        ...  # doctest: +SKIP
+                  STRING  STRING
+           FLOAT    MISS   FLOAT
+        0      1       2       3
+
+        Using a ``formatter`` with HTML ``escape`` and ``na_rep``.
+
+        >>> df = pd.DataFrame([[1, 2, 3]], columns=['"A"', 'A&B', None])
+        >>> s = df.style.format_index('$ {0}', axis=1, escape="html", na_rep="NA")
+        ...  # doctest: +SKIP
+        <th .. >$ &#34;A&#34;</th>
+        <th .. >$ A&amp;B</th>
+        <th .. >NA</td>
+        ...
+
+        Using a ``formatter`` with LaTeX ``escape``.
+
+        >>> df = pd.DataFrame([[1, 2, 3]], columns=["123", "~", "$%#"])
+        >>> df.style.format_index("\\textbf{{{}}}", escape="latex", axis=1).to_latex()
+        ...  # doctest: +SKIP
+        \begin{tabular}{lrrr}
+        {} & {\textbf{123}} & {\textbf{\textasciitilde }} & {\textbf{\$\%\#}} \\
+        0 & 1 & 2 & 3 \\
+        \end{tabular}
+        """
+        axis = self.data._get_axis_number(axis)
+        if axis == 0:
+            display_funcs_, obj = self._display_funcs_index, self.index
+        else:
+            display_funcs_, obj = self._display_funcs_columns, self.columns
+        levels_ = refactor_levels(level, obj)
+
+        if all(
+            (
+                formatter is None,
+                level is None,
+                precision is None,
+                decimal == ".",
+                thousands is None,
+                na_rep is None,
+                escape is None,
+                hyperlinks is None,
+            )
+        ):
+            display_funcs_.clear()
+            return self  # clear the formatter / revert to default and avoid looping
+
+        if not isinstance(formatter, dict):
+            formatter = {level: formatter for level in levels_}
+        else:
+            formatter = {
+                obj._get_level_number(level): formatter_
+                for level, formatter_ in formatter.items()
+            }
+
+        for lvl in levels_:
+            format_func = _maybe_wrap_formatter(
+                formatter.get(lvl),
+                na_rep=na_rep,
+                precision=precision,
+                decimal=decimal,
+                thousands=thousands,
+                escape=escape,
+                hyperlinks=hyperlinks,
+            )
+
+            for idx in [(i, lvl) if axis == 0 else (lvl, i) for i in range(len(obj))]:
+                display_funcs_[idx] = format_func
+
+        return self
+
+    def relabel_index(
+        self,
+        labels: Sequence | Index,
+        axis: Axis = 0,
+        level: Level | list[Level] | None = None,
+    ) -> StylerRenderer:
+        r"""
+        Relabel the index, or column header, keys to display a set of specified values.
+
+        .. versionadded:: 1.5.0
+
+        Parameters
+        ----------
+        labels : list-like or Index
+            New labels to display. Must have same length as the underlying values not
+            hidden.
+        axis : {"index", 0, "columns", 1}
+            Apply to the index or columns.
+        level : int, str, list, optional
+            The level(s) over which to apply the new labels. If `None` will apply
+            to all levels of an Index or MultiIndex which are not hidden.
+
+        Returns
+        -------
+        Styler
+
+        See Also
+        --------
+        Styler.format_index: Format the text display value of index or column headers.
+        Styler.hide: Hide the index, column headers, or specified data from display.
+
+        Notes
+        -----
+        As part of Styler, this method allows the display of an index to be
+        completely user-specified without affecting the underlying DataFrame data,
+        index, or column headers. This means that the flexibility of indexing is
+        maintained whilst the final display is customisable.
+
+        Since Styler is designed to be progressively constructed with method chaining,
+        this method is adapted to react to the **currently specified hidden elements**.
+        This is useful because it means one does not have to specify all the new
+        labels if the majority of an index, or column headers, have already been hidden.
+        The following produce equivalent display (note the length of ``labels`` in
+        each case).
+
+        .. code-block:: python
+
+            # relabel first, then hide
+            df = pd.DataFrame({"col": ["a", "b", "c"]})
+            df.style.relabel_index(["A", "B", "C"]).hide([0,1])
+            # hide first, then relabel
+            df = pd.DataFrame({"col": ["a", "b", "c"]})
+            df.style.hide([0,1]).relabel_index(["C"])
+
+        This method should be used, rather than :meth:`Styler.format_index`, in one of
+        the following cases (see examples):
+
+        - A specified set of labels are required which are not a function of the
+          underlying index keys.
+        - The function of the underlying index keys requires a counter variable,
+          such as those available upon enumeration.
+
+        Examples
+        --------
+        Basic use
+
+        >>> df = pd.DataFrame({"col": ["a", "b", "c"]})
+        >>> df.style.relabel_index(["A", "B", "C"])  # doctest: +SKIP
+             col
+        A      a
+        B      b
+        C      c
+
+        Chaining with pre-hidden elements
+
+        >>> df.style.hide([0,1]).relabel_index(["C"])  # doctest: +SKIP
+             col
+        C      c
+
+        Using a MultiIndex
+
+        >>> midx = pd.MultiIndex.from_product([[0, 1], [0, 1], [0, 1]])
+        >>> df = pd.DataFrame({"col": list(range(8))}, index=midx)
+        >>> styler = df.style  # doctest: +SKIP
+                  col
+        0  0  0     0
+              1     1
+           1  0     2
+              1     3
+        1  0  0     4
+              1     5
+           1  0     6
+              1     7
+        >>> styler.hide((midx.get_level_values(0)==0)|(midx.get_level_values(1)==0))
+        ...  # doctest: +SKIP
+        >>> styler.hide(level=[0,1])  # doctest: +SKIP
+        >>> styler.relabel_index(["binary6", "binary7"])  # doctest: +SKIP
+                  col
+        binary6     6
+        binary7     7
+
+        We can also achieve the above by indexing first and then re-labeling
+
+        >>> styler = df.loc[[(1,1,0), (1,1,1)]].style
+        >>> styler.hide(level=[0,1]).relabel_index(["binary6", "binary7"])
+        ...  # doctest: +SKIP
+                  col
+        binary6     6
+        binary7     7
+
+        Defining a formatting function which uses an enumeration counter. Also note
+        that the value of the index key is passed in the case of string labels so it
+        can also be inserted into the label, using curly brackets (or double curly
+        brackets if the string is pre-formatted),
+
+        >>> df = pd.DataFrame({"samples": np.random.rand(10)})
+        >>> styler = df.loc[np.random.randint(0,10,3)].style
+        >>> styler.relabel_index([f"sample{i+1} ({{}})" for i in range(3)])
+        ...  # doctest: +SKIP
+                         samples
+        sample1 (5)     0.315811
+        sample2 (0)     0.495941
+        sample3 (2)     0.067946
+        """
+        axis = self.data._get_axis_number(axis)
+        if axis == 0:
+            display_funcs_, obj = self._display_funcs_index, self.index
+            hidden_labels, hidden_lvls = self.hidden_rows, self.hide_index_
+        else:
+            display_funcs_, obj = self._display_funcs_columns, self.columns
+            hidden_labels, hidden_lvls = self.hidden_columns, self.hide_columns_
+        visible_len = len(obj) - len(set(hidden_labels))
+        if len(labels) != visible_len:
+            raise ValueError(
+                "``labels`` must be of length equal to the number of "
+                f"visible labels along ``axis`` ({visible_len})."
+            )
+
+        if level is None:
+            level = [i for i in range(obj.nlevels) if not hidden_lvls[i]]
+        levels_ = refactor_levels(level, obj)
+
+        def alias_(x, value):
+            if isinstance(value, str):
+                return value.format(x)
+            return value
+
+        for ai, i in enumerate([i for i in range(len(obj)) if i not in hidden_labels]):
+            if len(levels_) == 1:
+                idx = (i, levels_[0]) if axis == 0 else (levels_[0], i)
+                display_funcs_[idx] = partial(alias_, value=labels[ai])
+            else:
+                for aj, lvl in enumerate(levels_):
+                    idx = (i, lvl) if axis == 0 else (lvl, i)
+                    display_funcs_[idx] = partial(alias_, value=labels[ai][aj])
+
+        return self
+
+
+def _element(
+    html_element: str,
+    html_class: str | None,
+    value: Any,
+    is_visible: bool,
+    **kwargs,
+) -> dict:
+    """
+    Template to return container with information for a <td></td> or <th></th> element.
+    """
+    if "display_value" not in kwargs:
+        kwargs["display_value"] = value
+    return {
+        "type": html_element,
+        "value": value,
+        "class": html_class,
+        "is_visible": is_visible,
+        **kwargs,
+    }
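+# For orientation (illustrative values, not part of the pandas API): these
+# plain dicts are what the Jinja2 templates consume, e.g.
+#   _element("td", "data row0 col0", 3.14, True)
+#   -> {"type": "td", "value": 3.14, "class": "data row0 col0",
+#       "is_visible": True, "display_value": 3.14}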
+
+
+def _get_trimming_maximums(
+    rn,
+    cn,
+    max_elements,
+    max_rows=None,
+    max_cols=None,
+    scaling_factor: float = 0.8,
+) -> tuple[int, int]:
+    """
+    Recursively reduce the number of rows and columns to satisfy max elements.
+
+    Parameters
+    ----------
+    rn, cn : int
+        The number of input rows / columns
+    max_elements : int
+        The number of allowable elements
+    max_rows, max_cols : int, optional
+        Directly specify an initial maximum rows or columns before compression.
+    scaling_factor : float
+        Factor at which to reduce the number of rows / columns to fit.
+
+    Returns
+    -------
+    rn, cn : tuple
+        New rn and cn values that satisfy the max_elements constraint
+    """
+
+    def scale_down(rn, cn):
+        if cn >= rn:
+            return rn, int(cn * scaling_factor)
+        else:
+            return int(rn * scaling_factor), cn
+
+    if max_rows:
+        rn = max_rows if rn > max_rows else rn
+    if max_cols:
+        cn = max_cols if cn > max_cols else cn
+
+    while rn * cn > max_elements:
+        rn, cn = scale_down(rn, cn)
+
+    return rn, cn
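+# A quick sketch of the scaling behaviour (hypothetical numbers): starting from
+# rn=1000, cn=1000 with max_elements=250_000, the larger dimension is repeatedly
+# scaled by 0.8 until rn * cn <= max_elements, giving
+#   _get_trimming_maximums(1000, 1000, 250_000)  ->  (512, 409)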
+
+
+def _get_level_lengths(
+    index: Index,
+    sparsify: bool,
+    max_index: int,
+    hidden_elements: Sequence[int] | None = None,
+):
+    """
+    Given an index, find the level length for each element.
+
+    Parameters
+    ----------
+    index : Index
+        Index or columns to determine lengths of each element
+    sparsify : bool
+        Whether to hide or show each distinct element in a MultiIndex
+    max_index : int
+        The maximum number of elements to analyse along the index due to trimming
+    hidden_elements : sequence of int
+        Index positions of elements hidden from display in the index affecting
+        length
+
+    Returns
+    -------
+    dict
+        Result is a dictionary of (level, initial_position): span
+    """
+    if isinstance(index, MultiIndex):
+        levels = index._format_multi(sparsify=lib.no_default, include_names=False)
+    else:
+        levels = index._format_flat(include_name=False)
+
+    if hidden_elements is None:
+        hidden_elements = []
+
+    lengths = {}
+    if not isinstance(index, MultiIndex):
+        for i, value in enumerate(levels):
+            if i not in hidden_elements:
+                lengths[(0, i)] = 1
+        return lengths
+
+    for i, lvl in enumerate(levels):
+        visible_row_count = 0  # used to break loop due to display trimming
+        for j, row in enumerate(lvl):
+            if visible_row_count > max_index:
+                break
+            if not sparsify:
+                # then lengths will always equal 1 since no aggregation.
+                if j not in hidden_elements:
+                    lengths[(i, j)] = 1
+                    visible_row_count += 1
+            elif (row is not lib.no_default) and (j not in hidden_elements):
+                # this element has not been sparsified so must be the start of section
+                last_label = j
+                lengths[(i, last_label)] = 1
+                visible_row_count += 1
+            elif row is not lib.no_default:
+                # even if the above is hidden, keep track of it in case length > 1 and
+                # later elements are visible
+                last_label = j
+                lengths[(i, last_label)] = 0
+            elif j not in hidden_elements:
+                # then element must be part of sparsified section and is visible
+                visible_row_count += 1
+                if visible_row_count > max_index:
+                    break  # do not add a length since the render trim limit reached
+                if lengths[(i, last_label)] == 0:
+                    # if previous iteration was first-of-section but hidden then offset
+                    last_label = j
+                    lengths[(i, last_label)] = 1
+                else:
+                    # else add to previous iteration
+                    lengths[(i, last_label)] += 1
+
+    non_zero_lengths = {
+        element: length for element, length in lengths.items() if length >= 1
+    }
+
+    return non_zero_lengths
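+# An illustrative example (assumed inputs, not part of the pandas API): for
+# pd.MultiIndex.from_product([["a", "b"], [0, 1]]) with sparsify=True, a large
+# max_index and no hidden elements, the result maps (level, position) -> span:
+#   {(0, 0): 2, (0, 2): 2, (1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}
+# i.e. "a" and "b" each span two rows on level 0, while level 1 is never merged.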
+
+
+def _is_visible(idx_row, idx_col, lengths) -> bool:
+    """
+    Whether the element is visible, i.e. whether ``lengths`` (a map keyed by
+    (level, position)) contains an entry for it.
+    """
+    return (idx_col, idx_row) in lengths
+
+
+def format_table_styles(styles: CSSStyles) -> CSSStyles:
+    """
+    looks for multiple CSS selectors and separates them:
+    [{'selector': 'td, th', 'props': 'a:v;'}]
+        ---> [{'selector': 'td', 'props': 'a:v;'},
+              {'selector': 'th', 'props': 'a:v;'}]
+    """
+    return [
+        {"selector": selector, "props": css_dict["props"]}
+        for css_dict in styles
+        for selector in css_dict["selector"].split(",")
+    ]
+
+
+def _default_formatter(x: Any, precision: int, thousands: bool = False) -> Any:
+    """
+    Format the display of a value
+
+    Parameters
+    ----------
+    x : Any
+        Input variable to be formatted
+    precision : int
+        Floating point precision used if ``x`` is float or complex.
+    thousands : bool, default False
+        Whether to group digits with thousands separated with ",".
+
+    Returns
+    -------
+    value : Any
+        Matches input type, or string if input is float or complex or int with sep.
+    """
+    if is_float(x) or is_complex(x):
+        return f"{x:,.{precision}f}" if thousands else f"{x:.{precision}f}"
+    elif is_integer(x):
+        return f"{x:,}" if thousands else str(x)
+    return x
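+# A minimal sketch (hypothetical calls): floats honour ``precision`` and the
+# optional grouping, integers only the grouping, and other types pass through:
+#   _default_formatter(1234.5678, precision=2, thousands=True)  ->  '1,234.57'
+#   _default_formatter(1234567, precision=2, thousands=True)    ->  '1,234,567'
+#   _default_formatter("text", precision=2)                     ->  'text'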
+
+
+def _wrap_decimal_thousands(
+    formatter: Callable, decimal: str, thousands: str | None
+) -> Callable:
+    """
+    Takes a string formatting function and wraps logic to deal with thousands and
+    decimal parameters, in the case that they are non-standard and that the input
+    is a (float, complex, int).
+    """
+
+    def wrapper(x):
+        if is_float(x) or is_integer(x) or is_complex(x):
+            if decimal != "." and thousands is not None and thousands != ",":
+                return (
+                    formatter(x)
+                    .replace(",", "§_§-")  # rare string to avoid "," <-> "." clash.
+                    .replace(".", decimal)
+                    .replace("§_§-", thousands)
+                )
+            elif decimal != "." and (thousands is None or thousands == ","):
+                return formatter(x).replace(".", decimal)
+            elif decimal == "." and thousands is not None and thousands != ",":
+                return formatter(x).replace(",", thousands)
+        return formatter(x)
+
+    return wrapper
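+# An illustrative round-trip (assumed European-style separators): wrapping
+# "{:,.2f}".format with decimal="," and thousands="." turns 1234.5 into
+# '1.234,50' -- the sentinel "§_§-" keeps the "," and "." swaps from clashing.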
+
+
+def _str_escape(x, escape):
+    """if escaping: only use on str, else return input"""
+    if isinstance(x, str):
+        if escape == "html":
+            return escape_html(x)
+        elif escape == "latex":
+            return _escape_latex(x)
+        elif escape == "latex-math":
+            return _escape_latex_math(x)
+        else:
+            raise ValueError(
+                f"`escape` only permitted in {{'html', 'latex', 'latex-math'}}, "
+                f"got {escape}"
+            )
+    return x
+
+
+def _render_href(x, format):
+    """uses regex to detect a common URL pattern and converts to href tag in format."""
+    if isinstance(x, str):
+        if format == "html":
+            href = '<a href="{0}" target="_blank">{0}</a>'
+        elif format == "latex":
+            href = r"\href{{{0}}}{{{0}}}"
+        else:
+            raise ValueError("``hyperlinks`` format can only be 'html' or 'latex'")
+        pat = r"((http|ftp)s?:\/\/|www.)[\w/\-?=%.:@]+\.[\w/\-&?=%.,':;~!@#$*()\[\]]+"
+        return re.sub(pat, lambda m: href.format(m.group(0)), x)
+    return x
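+# A small sketch (hypothetical input): plain text is preserved and only the
+# URL-like substring is wrapped, e.g. with format="html"
+#   _render_href("see www.example.com", "html")
+#   -> 'see <a href="www.example.com" target="_blank">www.example.com</a>'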
+
+
+def _maybe_wrap_formatter(
+    formatter: BaseFormatter | None = None,
+    na_rep: str | None = None,
+    precision: int | None = None,
+    decimal: str = ".",
+    thousands: str | None = None,
+    escape: str | None = None,
+    hyperlinks: str | None = None,
+) -> Callable:
+    """
+    Allows formatters to be expressed as str, callable or None, where None returns
+    a default formatting function. Wraps with na_rep and precision where they are
+    available.
+    """
+    # Get initial func from input string, input callable, or from default factory
+    if isinstance(formatter, str):
+        func_0 = lambda x: formatter.format(x)
+    elif callable(formatter):
+        func_0 = formatter
+    elif formatter is None:
+        precision = (
+            get_option("styler.format.precision") if precision is None else precision
+        )
+        func_0 = partial(
+            _default_formatter, precision=precision, thousands=(thousands is not None)
+        )
+    else:
+        raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}")
+
+    # Replace chars if escaping
+    if escape is not None:
+        func_1 = lambda x: func_0(_str_escape(x, escape=escape))
+    else:
+        func_1 = func_0
+
+    # Replace decimals and thousands if non-standard inputs detected
+    if decimal != "." or (thousands is not None and thousands != ","):
+        func_2 = _wrap_decimal_thousands(func_1, decimal=decimal, thousands=thousands)
+    else:
+        func_2 = func_1
+
+    # Render links
+    if hyperlinks is not None:
+        func_3 = lambda x: func_2(_render_href(x, format=hyperlinks))
+    else:
+        func_3 = func_2
+
+    # Replace missing values if na_rep
+    if na_rep is None:
+        return func_3
+    else:
+        return lambda x: na_rep if (isna(x) is True) else func_3(x)
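+# Note the composition above: hyperlink rendering and escaping are applied to
+# the raw value before ``formatter``, decimal/thousands replacement to the
+# formatted string, and ``na_rep`` short-circuits for missing values.
+# A minimal sketch (assumed inputs):
+#   func = _maybe_wrap_formatter("{:.1f}", na_rep="MISS")
+#   func(1.23)          ->  '1.2'
+#   func(float("nan"))  ->  'MISS'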
+
+
+def non_reducing_slice(slice_: Subset):
+    """
+    Ensure that a slice doesn't reduce to a Series or Scalar.
+
+    Any user-passed `subset` should have this called on it
+    to make sure we're always working with DataFrames.
+    """
+    # default to column slice, like DataFrame
+    # ['A', 'B'] -> IndexSlice[:, ['A', 'B']]
+    kinds = (ABCSeries, np.ndarray, Index, list, str)
+    if isinstance(slice_, kinds):
+        slice_ = IndexSlice[:, slice_]
+
+    def pred(part) -> bool:
+        """
+        Returns
+        -------
+        bool
+            True if slice does *not* reduce,
+            False if `part` is a tuple.
+        """
+        # true when slice does *not* reduce, False when part is a tuple,
+        # i.e. MultiIndex slice
+        if isinstance(part, tuple):
+            # GH#39421 check for sub-slice:
+            return any((isinstance(s, slice) or is_list_like(s)) for s in part)
+        else:
+            return isinstance(part, slice) or is_list_like(part)
+
+    if not is_list_like(slice_):
+        if not isinstance(slice_, slice):
+            # a 1-d slice, like df.loc[1]
+            slice_ = [[slice_]]
+        else:
+            # slice(a, b, c)
+            slice_ = [slice_]  # to tuplize later
+    else:
+        # error: Item "slice" of "Union[slice, Sequence[Any]]" has no attribute
+        # "__iter__" (not iterable) -> is specifically list_like in conditional
+        slice_ = [p if pred(p) else [p] for p in slice_]  # type: ignore[union-attr]
+    return tuple(slice_)
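+# An illustrative trace (assumed input): a bare column label is first expanded
+# to IndexSlice[:, "A"] and the scalar part is then listified so ``.loc``
+# always returns a DataFrame:
+#   non_reducing_slice("A")  ->  (slice(None, None, None), ["A"])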
+
+
+def maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList:
+    """
+    Convert css-string to sequence of tuples format if needed.
+    'color:red; border:1px solid black;' -> [('color', 'red'),
+                                             ('border', '1px solid black')]
+    """
+    if isinstance(style, str):
+        s = style.split(";")
+        try:
+            return [
+                (x.split(":")[0].strip(), x.split(":")[1].strip())
+                for x in s
+                if x.strip() != ""
+            ]
+        except IndexError:
+            raise ValueError(
+                "Styles supplied as string must follow CSS rule formats, "
+                f"for example 'attr: val;'. '{style}' was given."
+            )
+    return style
+
+
+def refactor_levels(
+    level: Level | list[Level] | None,
+    obj: Index,
+) -> list[int]:
+    """
+    Returns a consistent levels arg for use in ``hide_index`` or ``hide_columns``.
+
+    Parameters
+    ----------
+    level : int, str, list
+        Original ``level`` arg supplied to above methods.
+    obj :
+        Either ``self.index`` or ``self.columns``
+
+    Returns
+    -------
+    list : refactored arg with a list of levels to hide
+    """
+    if level is None:
+        levels_: list[int] = list(range(obj.nlevels))
+    elif isinstance(level, int):
+        levels_ = [level]
+    elif isinstance(level, str):
+        levels_ = [obj._get_level_number(level)]
+    elif isinstance(level, list):
+        levels_ = [
+            obj._get_level_number(lev) if not isinstance(lev, int) else lev
+            for lev in level
+        ]
+    else:
+        raise ValueError("`level` must be of type `int`, `str` or list of such")
+    return levels_
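+# A small sketch (hypothetical two-level MultiIndex ``obj`` with names
+# ["outer", "inner"]): level specifications normalise to positional integers:
+#   refactor_levels(None, obj)          ->  [0, 1]
+#   refactor_levels("inner", obj)       ->  [1]
+#   refactor_levels([0, "inner"], obj)  ->  [0, 1]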
+
+
+class Tooltips:
+    """
+    An extension to ``Styler`` that allows for and manipulates tooltips on hover
+    of ``<td>`` cells in the HTML result.
+
+    Parameters
+    ----------
+    css_name: str, default "pd-t"
+        Name of the CSS class that controls visualisation of tooltips.
+    css_props: list-like, default; see Notes
+        List of (attr, value) tuples defining properties of the CSS class.
+    tooltips: DataFrame, default empty
+        DataFrame of strings aligned with underlying Styler data for tooltip
+        display.
+
+    Notes
+    -----
+    The default properties for the tooltip CSS class are:
+
+    - visibility: hidden
+    - position: absolute
+    - z-index: 1
+    - background-color: black
+    - color: white
+    - transform: translate(-20px, -20px)
+
+    Hidden visibility is a key prerequisite to the hover functionality, and should
+    always be included in any manual properties specification.
+    """
+
+    def __init__(
+        self,
+        css_props: CSSProperties = [
+            ("visibility", "hidden"),
+            ("position", "absolute"),
+            ("z-index", 1),
+            ("background-color", "black"),
+            ("color", "white"),
+            ("transform", "translate(-20px, -20px)"),
+        ],
+        css_name: str = "pd-t",
+        tooltips: DataFrame = DataFrame(),
+    ) -> None:
+        self.class_name = css_name
+        self.class_properties = css_props
+        self.tt_data = tooltips
+        self.table_styles: CSSStyles = []
+
+    @property
+    def _class_styles(self):
+        """
+        Combine the ``_Tooltips`` CSS class name and CSS properties to the format
+        required to extend the underlying ``Styler`` `table_styles` to allow
+        tooltips to render in HTML.
+
+        Returns
+        -------
+        styles : List
+        """
+        return [
+            {
+                "selector": f".{self.class_name}",
+                "props": maybe_convert_css_to_tuples(self.class_properties),
+            }
+        ]
+
+    def _pseudo_css(self, uuid: str, name: str, row: int, col: int, text: str):
+        """
+        For every table data-cell that has a valid tooltip (not None, NaN or
+        empty string), create two pseudo-CSS entries for the specific <td>
+        element id, which are added to the overall table styles:
+        an on-hover visibility change and a content change
+        dependent upon the user's chosen display string.
+
+        For example:
+            [{"selector": "T__row1_col1:hover .pd-t",
+              "props": [("visibility", "visible")]},
+             {"selector": "T__row1_col1 .pd-t::after",
+              "props": [("content", "Some Valid Text String")]}]
+
+        Parameters
+        ----------
+        uuid : str
+            The uuid of the Styler instance
+        name : str
+            The css-name of the class used for styling tooltips
+        row : int
+            The row index of the specified tooltip string data
+        col : int
+            The col index of the specified tooltip string data
+        text : str
+            The textual content of the tooltip to be displayed in HTML.
+
+        Returns
+        -------
+        pseudo_css : List
+        """
+        selector_id = "#T_" + uuid + "_row" + str(row) + "_col" + str(col)
+        return [
+            {
+                "selector": selector_id + f":hover .{name}",
+                "props": [("visibility", "visible")],
+            },
+            {
+                "selector": selector_id + f" .{name}::after",
+                "props": [("content", f'"{text}"')],
+            },
+        ]
+
+ def _translate(self, styler: StylerRenderer, d: dict):
2076
+ """
2077
+ Mutate the render dictionary to allow for tooltips:
2078
+
2079
+ - Add ``<span>`` HTML element to each data cells ``display_value``. Ignores
2080
+ headers.
2081
+ - Add table level CSS styles to control pseudo classes.
2082
+
2083
+ Parameters
2084
+ ----------
2085
+ styler_data : DataFrame
2086
+ Underlying ``Styler`` DataFrame used for reindexing.
2087
+ uuid : str
2088
+ The underlying ``Styler`` uuid for CSS id.
2089
+ d : dict
2090
+ The dictionary prior to final render
2091
+
2092
+ Returns
2093
+ -------
2094
+ render_dict : Dict
2095
+ """
2096
+ self.tt_data = self.tt_data.reindex_like(styler.data)
2097
+ if self.tt_data.empty:
2098
+ return d
2099
+
2100
+ name = self.class_name
2101
+ mask = (self.tt_data.isna()) | (self.tt_data.eq("")) # empty string = no ttip
2102
+ self.table_styles = [
2103
+ style
2104
+ for sublist in [
2105
+ self._pseudo_css(styler.uuid, name, i, j, str(self.tt_data.iloc[i, j]))
2106
+ for i in range(len(self.tt_data.index))
2107
+ for j in range(len(self.tt_data.columns))
2108
+ if not (
2109
+ mask.iloc[i, j]
2110
+ or i in styler.hidden_rows
2111
+ or j in styler.hidden_columns
2112
+ )
2113
+ ]
2114
+ for style in sublist
2115
+ ]
2116
+
2117
+ if self.table_styles:
2118
+ # add span class to every cell only if at least 1 non-empty tooltip
2119
+ for row in d["body"]:
2120
+ for item in row:
2121
+ if item["type"] == "td":
2122
+ item["display_value"] = (
2123
+ str(item["display_value"])
2124
+ + f'<span class="{self.class_name}"></span>'
2125
+ )
2126
+ d["table_styles"].extend(self._class_styles)
2127
+ d["table_styles"].extend(self.table_styles)
2128
+
2129
+ return d
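+
+     # Editor's note -- an illustrative sketch, not part of the pandas source.
+     # ``_Tooltips`` is normally driven through the public ``Styler.set_tooltips``;
+     # the exact selector ids in the output depend on the Styler's uuid:
+     #
+     #     import pandas as pd
+     #
+     #     df = pd.DataFrame({"A": [1, 2]})
+     #     ttips = pd.DataFrame({"A": ["first", ""]})  # "" -> no tooltip
+     #     html = df.style.set_tooltips(ttips).to_html()
+     #     assert '<span class="pd-t"></span>' in html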
+
+
+ def _parse_latex_table_wrapping(table_styles: CSSStyles, caption: str | None) -> bool:
+     """
+     Indicate whether LaTeX {tabular} should be wrapped with a {table} environment.
+
+     Parse ``table_styles`` and detect any selectors which must be included outside
+     of {tabular}, in which case wrapping must occur and True is returned; a caption
+     also requires wrapping, so its presence likewise returns True.
+     """
+     IGNORED_WRAPPERS = ["toprule", "midrule", "bottomrule", "column_format"]
+     # ignored selectors are included with {tabular} so do not need wrapping
+     return (
+         table_styles is not None
+         and any(d["selector"] not in IGNORED_WRAPPERS for d in table_styles)
+     ) or caption is not None
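+
+ # Editor's note -- illustrative only, not part of the pandas source: rule
+ # selectors alone do not force a {table} wrapper, but a caption does.
+ #
+ #     _parse_latex_table_wrapping(
+ #         [{"selector": "toprule", "props": [("", "\\toprule")]}], caption=None
+ #     )  # -> False
+ #     _parse_latex_table_wrapping([], caption="My caption")  # -> True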
+
+
+ def _parse_latex_table_styles(table_styles: CSSStyles, selector: str) -> str | None:
+     """
+     Return the first 'props' 'value' from ``table_styles`` identified by ``selector``.
+
+     Examples
+     --------
+     >>> table_styles = [{'selector': 'foo', 'props': [('attr','value')]},
+     ...                 {'selector': 'bar', 'props': [('attr', 'overwritten')]},
+     ...                 {'selector': 'bar', 'props': [('a1', 'baz'), ('a2', 'ignore')]}]
+     >>> _parse_latex_table_styles(table_styles, selector='bar')
+     'baz'
+
+     Notes
+     -----
+     The replacement of "§" with ":" works around the CSS problem that ":" has
+     structural significance and so cannot appear in a props value, yet it is
+     often required in LaTeX labels.
+     """
+     for style in table_styles[::-1]:  # in reverse for most recently applied style
+         if style["selector"] == selector:
+             return str(style["props"][0][1]).replace("§", ":")
+     return None
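+
+ # Editor's note -- illustrative only, not part of the pandas source: the "§"
+ # placeholder lets users pass ":" through CSS-like props, e.g.
+ #
+ #     styles = [{"selector": "label", "props": [("", "fig§item")]}]
+ #     _parse_latex_table_styles(styles, selector="label")  # -> 'fig:item'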
+
+
+ def _parse_latex_cell_styles(
+     latex_styles: CSSList, display_value: str, convert_css: bool = False
+ ) -> str:
+     r"""
+     Mutate the ``display_value`` string by nesting it inside LaTeX commands taken
+     from ``latex_styles``.
+
+     This method builds a recursive LaTeX chain of commands based on the
+     CSSList input, nested around ``display_value``.
+
+     If a CSS style is given as ('<command>', '<options>') this is translated to
+     '\<command><options> display_value', and this value is treated as the
+     display value for the next iteration.
+
+     The most recent style forms the inner component, for example for styles:
+     `[('c1', 'o1'), ('c2', 'o2')]` this returns: `\c1o1 \c2o2 display_value`
+
+     Sometimes LaTeX commands have to be wrapped with curly braces in different ways:
+     We create some parsing flags to identify the different behaviours:
+
+     - `--rwrap` : `\<command><options>{<display_value>}`
+     - `--wrap` : `{\<command><options> <display_value>}`
+     - `--nowrap` : `\<command><options> <display_value>`
+     - `--lwrap` : `{\<command><options>} <display_value>`
+     - `--dwrap` : `{\<command><options>}{<display_value>}`
+
+     For example for styles:
+     `[('c1', 'o1--wrap'), ('c2', 'o2')]` this returns: `{\c1o1 \c2o2 display_value}`
+     """
+     if convert_css:
+         latex_styles = _parse_latex_css_conversion(latex_styles)
+     for command, options in latex_styles[::-1]:  # in reverse for most recent style
+         formatter = {
+             "--wrap": f"{{\\{command}--to_parse {display_value}}}",
+             "--nowrap": f"\\{command}--to_parse {display_value}",
+             "--lwrap": f"{{\\{command}--to_parse}} {display_value}",
+             "--rwrap": f"\\{command}--to_parse{{{display_value}}}",
+             "--dwrap": f"{{\\{command}--to_parse}}{{{display_value}}}",
+         }
+         display_value = f"\\{command}{options} {display_value}"
+         for arg in ["--nowrap", "--wrap", "--lwrap", "--rwrap", "--dwrap"]:
+             if arg in str(options):
+                 display_value = formatter[arg].replace(
+                     "--to_parse", _parse_latex_options_strip(value=options, arg=arg)
+                 )
+                 break  # only ever one purposeful entry
+     return display_value
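+
+ # Editor's note -- illustrative only, not part of the pandas source: with an
+ # ``--lwrap`` flag the command is wrapped in its own brace group,
+ #
+ #     _parse_latex_cell_styles([("cellcolor", "[HTML]{FF0000}--lwrap")], "text")
+ #     # -> '{\\cellcolor[HTML]{FF0000}} text'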
+
+
+ def _parse_latex_header_span(
+     cell: dict[str, Any],
+     multirow_align: str,
+     multicol_align: str,
+     wrap: bool = False,
+     convert_css: bool = False,
+ ) -> str:
+     r"""
+     Refactor the cell `display_value` if a 'colspan' or 'rowspan' attribute is present.
+
+     'rowspan' and 'colspan' do not occur simultaneously. If they are detected then
+     the `display_value` is altered to a LaTeX `multirow` or `multicolumn` command
+     respectively, with the appropriate cell-span.
+
+     ``wrap`` is used to enclose the `display_value` in braces, which is needed for
+     column headers when using the {siunitx} package.
+
+     Requires the package {multirow}, whereas multicolumn support is usually built
+     in to the {tabular} environment.
+
+     Examples
+     --------
+     >>> cell = {'cellstyle': '', 'display_value':'text', 'attributes': 'colspan="3"'}
+     >>> _parse_latex_header_span(cell, 't', 'c')
+     '\\multicolumn{3}{c}{text}'
+     """
+     display_val = _parse_latex_cell_styles(
+         cell["cellstyle"], cell["display_value"], convert_css
+     )
+     if "attributes" in cell:
+         attrs = cell["attributes"]
+         if 'colspan="' in attrs:
+             colspan = attrs[attrs.find('colspan="') + 9 :]  # len('colspan="') = 9
+             colspan = int(colspan[: colspan.find('"')])
+             if "naive-l" == multicol_align:
+                 out = f"{{{display_val}}}" if wrap else f"{display_val}"
+                 blanks = " & {}" if wrap else " &"
+                 return out + blanks * (colspan - 1)
+             elif "naive-r" == multicol_align:
+                 out = f"{{{display_val}}}" if wrap else f"{display_val}"
+                 blanks = "{} & " if wrap else "& "
+                 return blanks * (colspan - 1) + out
+             return f"\\multicolumn{{{colspan}}}{{{multicol_align}}}{{{display_val}}}"
+         elif 'rowspan="' in attrs:
+             if multirow_align == "naive":
+                 return display_val
+             rowspan = attrs[attrs.find('rowspan="') + 9 :]
+             rowspan = int(rowspan[: rowspan.find('"')])
+             return f"\\multirow[{multirow_align}]{{{rowspan}}}{{*}}{{{display_val}}}"
+     if wrap:
+         return f"{{{display_val}}}"
+     else:
+         return display_val
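+
+ # Editor's note -- illustrative only, not part of the pandas source: the
+ # rowspan branch produces a {multirow} command,
+ #
+ #     cell = {"cellstyle": "", "display_value": "text", "attributes": 'rowspan="2"'}
+ #     _parse_latex_header_span(cell, "c", "r")
+ #     # -> '\\multirow[c]{2}{*}{text}'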
+
+
+ def _parse_latex_options_strip(value: str | float, arg: str) -> str:
+     """
+     Strip a css_value, which may have LaTeX wrapping arguments, css comment
+     identifiers, and whitespace, to a valid string for LaTeX options parsing.
+
+     For example: 'red /* --wrap */ ' --> 'red'
+     """
+     return str(value).replace(arg, "").replace("/*", "").replace("*/", "").strip()
+
+
+ def _parse_latex_css_conversion(styles: CSSList) -> CSSList:
+     """
+     Convert CSS (attribute, value) pairs to equivalent LaTeX (command, options) pairs.
+
+     Conversion is ignored for values tagged with the `--latex` option; pairs for
+     which no conversion is found are skipped.
+     """
+
+     def font_weight(value, arg):
+         if value in ("bold", "bolder"):
+             return "bfseries", f"{arg}"
+         return None
+
+     def font_style(value, arg):
+         if value == "italic":
+             return "itshape", f"{arg}"
+         if value == "oblique":
+             return "slshape", f"{arg}"
+         return None
+
+     def color(value, user_arg, command, comm_arg):
+         """
+         CSS colors have 5 formats to process:
+
+         - 6 digit hex code: "#ff23ee" --> [HTML]{FF23EE}
+         - 3 digit hex code: "#f0e" --> [HTML]{FF00EE}
+         - rgba: rgba(128, 255, 0, 0.5) --> [rgb]{0.502, 1.000, 0.000}
+         - rgb: rgb(128, 255, 0) --> [rgb]{0.502, 1.000, 0.000}
+         - string: red --> {red}
+
+         Additionally rgb or rgba can be expressed in % which is also parsed.
+         """
+         arg = user_arg if user_arg != "" else comm_arg
+
+         if value[0] == "#" and len(value) == 7:  # color is hex code
+             return command, f"[HTML]{{{value[1:].upper()}}}{arg}"
+         if value[0] == "#" and len(value) == 4:  # color is short hex code
+             val = f"{value[1].upper()*2}{value[2].upper()*2}{value[3].upper()*2}"
+             return command, f"[HTML]{{{val}}}{arg}"
+         elif value[:3] == "rgb":  # color is rgb or rgba
+             r = re.findall("(?<=\\()[0-9\\s%]+(?=,)", value)[0].strip()
+             r = float(r[:-1]) / 100 if "%" in r else int(r) / 255
+             g = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[0].strip()
+             g = float(g[:-1]) / 100 if "%" in g else int(g) / 255
+             if value[3] == "a":  # color is rgba
+                 b = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[1].strip()
+             else:  # color is rgb
+                 b = re.findall("(?<=,)[0-9\\s%]+(?=\\))", value)[0].strip()
+             b = float(b[:-1]) / 100 if "%" in b else int(b) / 255
+             return command, f"[rgb]{{{r:.3f}, {g:.3f}, {b:.3f}}}{arg}"
+         else:
+             return command, f"{{{value}}}{arg}"  # color is likely string-named
+
+     CONVERTED_ATTRIBUTES: dict[str, Callable] = {
+         "font-weight": font_weight,
+         "background-color": partial(color, command="cellcolor", comm_arg="--lwrap"),
+         "color": partial(color, command="color", comm_arg=""),
+         "font-style": font_style,
+     }
+
+     latex_styles: CSSList = []
+     for attribute, value in styles:
+         if isinstance(value, str) and "--latex" in value:
+             # return the style without conversion but drop '--latex'
+             latex_styles.append((attribute, value.replace("--latex", "")))
+         if attribute in CONVERTED_ATTRIBUTES:
+             arg = ""
+             for x in ["--wrap", "--nowrap", "--lwrap", "--dwrap", "--rwrap"]:
+                 if x in str(value):
+                     arg, value = x, _parse_latex_options_strip(value, x)
+                     break
+             latex_style = CONVERTED_ATTRIBUTES[attribute](value, arg)
+             if latex_style is not None:
+                 latex_styles.extend([latex_style])
+     return latex_styles
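+
+ # Editor's note -- illustrative only, not part of the pandas source:
+ #
+ #     _parse_latex_css_conversion([("font-weight", "bold"), ("color", "#ff0000")])
+ #     # -> [('bfseries', ''), ('color', '[HTML]{FF0000}')]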
+
+
+ def _escape_latex(s: str) -> str:
+     r"""
+     Replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, ``{``, ``}``,
+     ``~``, ``^``, and ``\`` in the string with LaTeX-safe sequences.
+
+     Use this if you need to display text that might contain such characters in LaTeX.
+
+     Parameters
+     ----------
+     s : str
+         Input to be escaped
+
+     Returns
+     -------
+     str :
+         Escaped string
+     """
+     return (
+         s.replace("\\", "ab2§=§8yz")  # rare string for final conversion: avoid \\ clash
+         .replace("ab2§=§8yz ", "ab2§=§8yz\\space ")  # since \backslash gobbles spaces
+         .replace("&", "\\&")
+         .replace("%", "\\%")
+         .replace("$", "\\$")
+         .replace("#", "\\#")
+         .replace("_", "\\_")
+         .replace("{", "\\{")
+         .replace("}", "\\}")
+         .replace("~ ", "~\\space ")  # since \textasciitilde gobbles spaces
+         .replace("~", "\\textasciitilde ")
+         .replace("^ ", "^\\space ")  # since \textasciicircum gobbles spaces
+         .replace("^", "\\textasciicircum ")
+         .replace("ab2§=§8yz", "\\textbackslash ")
+     )
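+
+ # Editor's note -- illustrative only, not part of the pandas source:
+ #
+ #     _escape_latex("50% & more_fun")   # -> '50\\% \\& more\\_fun'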
+
+
+ def _math_mode_with_dollar(s: str) -> str:
+     r"""
+     All characters in LaTeX math mode are preserved.
+
+     The substrings in LaTeX math mode, which start with
+     the character ``$`` and end with ``$``, are preserved
+     without escaping. Otherwise regular LaTeX escaping applies.
+
+     Parameters
+     ----------
+     s : str
+         Input to be escaped
+
+     Returns
+     -------
+     str :
+         Escaped string
+     """
+     s = s.replace(r"\$", r"rt8§=§7wz")
+     pattern = re.compile(r"\$.*?\$")
+     pos = 0
+     ps = pattern.search(s, pos)
+     res = []
+     while ps:
+         res.append(_escape_latex(s[pos : ps.span()[0]]))
+         res.append(ps.group())
+         pos = ps.span()[1]
+         ps = pattern.search(s, pos)
+
+     res.append(_escape_latex(s[pos : len(s)]))
+     return "".join(res).replace(r"rt8§=§7wz", r"\$")
+
+
+ def _math_mode_with_parentheses(s: str) -> str:
+     r"""
+     All characters in LaTeX math mode are preserved.
+
+     The substrings in LaTeX math mode, which start with
+     the character ``\(`` and end with ``\)``, are preserved
+     without escaping. Otherwise regular LaTeX escaping applies.
+
+     Parameters
+     ----------
+     s : str
+         Input to be escaped
+
+     Returns
+     -------
+     str :
+         Escaped string
+     """
+     s = s.replace(r"\(", r"LEFT§=§6yzLEFT").replace(r"\)", r"RIGHTab5§=§RIGHT")
+     res = []
+     for item in re.split(r"LEFT§=§6yz|ab5§=§RIGHT", s):
+         if item.startswith("LEFT") and item.endswith("RIGHT"):
+             res.append(item.replace("LEFT", r"\(").replace("RIGHT", r"\)"))
+         elif "LEFT" in item and "RIGHT" in item:
+             res.append(
+                 _escape_latex(item).replace("LEFT", r"\(").replace("RIGHT", r"\)")
+             )
+         else:
+             res.append(
+                 _escape_latex(item)
+                 .replace("LEFT", r"\textbackslash (")
+                 .replace("RIGHT", r"\textbackslash )")
+             )
+     return "".join(res)
+
+
+ def _escape_latex_math(s: str) -> str:
+     r"""
+     All characters in LaTeX math mode are preserved.
+
+     The substrings in LaTeX math mode, which are either surrounded by two ``$``
+     characters, or start with the character ``\(`` and end with ``\)``,
+     are preserved without escaping. Otherwise regular LaTeX escaping applies.
+
+     Parameters
+     ----------
+     s : str
+         Input to be escaped
+
+     Returns
+     -------
+     str :
+         Escaped string
+     """
+     s = s.replace(r"\$", r"rt8§=§7wz")
+     ps_d = re.compile(r"\$.*?\$").search(s, 0)
+     ps_p = re.compile(r"\(.*?\)").search(s, 0)
+     mode = []
+     if ps_d:
+         mode.append(ps_d.span()[0])
+     if ps_p:
+         mode.append(ps_p.span()[0])
+     if len(mode) == 0:
+         return _escape_latex(s.replace(r"rt8§=§7wz", r"\$"))
+     if s[mode[0]] == r"$":
+         return _math_mode_with_dollar(s.replace(r"rt8§=§7wz", r"\$"))
+     if s[mode[0] - 1 : mode[0] + 1] == r"\(":
+         return _math_mode_with_parentheses(s.replace(r"rt8§=§7wz", r"\$"))
+     else:
+         return _escape_latex(s.replace(r"rt8§=§7wz", r"\$"))
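+
+ # Editor's note -- illustrative only, not part of the pandas source: the
+ # dispatcher above selects the dollar or parenthesis escaper from the first
+ # math-mode marker found,
+ #
+ #     _escape_latex_math(r"profit $x^2$ & loss")
+ #     # -> 'profit $x^2$ \\& loss'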