Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/_util.py +34 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py +747 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__init__.py +19 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_base.py +1659 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py +253 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py +127 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_util.py +334 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py +143 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py +284 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__init__.py +9 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/_color_data.py +157 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/console.py +94 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/css.py +421 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/excel.py +962 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/format.py +2058 -0
- env-llmeval/lib/python3.10/site-packages/pandas/io/formats/html.py +646 -0
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (321 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc
ADDED
Binary file (1.19 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc
ADDED
Binary file (1.31 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc
ADDED
Binary file (5.24 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc
ADDED
Binary file (27.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc
ADDED
Binary file (4.24 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc
ADDED
Binary file (8.79 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc
ADDED
Binary file (36.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc
ADDED
Binary file (7.65 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc
ADDED
Binary file (18.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc
ADDED
Binary file (5.83 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc
ADDED
Binary file (138 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc
ADDED
Binary file (2.31 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc
ADDED
Binary file (79.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc
ADDED
Binary file (103 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc
ADDED
Binary file (34 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/_util.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import Callable
|
4 |
+
|
5 |
+
from pandas.compat._optional import import_optional_dependency
|
6 |
+
|
7 |
+
import pandas as pd
|
8 |
+
|
9 |
+
|
10 |
+
def _arrow_dtype_mapping() -> dict:
    """
    Map pyarrow primitive types to the corresponding pandas nullable
    (masked) extension dtypes.

    Returns
    -------
    dict
        Keys are pyarrow DataType instances, values are pandas extension
        dtype instances.

    Notes
    -----
    Raises an ImportError (via ``import_optional_dependency``) when
    pyarrow is not installed.
    """
    pa = import_optional_dependency("pyarrow")
    return {
        pa.int8(): pd.Int8Dtype(),
        pa.int16(): pd.Int16Dtype(),
        pa.int32(): pd.Int32Dtype(),
        pa.int64(): pd.Int64Dtype(),
        pa.uint8(): pd.UInt8Dtype(),
        pa.uint16(): pd.UInt16Dtype(),
        pa.uint32(): pd.UInt32Dtype(),
        pa.uint64(): pd.UInt64Dtype(),
        pa.bool_(): pd.BooleanDtype(),
        pa.string(): pd.StringDtype(),
        pa.float32(): pd.Float32Dtype(),
        pa.float64(): pd.Float64Dtype(),
    }
|
26 |
+
|
27 |
+
|
28 |
+
def arrow_string_types_mapper() -> Callable:
    """
    Return a types-mapper callable for Arrow-to-pandas conversion.

    The returned callable maps pyarrow ``string`` and ``large_string``
    types to ``pd.StringDtype(storage="pyarrow_numpy")`` and returns
    ``None`` for every other type.
    """
    pa = import_optional_dependency("pyarrow")

    # Both Arrow string flavors resolve to the same pandas dtype.
    string_dtype = pd.StringDtype(storage="pyarrow_numpy")
    mapping = {
        pa.string(): string_dtype,
        pa.large_string(): string_dtype,
    }
    return mapping.get
|
env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py
ADDED
@@ -0,0 +1,747 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Pyperclip
|
3 |
+
|
4 |
+
A cross-platform clipboard module for Python,
|
5 |
+
with copy & paste functions for plain text.
|
6 |
+
By Al Sweigart [email protected]
|
7 |
+
Licence at LICENSES/PYPERCLIP_LICENSE
|
8 |
+
|
9 |
+
Usage:
|
10 |
+
import pyperclip
|
11 |
+
pyperclip.copy('The text to be copied to the clipboard.')
|
12 |
+
spam = pyperclip.paste()
|
13 |
+
|
14 |
+
if not pyperclip.is_available():
|
15 |
+
print("Copy functionality unavailable!")
|
16 |
+
|
17 |
+
On Windows, no additional modules are needed.
|
18 |
+
On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli
|
19 |
+
commands. (These commands should come with OS X.).
|
20 |
+
On Linux, install xclip, xsel, or wl-clipboard (for "wayland" sessions) via
|
21 |
+
package manager.
|
22 |
+
For example, in Debian:
|
23 |
+
sudo apt-get install xclip
|
24 |
+
sudo apt-get install xsel
|
25 |
+
sudo apt-get install wl-clipboard
|
26 |
+
|
27 |
+
Otherwise on Linux, you will need the PyQt5 modules installed.
|
28 |
+
|
29 |
+
This module does not work with PyGObject yet.
|
30 |
+
|
31 |
+
Cygwin is currently not supported.
|
32 |
+
|
33 |
+
Security Note: This module runs programs with these names:
|
34 |
+
- pbcopy
|
35 |
+
- pbpaste
|
36 |
+
- xclip
|
37 |
+
- xsel
|
38 |
+
- wl-copy/wl-paste
|
39 |
+
- klipper
|
40 |
+
- qdbus
|
41 |
+
A malicious user could rename or add programs with these names, tricking
|
42 |
+
Pyperclip into running them with whatever permissions the Python process has.
|
43 |
+
|
44 |
+
"""
|
45 |
+
|
46 |
+
__version__ = "1.8.2"
|
47 |
+
|
48 |
+
|
49 |
+
import contextlib
|
50 |
+
import ctypes
|
51 |
+
from ctypes import (
|
52 |
+
c_size_t,
|
53 |
+
c_wchar,
|
54 |
+
c_wchar_p,
|
55 |
+
get_errno,
|
56 |
+
sizeof,
|
57 |
+
)
|
58 |
+
import os
|
59 |
+
import platform
|
60 |
+
from shutil import which as _executable_exists
|
61 |
+
import subprocess
|
62 |
+
import time
|
63 |
+
import warnings
|
64 |
+
|
65 |
+
from pandas.errors import (
|
66 |
+
PyperclipException,
|
67 |
+
PyperclipWindowsException,
|
68 |
+
)
|
69 |
+
from pandas.util._exceptions import find_stack_level
|
70 |
+
|
71 |
+
# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
|
72 |
+
# Thus, we need to detect the presence of $DISPLAY manually
|
73 |
+
# and not load PyQt4 if it is absent.
|
74 |
+
HAS_DISPLAY = os.getenv("DISPLAY")
|
75 |
+
|
76 |
+
EXCEPT_MSG = """
|
77 |
+
Pyperclip could not find a copy/paste mechanism for your system.
|
78 |
+
For more information, please visit
|
79 |
+
https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error
|
80 |
+
"""
|
81 |
+
|
82 |
+
ENCODING = "utf-8"
|
83 |
+
|
84 |
+
|
85 |
+
class PyperclipTimeoutException(PyperclipException):
    """Pyperclip-specific exception used for timeout conditions."""
|
87 |
+
|
88 |
+
|
89 |
+
def _stringifyText(text) -> str:
|
90 |
+
acceptedTypes = (str, int, float, bool)
|
91 |
+
if not isinstance(text, acceptedTypes):
|
92 |
+
raise PyperclipException(
|
93 |
+
f"only str, int, float, and bool values "
|
94 |
+
f"can be copied to the clipboard, not {type(text).__name__}"
|
95 |
+
)
|
96 |
+
return str(text)
|
97 |
+
|
98 |
+
|
99 |
+
def init_osx_pbcopy_clipboard():
    """Clipboard via the macOS ``pbcopy``/``pbpaste`` command-line tools."""

    def copy_osx_pbcopy(text):
        text = _stringifyText(text)  # convert non-str values to str
        with subprocess.Popen(
            ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True
        ) as proc:
            proc.communicate(input=text.encode(ENCODING))

    def paste_osx_pbcopy():
        with subprocess.Popen(
            ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True
        ) as proc:
            stdout, _ = proc.communicate()
        return stdout.decode(ENCODING)

    return copy_osx_pbcopy, paste_osx_pbcopy
|
115 |
+
|
116 |
+
|
117 |
+
def init_osx_pyobjc_clipboard():
    # macOS clipboard via the pyobjc bindings. ``AppKit`` and ``Foundation``
    # are imported as module globals by determine_clipboard() before this
    # initializer is chosen.
    def copy_osx_pyobjc(text):
        """Copy string argument to clipboard"""
        text = _stringifyText(text)  # Converts non-str values to str.
        newStr = Foundation.NSString.stringWithString_(text).nsstring()
        newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)
        board = AppKit.NSPasteboard.generalPasteboard()
        board.declareTypes_owner_([AppKit.NSStringPboardType], None)
        board.setData_forType_(newData, AppKit.NSStringPboardType)

    def paste_osx_pyobjc():
        """Returns contents of clipboard"""
        board = AppKit.NSPasteboard.generalPasteboard()
        content = board.stringForType_(AppKit.NSStringPboardType)
        return content

    return copy_osx_pyobjc, paste_osx_pyobjc
|
134 |
+
|
135 |
+
|
136 |
+
def init_qt_clipboard():
    # Qt-based clipboard. Import preference order is qtpy -> PyQt5 -> PyQt4;
    # a QApplication instance is required (reused if one already exists).
    global QApplication
    # $DISPLAY should exist

    # Try to import from qtpy, but if that fails try PyQt5 then PyQt4
    try:
        from qtpy.QtWidgets import QApplication
    except ImportError:
        try:
            from PyQt5.QtWidgets import QApplication
        except ImportError:
            from PyQt4.QtGui import QApplication

    app = QApplication.instance()
    if app is None:
        # No running Qt application: create a minimal one for clipboard access.
        app = QApplication([])

    def copy_qt(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        cb = app.clipboard()
        cb.setText(text)

    def paste_qt() -> str:
        cb = app.clipboard()
        return str(cb.text())

    return copy_qt, paste_qt
|
163 |
+
|
164 |
+
|
165 |
+
def init_xclip_clipboard():
    """Clipboard via the X11 ``xclip`` command-line tool."""
    DEFAULT_SELECTION = "c"
    PRIMARY_SELECTION = "p"

    def copy_xclip(text, primary=False):
        text = _stringifyText(text)  # convert non-str values to str
        selection = PRIMARY_SELECTION if primary else DEFAULT_SELECTION
        with subprocess.Popen(
            ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True
        ) as proc:
            proc.communicate(input=text.encode(ENCODING))

    def paste_xclip(primary=False):
        selection = PRIMARY_SELECTION if primary else DEFAULT_SELECTION
        with subprocess.Popen(
            ["xclip", "-selection", selection, "-o"],
            stdout=subprocess.PIPE,
            # Intentionally ignore extraneous output on stderr when clipboard
            # is empty
            stderr=subprocess.PIPE,
            close_fds=True,
        ) as proc:
            stdout = proc.communicate()[0]
        return stdout.decode(ENCODING)

    return copy_xclip, paste_xclip
|
194 |
+
|
195 |
+
|
196 |
+
def init_xsel_clipboard():
    """Clipboard via the X11 ``xsel`` command-line tool."""
    DEFAULT_SELECTION = "-b"
    PRIMARY_SELECTION = "-p"

    def copy_xsel(text, primary=False):
        text = _stringifyText(text)  # convert non-str values to str
        selection_flag = PRIMARY_SELECTION if primary else DEFAULT_SELECTION
        with subprocess.Popen(
            ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True
        ) as proc:
            proc.communicate(input=text.encode(ENCODING))

    def paste_xsel(primary=False):
        selection_flag = PRIMARY_SELECTION if primary else DEFAULT_SELECTION
        with subprocess.Popen(
            ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True
        ) as proc:
            stdout = proc.communicate()[0]
        return stdout.decode(ENCODING)

    return copy_xsel, paste_xsel
|
221 |
+
|
222 |
+
|
223 |
+
def init_wl_clipboard():
    """
    Clipboard via the Wayland ``wl-copy``/``wl-paste`` command-line tools.

    Returns
    -------
    tuple
        ``(copy, paste)`` callables.
    """
    PRIMARY_SELECTION = "-p"

    def copy_wl(text, primary=False):
        text = _stringifyText(text)  # Converts non-str values to str.
        args = ["wl-copy"]
        if primary:
            args.append(PRIMARY_SELECTION)
        if not text:
            # An empty string clears the clipboard rather than piping
            # empty input to wl-copy.
            args.append("--clear")
            subprocess.check_call(args, close_fds=True)
        else:
            # Use a context manager so the child's pipes are closed and the
            # process is reaped promptly (matches the other clipboard
            # initializers in this module, which all use ``with Popen``).
            with subprocess.Popen(args, stdin=subprocess.PIPE, close_fds=True) as p:
                p.communicate(input=text.encode(ENCODING))

    def paste_wl(primary=False):
        args = ["wl-paste", "-n"]
        if primary:
            args.append(PRIMARY_SELECTION)
        with subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True) as p:
            stdout, _stderr = p.communicate()
        return stdout.decode(ENCODING)

    return copy_wl, paste_wl
|
247 |
+
|
248 |
+
|
249 |
+
def init_klipper_clipboard():
    # KDE Klipper clipboard, driven through the qdbus command-line tool.
    def copy_klipper(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        with subprocess.Popen(
            [
                "qdbus",
                "org.kde.klipper",
                "/klipper",
                "setClipboardContents",
                text.encode(ENCODING),
            ],
            stdin=subprocess.PIPE,
            close_fds=True,
        ) as p:
            p.communicate(input=None)

    def paste_klipper():
        with subprocess.Popen(
            ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],
            stdout=subprocess.PIPE,
            close_fds=True,
        ) as p:
            stdout = p.communicate()[0]

        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
        # TODO: https://github.com/asweigart/pyperclip/issues/43
        clipboardContents = stdout.decode(ENCODING)
        # even if blank, Klipper will append a newline at the end
        assert len(clipboardContents) > 0
        # make sure that newline is there
        assert clipboardContents.endswith("\n")
        if clipboardContents.endswith("\n"):
            # Strip the trailing newline that Klipper appends.
            clipboardContents = clipboardContents[:-1]
        return clipboardContents

    return copy_klipper, paste_klipper
|
285 |
+
|
286 |
+
|
287 |
+
def init_dev_clipboard_clipboard():
    """Cygwin clipboard via the ``/dev/clipboard`` pseudo-file."""

    def copy_dev_clipboard(text):
        text = _stringifyText(text)  # convert non-str values to str
        if text == "":
            warnings.warn(
                "Pyperclip cannot copy a blank string to the clipboard on Cygwin. "
                "This is effectively a no-op.",
                stacklevel=find_stack_level(),
            )
        if "\r" in text:
            warnings.warn(
                "Pyperclip cannot handle \\r characters on Cygwin.",
                stacklevel=find_stack_level(),
            )

        with open("/dev/clipboard", "w", encoding="utf-8") as fd:
            fd.write(text)

    def paste_dev_clipboard() -> str:
        with open("/dev/clipboard", encoding="utf-8") as fd:
            return fd.read()

    return copy_dev_clipboard, paste_dev_clipboard
|
311 |
+
|
312 |
+
|
313 |
+
def init_no_clipboard():
    """
    Fallback used when no clipboard mechanism is available.

    Returns a ``(copy, paste)`` pair of stub objects that raise
    PyperclipException when called and are falsy under truth testing.
    """

    class ClipboardUnavailable:
        def __call__(self, *args, **kwargs):
            # Any copy/paste attempt explains how to fix the setup.
            raise PyperclipException(EXCEPT_MSG)

        def __bool__(self) -> bool:
            # Falsy so availability can be probed without raising.
            return False

    copy_stub = ClipboardUnavailable()
    paste_stub = ClipboardUnavailable()
    return copy_stub, paste_stub
|
322 |
+
|
323 |
+
|
324 |
+
# Windows-related clipboard functions:
|
325 |
+
class CheckedCall:
    """
    Wrap a ctypes function so that failures raise instead of passing silently.

    The wrapped callable is stored with ``super().__setattr__`` so that this
    class's own ``__setattr__`` — which forwards attribute writes (e.g.
    ``argtypes``/``restype``) to the wrapped function — does not recurse.
    """

    def __init__(self, f) -> None:
        super().__setattr__("f", f)

    def __call__(self, *args):
        result = self.f(*args)
        # A falsy return combined with a nonzero errno signals failure.
        if not result and get_errno():
            raise PyperclipWindowsException("Error calling " + self.f.__name__)
        return result

    def __setattr__(self, key, value):
        # Attribute assignment configures the underlying ctypes function.
        setattr(self.f, key, value)
|
337 |
+
|
338 |
+
|
339 |
+
def init_windows_clipboard():
    """Clipboard via the Win32 API (user32/kernel32 through ctypes)."""
    # The wintypes names are published as module globals so they are visible
    # outside this function after initialization.
    global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
    global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
    from ctypes.wintypes import (
        BOOL,
        DWORD,
        HANDLE,
        HGLOBAL,
        HINSTANCE,
        HMENU,
        HWND,
        INT,
        LPCSTR,
        LPVOID,
        UINT,
    )

    windll = ctypes.windll
    msvcrt = ctypes.CDLL("msvcrt")

    # Each API is wrapped in CheckedCall so a falsy return with a nonzero
    # errno raises PyperclipWindowsException instead of failing silently.
    safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
    safeCreateWindowExA.argtypes = [
        DWORD,
        LPCSTR,
        LPCSTR,
        DWORD,
        INT,
        INT,
        INT,
        INT,
        HWND,
        HMENU,
        HINSTANCE,
        LPVOID,
    ]
    safeCreateWindowExA.restype = HWND

    safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
    safeDestroyWindow.argtypes = [HWND]
    safeDestroyWindow.restype = BOOL

    # OpenClipboard is deliberately NOT wrapped in CheckedCall: failures are
    # retried in the clipboard() context manager below.
    OpenClipboard = windll.user32.OpenClipboard
    OpenClipboard.argtypes = [HWND]
    OpenClipboard.restype = BOOL

    safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
    safeCloseClipboard.argtypes = []
    safeCloseClipboard.restype = BOOL

    safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
    safeEmptyClipboard.argtypes = []
    safeEmptyClipboard.restype = BOOL

    safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
    safeGetClipboardData.argtypes = [UINT]
    safeGetClipboardData.restype = HANDLE

    safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
    safeSetClipboardData.argtypes = [UINT, HANDLE]
    safeSetClipboardData.restype = HANDLE

    safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
    safeGlobalAlloc.argtypes = [UINT, c_size_t]
    safeGlobalAlloc.restype = HGLOBAL

    safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
    safeGlobalLock.argtypes = [HGLOBAL]
    safeGlobalLock.restype = LPVOID

    safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
    safeGlobalUnlock.argtypes = [HGLOBAL]
    safeGlobalUnlock.restype = BOOL

    wcslen = CheckedCall(msvcrt.wcslen)
    wcslen.argtypes = [c_wchar_p]
    wcslen.restype = UINT

    GMEM_MOVEABLE = 0x0002
    CF_UNICODETEXT = 13

    @contextlib.contextmanager
    def window():
        """
        Context that provides a valid Windows hwnd.
        """
        # we really just need the hwnd, so setting "STATIC"
        # as predefined lpClass is just fine.
        hwnd = safeCreateWindowExA(
            0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None
        )
        try:
            yield hwnd
        finally:
            safeDestroyWindow(hwnd)

    @contextlib.contextmanager
    def clipboard(hwnd):
        """
        Context manager that opens the clipboard and prevents
        other applications from modifying the clipboard content.
        """
        # We may not get the clipboard handle immediately because
        # some other application is accessing it (?)
        # We try for at least 500ms to get the clipboard.
        t = time.time() + 0.5
        success = False
        while time.time() < t:
            success = OpenClipboard(hwnd)
            if success:
                break
            time.sleep(0.01)
        if not success:
            raise PyperclipWindowsException("Error calling OpenClipboard")

        try:
            yield
        finally:
            safeCloseClipboard()

    def copy_windows(text):
        # This function is heavily based on
        # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard

        text = _stringifyText(text)  # Converts non-str values to str.

        with window() as hwnd:
            # http://msdn.com/ms649048
            # If an application calls OpenClipboard with hwnd set to NULL,
            # EmptyClipboard sets the clipboard owner to NULL;
            # this causes SetClipboardData to fail.
            # => We need a valid hwnd to copy something.
            with clipboard(hwnd):
                safeEmptyClipboard()

                if text:
                    # http://msdn.com/ms649051
                    # If the hMem parameter identifies a memory object,
                    # the object must have been allocated using the
                    # function with the GMEM_MOVEABLE flag.
                    count = wcslen(text) + 1
                    handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar))
                    locked_handle = safeGlobalLock(handle)

                    ctypes.memmove(
                        c_wchar_p(locked_handle),
                        c_wchar_p(text),
                        count * sizeof(c_wchar),
                    )

                    safeGlobalUnlock(handle)
                    safeSetClipboardData(CF_UNICODETEXT, handle)

    def paste_windows():
        with clipboard(None):
            handle = safeGetClipboardData(CF_UNICODETEXT)
            if not handle:
                # GetClipboardData may return NULL with errno == NO_ERROR
                # if the clipboard is empty.
                # (Also, it may return a handle to an empty buffer,
                # but technically that's not empty)
                return ""
            return c_wchar_p(handle).value

    return copy_windows, paste_windows
|
503 |
+
|
504 |
+
|
505 |
+
def init_wsl_clipboard():
    """Clipboard for WSL, using the Windows ``clip.exe``/``powershell.exe``."""

    def copy_wsl(text):
        text = _stringifyText(text)  # convert non-str values to str
        with subprocess.Popen(
            ["clip.exe"], stdin=subprocess.PIPE, close_fds=True
        ) as proc:
            proc.communicate(input=text.encode(ENCODING))

    def paste_wsl():
        with subprocess.Popen(
            ["powershell.exe", "-command", "Get-Clipboard"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        ) as proc:
            stdout = proc.communicate()[0]
        # WSL appends "\r\n" to the contents.
        return stdout[:-2].decode(ENCODING)

    return copy_wsl, paste_wsl
|
523 |
+
|
524 |
+
|
525 |
+
# Automatic detection of clipboard mechanisms
|
526 |
+
# and importing is done in determine_clipboard():
|
527 |
+
def determine_clipboard():
    """
    Determine the OS/platform and set the copy() and paste() functions
    accordingly.
    """
    # Lazily-imported modules are stashed as globals so the selected
    # init_* function can use them.
    global Foundation, AppKit, qtpy, PyQt4, PyQt5

    # Setup for the CYGWIN platform:
    if (
        "cygwin" in platform.system().lower()
    ):  # Cygwin has a variety of values returned by platform.system(),
        # such as 'CYGWIN_NT-6.1'
        # FIXME(pyperclip#55): pyperclip currently does not support Cygwin,
        # see https://github.com/asweigart/pyperclip/issues/55
        if os.path.exists("/dev/clipboard"):
            warnings.warn(
                "Pyperclip's support for Cygwin is not perfect, "
                "see https://github.com/asweigart/pyperclip/issues/55",
                stacklevel=find_stack_level(),
            )
            return init_dev_clipboard_clipboard()

    # Setup for the WINDOWS platform:
    elif os.name == "nt" or platform.system() == "Windows":
        return init_windows_clipboard()

    if platform.system() == "Linux":
        # wslconfig.exe on PATH indicates a WSL environment.
        if _executable_exists("wslconfig.exe"):
            return init_wsl_clipboard()

    # Setup for the macOS platform:
    if os.name == "mac" or platform.system() == "Darwin":
        try:
            import AppKit
            import Foundation  # check if pyobjc is installed
        except ImportError:
            return init_osx_pbcopy_clipboard()
        else:
            return init_osx_pyobjc_clipboard()

    # Setup for the LINUX platform:
    if HAS_DISPLAY:
        if os.environ.get("WAYLAND_DISPLAY") and _executable_exists("wl-copy"):
            return init_wl_clipboard()
        if _executable_exists("xsel"):
            return init_xsel_clipboard()
        if _executable_exists("xclip"):
            return init_xclip_clipboard()
        if _executable_exists("klipper") and _executable_exists("qdbus"):
            return init_klipper_clipboard()

        try:
            # qtpy is a small abstraction layer that lets you write applications
            # using a single api call to either PyQt or PySide.
            # https://pypi.python.org/project/QtPy
            import qtpy  # check if qtpy is installed
        except ImportError:
            # If qtpy isn't installed, fall back on importing PyQt4.
            try:
                import PyQt5  # check if PyQt5 is installed
            except ImportError:
                try:
                    import PyQt4  # check if PyQt4 is installed
                except ImportError:
                    pass  # We want to fail fast for all non-ImportError exceptions.
                else:
                    return init_qt_clipboard()
            else:
                return init_qt_clipboard()
        else:
            return init_qt_clipboard()

    # Nothing matched: install stubs that raise with a helpful message.
    return init_no_clipboard()
|
600 |
+
|
601 |
+
|
602 |
+
def set_clipboard(clipboard):
    """
    Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how
    the copy() and paste() functions interact with the operating system to
    implement the copy/paste feature. The clipboard parameter must be one of:
        - pbcopy
        - pyobjc (default on macOS)
        - qt
        - xclip
        - xsel
        - klipper
        - windows (default on Windows)
        - no (this is what is set when no clipboard mechanism can be found)
    """
    global copy, paste

    init_by_name = {
        "pbcopy": init_osx_pbcopy_clipboard,
        "pyobjc": init_osx_pyobjc_clipboard,
        "qt": init_qt_clipboard,  # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
        "xclip": init_xclip_clipboard,
        "xsel": init_xsel_clipboard,
        "wl-clipboard": init_wl_clipboard,
        "klipper": init_klipper_clipboard,
        "windows": init_windows_clipboard,
        "no": init_no_clipboard,
    }

    if clipboard not in init_by_name:
        # List every accepted mechanism name in the error message.
        allowed = ", ".join(repr(name) for name in init_by_name)
        raise ValueError(f"Argument must be one of {allowed}")

    # Rebind the module-level copy()/paste() pair to the chosen mechanism.
    copy, paste = init_by_name[clipboard]()
|
638 |
+
|
639 |
+
|
640 |
+
def lazy_load_stub_copy(text):
    """
    Placeholder for copy() that resolves the real implementation on first use.

    Importing pyperclip does not run determine_clipboard() up front (which
    could pull in a heavy backend such as PyQt4 the user never wanted).
    Instead, the first copy()/paste() call either uses whatever backend
    set_clipboard() selected in the meantime, or — as here — falls back on
    the backend determine_clipboard() picks automatically.
    """
    global copy, paste
    # Replace both stubs with the real functions, then delegate this call.
    copy, paste = determine_clipboard()
    return copy(text)
|
660 |
+
|
661 |
+
|
662 |
+
def lazy_load_stub_paste():
    """
    Placeholder for paste() that resolves the real implementation on first use.

    Importing pyperclip does not run determine_clipboard() up front (which
    could pull in a heavy backend such as PyQt4 the user never wanted).
    Instead, the first copy()/paste() call either uses whatever backend
    set_clipboard() selected in the meantime, or — as here — falls back on
    the backend determine_clipboard() picks automatically.
    """
    global copy, paste
    # Replace both stubs with the real functions, then delegate this call.
    copy, paste = determine_clipboard()
    return paste()
|
682 |
+
|
683 |
+
|
684 |
+
def is_available() -> bool:
    # A real backend is available once neither name still points at its
    # lazy-loading stub.
    return not (copy == lazy_load_stub_copy or paste == lazy_load_stub_paste)
|
686 |
+
|
687 |
+
|
688 |
+
# Initially, copy() and paste() are set to lazy loading wrappers which will
# set `copy` and `paste` to real functions the first time they're used, unless
# set_clipboard() or determine_clipboard() is called first.
copy = lazy_load_stub_copy
paste = lazy_load_stub_paste
|
692 |
+
|
693 |
+
|
694 |
+
def waitForPaste(timeout=None):
    """This function call blocks until a non-empty text string exists on the
    clipboard. It returns this text.

    This function raises PyperclipTimeoutException if timeout was set to
    a number of seconds that has elapsed without non-empty text being put on
    the clipboard."""
    # Use a monotonic clock for the deadline: time.time() is wall-clock and
    # can jump backwards/forwards with NTP or manual clock changes, which
    # would make the timeout fire too early or never.
    startTime = time.monotonic()
    while True:
        clipboardText = paste()
        if clipboardText != "":
            return clipboardText

        # Check the deadline *before* sleeping so a short timeout is honored
        # promptly instead of always paying one extra polling interval.
        if timeout is not None and time.monotonic() > startTime + timeout:
            raise PyperclipTimeoutException(
                "waitForPaste() timed out after " + str(timeout) + " seconds."
            )
        time.sleep(0.01)
|
712 |
+
|
713 |
+
|
714 |
+
def waitForNewPaste(timeout=None):
    """This function call blocks until a new text string exists on the
    clipboard that is different from the text that was there when the function
    was first called. It returns this text.

    This function raises PyperclipTimeoutException if timeout was set to
    a number of seconds that has elapsed without non-empty text being put on
    the clipboard."""
    # Use a monotonic clock for the deadline: time.time() is wall-clock and
    # can jump with NTP or manual clock changes, corrupting the timeout.
    startTime = time.monotonic()
    originalText = paste()  # snapshot to compare later polls against
    while True:
        currentText = paste()
        if currentText != originalText:
            return currentText

        # Check the deadline *before* sleeping so a short timeout is honored
        # promptly instead of always paying one extra polling interval.
        if timeout is not None and time.monotonic() > startTime + timeout:
            raise PyperclipTimeoutException(
                "waitForNewPaste() timed out after " + str(timeout) + " seconds."
            )
        time.sleep(0.01)
|
734 |
+
|
735 |
+
|
736 |
+
# Public API of this module.
__all__ = [
    "copy",
    "paste",
    "waitForPaste",
    "waitForNewPaste",
    "set_clipboard",
    "determine_clipboard",
]

# pandas aliases
clipboard_get = paste
clipboard_set = copy
|
env-llmeval/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (19.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__init__.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pandas.io.excel._base import (
|
2 |
+
ExcelFile,
|
3 |
+
ExcelWriter,
|
4 |
+
read_excel,
|
5 |
+
)
|
6 |
+
from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
|
7 |
+
from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
|
8 |
+
from pandas.io.excel._util import register_writer
|
9 |
+
from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter
|
10 |
+
|
11 |
+
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
|
12 |
+
|
13 |
+
|
14 |
+
register_writer(_OpenpyxlWriter)
|
15 |
+
|
16 |
+
register_writer(_XlsxWriter)
|
17 |
+
|
18 |
+
|
19 |
+
register_writer(_ODSWriter)
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (48.1 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc
ADDED
Binary file (7.19 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc
ADDED
Binary file (4.16 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_base.py
ADDED
@@ -0,0 +1,1659 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from collections.abc import (
|
4 |
+
Hashable,
|
5 |
+
Iterable,
|
6 |
+
Mapping,
|
7 |
+
Sequence,
|
8 |
+
)
|
9 |
+
import datetime
|
10 |
+
from functools import partial
|
11 |
+
from io import BytesIO
|
12 |
+
import os
|
13 |
+
from textwrap import fill
|
14 |
+
from typing import (
|
15 |
+
IO,
|
16 |
+
TYPE_CHECKING,
|
17 |
+
Any,
|
18 |
+
Callable,
|
19 |
+
Generic,
|
20 |
+
Literal,
|
21 |
+
TypeVar,
|
22 |
+
Union,
|
23 |
+
cast,
|
24 |
+
overload,
|
25 |
+
)
|
26 |
+
import warnings
|
27 |
+
import zipfile
|
28 |
+
|
29 |
+
from pandas._config import config
|
30 |
+
|
31 |
+
from pandas._libs import lib
|
32 |
+
from pandas._libs.parsers import STR_NA_VALUES
|
33 |
+
from pandas.compat._optional import (
|
34 |
+
get_version,
|
35 |
+
import_optional_dependency,
|
36 |
+
)
|
37 |
+
from pandas.errors import EmptyDataError
|
38 |
+
from pandas.util._decorators import (
|
39 |
+
Appender,
|
40 |
+
doc,
|
41 |
+
)
|
42 |
+
from pandas.util._exceptions import find_stack_level
|
43 |
+
from pandas.util._validators import check_dtype_backend
|
44 |
+
|
45 |
+
from pandas.core.dtypes.common import (
|
46 |
+
is_bool,
|
47 |
+
is_float,
|
48 |
+
is_integer,
|
49 |
+
is_list_like,
|
50 |
+
)
|
51 |
+
|
52 |
+
from pandas.core.frame import DataFrame
|
53 |
+
from pandas.core.shared_docs import _shared_docs
|
54 |
+
from pandas.util.version import Version
|
55 |
+
|
56 |
+
from pandas.io.common import (
|
57 |
+
IOHandles,
|
58 |
+
get_handle,
|
59 |
+
stringify_path,
|
60 |
+
validate_header_arg,
|
61 |
+
)
|
62 |
+
from pandas.io.excel._util import (
|
63 |
+
fill_mi_header,
|
64 |
+
get_default_engine,
|
65 |
+
get_writer,
|
66 |
+
maybe_convert_usecols,
|
67 |
+
pop_header_name,
|
68 |
+
)
|
69 |
+
from pandas.io.parsers import TextParser
|
70 |
+
from pandas.io.parsers.readers import validate_integer
|
71 |
+
|
72 |
+
if TYPE_CHECKING:
|
73 |
+
from types import TracebackType
|
74 |
+
|
75 |
+
from pandas._typing import (
|
76 |
+
DtypeArg,
|
77 |
+
DtypeBackend,
|
78 |
+
ExcelWriterIfSheetExists,
|
79 |
+
FilePath,
|
80 |
+
IntStrT,
|
81 |
+
ReadBuffer,
|
82 |
+
Self,
|
83 |
+
SequenceNotStr,
|
84 |
+
StorageOptions,
|
85 |
+
WriteExcelBuffer,
|
86 |
+
)
|
87 |
+
_read_excel_doc = (
|
88 |
+
"""
|
89 |
+
Read an Excel file into a ``pandas`` ``DataFrame``.
|
90 |
+
|
91 |
+
Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
|
92 |
+
read from a local filesystem or URL. Supports an option to read
|
93 |
+
a single sheet or a list of sheets.
|
94 |
+
|
95 |
+
Parameters
|
96 |
+
----------
|
97 |
+
io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
|
98 |
+
Any valid string path is acceptable. The string could be a URL. Valid
|
99 |
+
URL schemes include http, ftp, s3, and file. For file URLs, a host is
|
100 |
+
expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
|
101 |
+
|
102 |
+
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
|
103 |
+
|
104 |
+
By file-like object, we refer to objects with a ``read()`` method,
|
105 |
+
such as a file handle (e.g. via builtin ``open`` function)
|
106 |
+
or ``StringIO``.
|
107 |
+
|
108 |
+
.. deprecated:: 2.1.0
|
109 |
+
Passing byte strings is deprecated. To read from a
|
110 |
+
byte string, wrap it in a ``BytesIO`` object.
|
111 |
+
sheet_name : str, int, list, or None, default 0
|
112 |
+
Strings are used for sheet names. Integers are used in zero-indexed
|
113 |
+
sheet positions (chart sheets do not count as a sheet position).
|
114 |
+
Lists of strings/integers are used to request multiple sheets.
|
115 |
+
Specify ``None`` to get all worksheets.
|
116 |
+
|
117 |
+
Available cases:
|
118 |
+
|
119 |
+
* Defaults to ``0``: 1st sheet as a `DataFrame`
|
120 |
+
* ``1``: 2nd sheet as a `DataFrame`
|
121 |
+
* ``"Sheet1"``: Load sheet with name "Sheet1"
|
122 |
+
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
|
123 |
+
as a dict of `DataFrame`
|
124 |
+
* ``None``: All worksheets.
|
125 |
+
|
126 |
+
header : int, list of int, default 0
|
127 |
+
Row (0-indexed) to use for the column labels of the parsed
|
128 |
+
DataFrame. If a list of integers is passed those row positions will
|
129 |
+
be combined into a ``MultiIndex``. Use None if there is no header.
|
130 |
+
names : array-like, default None
|
131 |
+
List of column names to use. If file contains no header row,
|
132 |
+
then you should explicitly pass header=None.
|
133 |
+
index_col : int, str, list of int, default None
|
134 |
+
Column (0-indexed) to use as the row labels of the DataFrame.
|
135 |
+
Pass None if there is no such column. If a list is passed,
|
136 |
+
those columns will be combined into a ``MultiIndex``. If a
|
137 |
+
subset of data is selected with ``usecols``, index_col
|
138 |
+
is based on the subset.
|
139 |
+
|
140 |
+
Missing values will be forward filled to allow roundtripping with
|
141 |
+
``to_excel`` for ``merged_cells=True``. To avoid forward filling the
|
142 |
+
missing values use ``set_index`` after reading the data instead of
|
143 |
+
``index_col``.
|
144 |
+
usecols : str, list-like, or callable, default None
|
145 |
+
* If None, then parse all columns.
|
146 |
+
* If str, then indicates comma separated list of Excel column letters
|
147 |
+
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
|
148 |
+
both sides.
|
149 |
+
* If list of int, then indicates list of column numbers to be parsed
|
150 |
+
(0-indexed).
|
151 |
+
* If list of string, then indicates list of column names to be parsed.
|
152 |
+
* If callable, then evaluate each column name against it and parse the
|
153 |
+
column if the callable returns ``True``.
|
154 |
+
|
155 |
+
Returns a subset of the columns according to behavior above.
|
156 |
+
dtype : Type name or dict of column -> type, default None
|
157 |
+
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}}
|
158 |
+
Use ``object`` to preserve data as stored in Excel and not interpret dtype,
|
159 |
+
which will necessarily result in ``object`` dtype.
|
160 |
+
If converters are specified, they will be applied INSTEAD
|
161 |
+
of dtype conversion.
|
162 |
+
If you use ``None``, it will infer the dtype of each column based on the data.
|
163 |
+
engine : {{'openpyxl', 'calamine', 'odf', 'pyxlsb', 'xlrd'}}, default None
|
164 |
+
If io is not a buffer or path, this must be set to identify io.
|
165 |
+
Engine compatibility :
|
166 |
+
|
167 |
+
- ``openpyxl`` supports newer Excel file formats.
|
168 |
+
- ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
|
169 |
+
and OpenDocument (.ods) file formats.
|
170 |
+
- ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
|
171 |
+
- ``pyxlsb`` supports Binary Excel files.
|
172 |
+
- ``xlrd`` supports old-style Excel files (.xls).
|
173 |
+
|
174 |
+
When ``engine=None``, the following logic will be used to determine the engine:
|
175 |
+
|
176 |
+
- If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
|
177 |
+
then `odf <https://pypi.org/project/odfpy/>`_ will be used.
|
178 |
+
- Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used.
|
179 |
+
- Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used.
|
180 |
+
- Otherwise ``openpyxl`` will be used.
|
181 |
+
converters : dict, default None
|
182 |
+
Dict of functions for converting values in certain columns. Keys can
|
183 |
+
either be integers or column labels, values are functions that take one
|
184 |
+
input argument, the Excel cell content, and return the transformed
|
185 |
+
content.
|
186 |
+
true_values : list, default None
|
187 |
+
Values to consider as True.
|
188 |
+
false_values : list, default None
|
189 |
+
Values to consider as False.
|
190 |
+
skiprows : list-like, int, or callable, optional
|
191 |
+
Line numbers to skip (0-indexed) or number of lines to skip (int) at the
|
192 |
+
start of the file. If callable, the callable function will be evaluated
|
193 |
+
against the row indices, returning True if the row should be skipped and
|
194 |
+
False otherwise. An example of a valid callable argument would be ``lambda
|
195 |
+
x: x in [0, 2]``.
|
196 |
+
nrows : int, default None
|
197 |
+
Number of rows to parse.
|
198 |
+
na_values : scalar, str, list-like, or dict, default None
|
199 |
+
Additional strings to recognize as NA/NaN. If dict passed, specific
|
200 |
+
per-column NA values. By default the following values are interpreted
|
201 |
+
as NaN: '"""
|
202 |
+
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
|
203 |
+
+ """'.
|
204 |
+
keep_default_na : bool, default True
|
205 |
+
Whether or not to include the default NaN values when parsing the data.
|
206 |
+
Depending on whether ``na_values`` is passed in, the behavior is as follows:
|
207 |
+
|
208 |
+
* If ``keep_default_na`` is True, and ``na_values`` are specified,
|
209 |
+
``na_values`` is appended to the default NaN values used for parsing.
|
210 |
+
* If ``keep_default_na`` is True, and ``na_values`` are not specified, only
|
211 |
+
the default NaN values are used for parsing.
|
212 |
+
* If ``keep_default_na`` is False, and ``na_values`` are specified, only
|
213 |
+
the NaN values specified ``na_values`` are used for parsing.
|
214 |
+
* If ``keep_default_na`` is False, and ``na_values`` are not specified, no
|
215 |
+
strings will be parsed as NaN.
|
216 |
+
|
217 |
+
Note that if `na_filter` is passed in as False, the ``keep_default_na`` and
|
218 |
+
``na_values`` parameters will be ignored.
|
219 |
+
na_filter : bool, default True
|
220 |
+
Detect missing value markers (empty strings and the value of na_values). In
|
221 |
+
data without any NAs, passing ``na_filter=False`` can improve the
|
222 |
+
performance of reading a large file.
|
223 |
+
verbose : bool, default False
|
224 |
+
Indicate number of NA values placed in non-numeric columns.
|
225 |
+
parse_dates : bool, list-like, or dict, default False
|
226 |
+
The behavior is as follows:
|
227 |
+
|
228 |
+
* ``bool``. If True -> try parsing the index.
|
229 |
+
* ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
|
230 |
+
each as a separate date column.
|
231 |
+
* ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
|
232 |
+
a single date column.
|
233 |
+
* ``dict``, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
|
234 |
+
result 'foo'
|
235 |
+
|
236 |
+
If a column or index contains an unparsable date, the entire column or
|
237 |
+
index will be returned unaltered as an object data type. If you don`t want to
|
238 |
+
parse some cells as date just change their type in Excel to "Text".
|
239 |
+
For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
|
240 |
+
|
241 |
+
Note: A fast-path exists for iso8601-formatted dates.
|
242 |
+
date_parser : function, optional
|
243 |
+
Function to use for converting a sequence of string columns to an array of
|
244 |
+
datetime instances. The default uses ``dateutil.parser.parser`` to do the
|
245 |
+
conversion. Pandas will try to call `date_parser` in three different ways,
|
246 |
+
advancing to the next if an exception occurs: 1) Pass one or more arrays
|
247 |
+
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
|
248 |
+
string values from the columns defined by `parse_dates` into a single array
|
249 |
+
and pass that; and 3) call `date_parser` once for each row using one or
|
250 |
+
more strings (corresponding to the columns defined by `parse_dates`) as
|
251 |
+
arguments.
|
252 |
+
|
253 |
+
.. deprecated:: 2.0.0
|
254 |
+
Use ``date_format`` instead, or read in as ``object`` and then apply
|
255 |
+
:func:`to_datetime` as-needed.
|
256 |
+
date_format : str or dict of column -> format, default ``None``
|
257 |
+
If used in conjunction with ``parse_dates``, will parse dates according to this
|
258 |
+
format. For anything more complex,
|
259 |
+
please read in as ``object`` and then apply :func:`to_datetime` as-needed.
|
260 |
+
|
261 |
+
.. versionadded:: 2.0.0
|
262 |
+
thousands : str, default None
|
263 |
+
Thousands separator for parsing string columns to numeric. Note that
|
264 |
+
this parameter is only necessary for columns stored as TEXT in Excel,
|
265 |
+
any numeric columns will automatically be parsed, regardless of display
|
266 |
+
format.
|
267 |
+
decimal : str, default '.'
|
268 |
+
Character to recognize as decimal point for parsing string columns to numeric.
|
269 |
+
Note that this parameter is only necessary for columns stored as TEXT in Excel,
|
270 |
+
any numeric columns will automatically be parsed, regardless of display
|
271 |
+
format.(e.g. use ',' for European data).
|
272 |
+
|
273 |
+
.. versionadded:: 1.4.0
|
274 |
+
|
275 |
+
comment : str, default None
|
276 |
+
Comments out remainder of line. Pass a character or characters to this
|
277 |
+
argument to indicate comments in the input file. Any data between the
|
278 |
+
comment string and the end of the current line is ignored.
|
279 |
+
skipfooter : int, default 0
|
280 |
+
Rows at the end to skip (0-indexed).
|
281 |
+
{storage_options}
|
282 |
+
|
283 |
+
dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
|
284 |
+
Back-end data type applied to the resultant :class:`DataFrame`
|
285 |
+
(still experimental). Behaviour is as follows:
|
286 |
+
|
287 |
+
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
|
288 |
+
(default).
|
289 |
+
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
|
290 |
+
DataFrame.
|
291 |
+
|
292 |
+
.. versionadded:: 2.0
|
293 |
+
|
294 |
+
engine_kwargs : dict, optional
|
295 |
+
Arbitrary keyword arguments passed to excel engine.
|
296 |
+
|
297 |
+
Returns
|
298 |
+
-------
|
299 |
+
DataFrame or dict of DataFrames
|
300 |
+
DataFrame from the passed in Excel file. See notes in sheet_name
|
301 |
+
argument for more information on when a dict of DataFrames is returned.
|
302 |
+
|
303 |
+
See Also
|
304 |
+
--------
|
305 |
+
DataFrame.to_excel : Write DataFrame to an Excel file.
|
306 |
+
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
|
307 |
+
read_csv : Read a comma-separated values (csv) file into DataFrame.
|
308 |
+
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
|
309 |
+
|
310 |
+
Notes
|
311 |
+
-----
|
312 |
+
For specific information on the methods used for each Excel engine, refer to the pandas
|
313 |
+
:ref:`user guide <io.excel_reader>`
|
314 |
+
|
315 |
+
Examples
|
316 |
+
--------
|
317 |
+
The file can be read using the file name as string or an open file object:
|
318 |
+
|
319 |
+
>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
|
320 |
+
Name Value
|
321 |
+
0 string1 1
|
322 |
+
1 string2 2
|
323 |
+
2 #Comment 3
|
324 |
+
|
325 |
+
>>> pd.read_excel(open('tmp.xlsx', 'rb'),
|
326 |
+
... sheet_name='Sheet3') # doctest: +SKIP
|
327 |
+
Unnamed: 0 Name Value
|
328 |
+
0 0 string1 1
|
329 |
+
1 1 string2 2
|
330 |
+
2 2 #Comment 3
|
331 |
+
|
332 |
+
Index and header can be specified via the `index_col` and `header` arguments
|
333 |
+
|
334 |
+
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
|
335 |
+
0 1 2
|
336 |
+
0 NaN Name Value
|
337 |
+
1 0.0 string1 1
|
338 |
+
2 1.0 string2 2
|
339 |
+
3 2.0 #Comment 3
|
340 |
+
|
341 |
+
Column types are inferred but can be explicitly specified
|
342 |
+
|
343 |
+
>>> pd.read_excel('tmp.xlsx', index_col=0,
|
344 |
+
... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP
|
345 |
+
Name Value
|
346 |
+
0 string1 1.0
|
347 |
+
1 string2 2.0
|
348 |
+
2 #Comment 3.0
|
349 |
+
|
350 |
+
True, False, and NA values, and thousands separators have defaults,
|
351 |
+
but can be explicitly specified, too. Supply the values you would like
|
352 |
+
as strings or lists of strings!
|
353 |
+
|
354 |
+
>>> pd.read_excel('tmp.xlsx', index_col=0,
|
355 |
+
... na_values=['string1', 'string2']) # doctest: +SKIP
|
356 |
+
Name Value
|
357 |
+
0 NaN 1
|
358 |
+
1 NaN 2
|
359 |
+
2 #Comment 3
|
360 |
+
|
361 |
+
Comment lines in the excel input file can be skipped using the
|
362 |
+
``comment`` kwarg.
|
363 |
+
|
364 |
+
>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
|
365 |
+
Name Value
|
366 |
+
0 string1 1.0
|
367 |
+
1 string2 2.0
|
368 |
+
2 None NaN
|
369 |
+
"""
|
370 |
+
)
|
371 |
+
|
372 |
+
|
373 |
+
@overload
|
374 |
+
def read_excel(
|
375 |
+
io,
|
376 |
+
# sheet name is str or int -> DataFrame
|
377 |
+
sheet_name: str | int = ...,
|
378 |
+
*,
|
379 |
+
header: int | Sequence[int] | None = ...,
|
380 |
+
names: SequenceNotStr[Hashable] | range | None = ...,
|
381 |
+
index_col: int | str | Sequence[int] | None = ...,
|
382 |
+
usecols: int
|
383 |
+
| str
|
384 |
+
| Sequence[int]
|
385 |
+
| Sequence[str]
|
386 |
+
| Callable[[str], bool]
|
387 |
+
| None = ...,
|
388 |
+
dtype: DtypeArg | None = ...,
|
389 |
+
engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
|
390 |
+
converters: dict[str, Callable] | dict[int, Callable] | None = ...,
|
391 |
+
true_values: Iterable[Hashable] | None = ...,
|
392 |
+
false_values: Iterable[Hashable] | None = ...,
|
393 |
+
skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
|
394 |
+
nrows: int | None = ...,
|
395 |
+
na_values=...,
|
396 |
+
keep_default_na: bool = ...,
|
397 |
+
na_filter: bool = ...,
|
398 |
+
verbose: bool = ...,
|
399 |
+
parse_dates: list | dict | bool = ...,
|
400 |
+
date_parser: Callable | lib.NoDefault = ...,
|
401 |
+
date_format: dict[Hashable, str] | str | None = ...,
|
402 |
+
thousands: str | None = ...,
|
403 |
+
decimal: str = ...,
|
404 |
+
comment: str | None = ...,
|
405 |
+
skipfooter: int = ...,
|
406 |
+
storage_options: StorageOptions = ...,
|
407 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
408 |
+
) -> DataFrame:
|
409 |
+
...
|
410 |
+
|
411 |
+
|
412 |
+
@overload
# Overload for ``sheet_name`` given as a list (or None, meaning "all sheets"):
# multiple sheets are read, so the return type is a dict keyed by sheet
# name/index.
def read_excel(
    io,
    # sheet name is list or None -> dict[IntStrT, DataFrame]
    sheet_name: list[IntStrT] | None,
    *,
    header: int | Sequence[int] | None = ...,
    names: SequenceNotStr[Hashable] | range | None = ...,
    index_col: int | str | Sequence[int] | None = ...,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = ...,
    dtype: DtypeArg | None = ...,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
    converters: dict[str, Callable] | dict[int, Callable] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: list | dict | bool = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> dict[IntStrT, DataFrame]:
    ...
|
449 |
+
|
450 |
+
|
451 |
+
@doc(storage_options=_shared_docs["storage_options"])
@Appender(_read_excel_doc)
def read_excel(
    io,
    sheet_name: str | int | list[IntStrT] | None = 0,
    *,
    header: int | Sequence[int] | None = 0,
    names: SequenceNotStr[Hashable] | range | None = None,
    index_col: int | str | Sequence[int] | None = None,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = None,
    dtype: DtypeArg | None = None,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None,
    converters: dict[str, Callable] | dict[int, Callable] | None = None,
    true_values: Iterable[Hashable] | None = None,
    false_values: Iterable[Hashable] | None = None,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
    nrows: int | None = None,
    na_values=None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool = False,
    parse_dates: list | dict | bool = False,
    date_parser: Callable | lib.NoDefault = lib.no_default,
    date_format: dict[Hashable, str] | str | None = None,
    thousands: str | None = None,
    decimal: str = ".",
    comment: str | None = None,
    skipfooter: int = 0,
    storage_options: StorageOptions | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    engine_kwargs: dict | None = None,
) -> DataFrame | dict[IntStrT, DataFrame]:
    # Public entry point: validate options, ensure we hold an ExcelFile,
    # delegate the heavy lifting to ExcelFile.parse, and close any handle
    # we opened ourselves.
    check_dtype_backend(dtype_backend)
    if engine_kwargs is None:
        engine_kwargs = {}

    # Track whether *we* created the ExcelFile; a caller-provided one must
    # stay open for the caller's further use.
    owns_handle = not isinstance(io, ExcelFile)
    if owns_handle:
        io = ExcelFile(
            io,
            storage_options=storage_options,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )
    elif engine and engine != io.engine:
        # The caller's ExcelFile already fixed the engine; a conflicting
        # explicit engine here would be silently ignored, so reject it.
        raise ValueError(
            "Engine should not be specified when passing "
            "an ExcelFile - ExcelFile already has the engine set"
        )

    try:
        result = io.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            dtype=dtype,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            keep_default_na=keep_default_na,
            na_filter=na_filter,
            verbose=verbose,
            parse_dates=parse_dates,
            date_parser=date_parser,
            date_format=date_format,
            thousands=thousands,
            decimal=decimal,
            comment=comment,
            skipfooter=skipfooter,
            dtype_backend=dtype_backend,
        )
    finally:
        # make sure to close opened file handles
        if owns_handle:
            io.close()
    return result
|
538 |
+
|
539 |
+
|
540 |
+
# Engine-specific workbook type (e.g. the openpyxl/xlrd/pyxlsb workbook
# object); parameterizes BaseExcelReader below.
_WorkbookT = TypeVar("_WorkbookT")
|
541 |
+
|
542 |
+
|
543 |
+
class BaseExcelReader(Generic[_WorkbookT]):
    """
    Base class for engine-specific Excel readers.

    Subclasses supply the workbook type (``_workbook_class``), workbook
    loading (``load_workbook``), sheet lookup and raw cell extraction
    (``get_sheet_*``); this base class handles file/buffer management and
    the shared ``parse`` logic that turns raw rows into DataFrames.
    """

    # Engine-specific workbook instance, set in __init__.
    book: _WorkbookT

    def __init__(
        self,
        filepath_or_buffer,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Open ``filepath_or_buffer`` and load the workbook.

        Accepts raw bytes, a path, a readable buffer, or an already-open
        workbook of the engine's type. On workbook-load failure, any handle
        opened here is closed before re-raising.
        """
        if engine_kwargs is None:
            engine_kwargs = {}

        # First argument can also be bytes, so create a buffer
        if isinstance(filepath_or_buffer, bytes):
            filepath_or_buffer = BytesIO(filepath_or_buffer)

        self.handles = IOHandles(
            handle=filepath_or_buffer, compression={"method": None}
        )
        if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
            self.handles = get_handle(
                filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
            )

        if isinstance(self.handles.handle, self._workbook_class):
            # Already a workbook object of this engine's type: use directly.
            self.book = self.handles.handle
        elif hasattr(self.handles.handle, "read"):
            # N.B. xlrd.Book has a read attribute too
            self.handles.handle.seek(0)
            try:
                self.book = self.load_workbook(self.handles.handle, engine_kwargs)
            except Exception:
                self.close()
                raise
        else:
            raise ValueError(
                "Must explicitly set engine if not passing in buffer or path for io."
            )

    @property
    def _workbook_class(self) -> type[_WorkbookT]:
        """The engine's workbook class; must be provided by subclasses."""
        raise NotImplementedError

    def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT:
        """Load and return a workbook from a file/buffer; subclass hook."""
        raise NotImplementedError

    def close(self) -> None:
        """Close the workbook (engine-appropriate method) and any file handles."""
        if hasattr(self, "book"):
            if hasattr(self.book, "close"):
                # pyxlsb: opens a TemporaryFile
                # openpyxl: https://stackoverflow.com/questions/31416842/
                # openpyxl-does-not-close-excel-workbook-in-read-only-mode
                self.book.close()
            elif hasattr(self.book, "release_resources"):
                # xlrd
                # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548
                self.book.release_resources()
        self.handles.close()

    @property
    def sheet_names(self) -> list[str]:
        """List of worksheet names in workbook order; subclass hook."""
        raise NotImplementedError

    def get_sheet_by_name(self, name: str):
        """Return the engine's sheet object for worksheet ``name``; subclass hook."""
        raise NotImplementedError

    def get_sheet_by_index(self, index: int):
        """Return the engine's sheet object at position ``index``; subclass hook."""
        raise NotImplementedError

    def get_sheet_data(self, sheet, rows: int | None = None):
        """Return raw cell data for ``sheet`` (at most ``rows`` rows); subclass hook."""
        raise NotImplementedError

    def raise_if_bad_sheet_by_index(self, index: int) -> None:
        """Raise ValueError if ``index`` is out of range for this workbook."""
        n_sheets = len(self.sheet_names)
        if index >= n_sheets:
            raise ValueError(
                f"Worksheet index {index} is invalid, {n_sheets} worksheets found"
            )

    def raise_if_bad_sheet_by_name(self, name: str) -> None:
        """Raise ValueError if no worksheet called ``name`` exists."""
        if name not in self.sheet_names:
            raise ValueError(f"Worksheet named '{name}' not found")

    def _check_skiprows_func(
        self,
        skiprows: Callable,
        rows_to_use: int,
    ) -> int:
        """
        Determine how many file rows are required to obtain `nrows` data
        rows when `skiprows` is a function.

        Parameters
        ----------
        skiprows : function
            The function passed to read_excel by the user.
        rows_to_use : int
            The number of rows that will be needed for the header and
            the data.

        Returns
        -------
        int
        """
        # Walk row indices, counting only the ones skiprows does NOT skip,
        # until enough kept rows have been seen.
        # NOTE(review): a skiprows callable that skips every row would loop
        # forever here — relies on the user supplying a sane function.
        i = 0
        rows_used_so_far = 0
        while rows_used_so_far < rows_to_use:
            if not skiprows(i):
                rows_used_so_far += 1
            i += 1
        return i

    def _calc_rows(
        self,
        header: int | Sequence[int] | None,
        index_col: int | Sequence[int] | None,
        skiprows: Sequence[int] | int | Callable[[int], object] | None,
        nrows: int | None,
    ) -> int | None:
        """
        If nrows specified, find the number of rows needed from the
        file, otherwise return None.


        Parameters
        ----------
        header : int, list of int, or None
            See read_excel docstring.
        index_col : int, str, list of int, or None
            See read_excel docstring.
        skiprows : list-like, int, callable, or None
            See read_excel docstring.
        nrows : int or None
            See read_excel docstring.

        Returns
        -------
        int or None
        """
        if nrows is None:
            return None
        if header is None:
            header_rows = 1
        elif is_integer(header):
            header = cast(int, header)
            header_rows = 1 + header
        else:
            header = cast(Sequence, header)
            header_rows = 1 + header[-1]
        # If there is a MultiIndex header and an index then there is also
        # a row containing just the index name(s)
        if is_list_like(header) and index_col is not None:
            header = cast(Sequence, header)
            if len(header) > 1:
                header_rows += 1
        if skiprows is None:
            return header_rows + nrows
        if is_integer(skiprows):
            skiprows = cast(int, skiprows)
            return header_rows + nrows + skiprows
        if is_list_like(skiprows):

            def f(skiprows: Sequence, x: int) -> bool:
                # Membership test reused as a skiprows-style callable.
                return x in skiprows

            skiprows = cast(Sequence, skiprows)
            return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)
        if callable(skiprows):
            return self._check_skiprows_func(
                skiprows,
                header_rows + nrows,
            )
        # else unexpected skiprows type: read_excel will not optimize
        # the number of rows read from file
        return None

    def parse(
        self,
        sheet_name: str | int | list[int] | list[str] | None = 0,
        header: int | Sequence[int] | None = 0,
        names: SequenceNotStr[Hashable] | range | None = None,
        index_col: int | Sequence[int] | None = None,
        usecols=None,
        dtype: DtypeArg | None = None,
        true_values: Iterable[Hashable] | None = None,
        false_values: Iterable[Hashable] | None = None,
        skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
        nrows: int | None = None,
        na_values=None,
        verbose: bool = False,
        parse_dates: list | dict | bool = False,
        date_parser: Callable | lib.NoDefault = lib.no_default,
        date_format: dict[Hashable, str] | str | None = None,
        thousands: str | None = None,
        decimal: str = ".",
        comment: str | None = None,
        skipfooter: int = 0,
        dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
        **kwds,
    ):
        """
        Parse one or more worksheets into DataFrame(s).

        Returns a single DataFrame for a scalar ``sheet_name``, or a dict of
        DataFrames when ``sheet_name`` is a list or None (all sheets).
        Raises ValueError if ``sheet_name`` is an empty list. Errors raised
        while parsing a sheet are re-raised with the sheet name appended to
        the message.
        """
        validate_header_arg(header)
        validate_integer("nrows", nrows)

        ret_dict = False

        # Keep sheetname to maintain backwards compatibility.
        sheets: list[int] | list[str]
        if isinstance(sheet_name, list):
            sheets = sheet_name
            ret_dict = True
        elif sheet_name is None:
            sheets = self.sheet_names
            ret_dict = True
        elif isinstance(sheet_name, str):
            sheets = [sheet_name]
        else:
            sheets = [sheet_name]

        # handle same-type duplicates.
        sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys()))

        output = {}

        last_sheetname = None
        for asheetname in sheets:
            last_sheetname = asheetname
            if verbose:
                print(f"Reading sheet {asheetname}")

            if isinstance(asheetname, str):
                sheet = self.get_sheet_by_name(asheetname)
            else:  # assume an integer if not a string
                sheet = self.get_sheet_by_index(asheetname)

            # Limit rows read from the file when nrows makes that possible.
            file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
            data = self.get_sheet_data(sheet, file_rows_needed)
            if hasattr(sheet, "close"):
                # pyxlsb opens two TemporaryFiles
                sheet.close()
            usecols = maybe_convert_usecols(usecols)

            if not data:
                # Empty sheet -> empty DataFrame; move on to the next sheet.
                output[asheetname] = DataFrame()
                continue

            is_list_header = False
            is_len_one_list_header = False
            if is_list_like(header):
                assert isinstance(header, Sequence)
                is_list_header = True
                if len(header) == 1:
                    is_len_one_list_header = True

            if is_len_one_list_header:
                # A single-element header list is equivalent to a scalar header.
                header = cast(Sequence[int], header)[0]

            # forward fill and pull out names for MultiIndex column
            header_names = None
            if header is not None and is_list_like(header):
                assert isinstance(header, Sequence)

                header_names = []
                control_row = [True] * len(data[0])

                for row in header:
                    if is_integer(skiprows):
                        assert isinstance(skiprows, int)
                        row += skiprows

                    if row > len(data) - 1:
                        raise ValueError(
                            f"header index {row} exceeds maximum index "
                            f"{len(data) - 1} of data.",
                        )

                    # Mutates data[row] in place (forward fill of merged cells).
                    data[row], control_row = fill_mi_header(data[row], control_row)

                    if index_col is not None:
                        header_name, _ = pop_header_name(data[row], index_col)
                        header_names.append(header_name)

            # If there is a MultiIndex header and an index then there is also
            # a row containing just the index name(s)
            has_index_names = False
            if is_list_header and not is_len_one_list_header and index_col is not None:
                index_col_list: Sequence[int]
                if isinstance(index_col, int):
                    index_col_list = [index_col]
                else:
                    assert isinstance(index_col, Sequence)
                    index_col_list = index_col

                # We have to handle mi without names. If any of the entries in the data
                # columns are not empty, this is a regular row
                assert isinstance(header, Sequence)
                if len(header) < len(data):
                    potential_index_names = data[len(header)]
                    potential_data = [
                        x
                        for i, x in enumerate(potential_index_names)
                        if not control_row[i] and i not in index_col_list
                    ]
                    has_index_names = all(x == "" or x is None for x in potential_data)

            if is_list_like(index_col):
                # Forward fill values for MultiIndex index.
                if header is None:
                    offset = 0
                elif isinstance(header, int):
                    offset = 1 + header
                else:
                    offset = 1 + max(header)

                # GH34673: if MultiIndex names present and not defined in the header,
                # offset needs to be incremented so that forward filling starts
                # from the first MI value instead of the name
                if has_index_names:
                    offset += 1

                # Check if we have an empty dataset
                # before trying to collect data.
                if offset < len(data):
                    assert isinstance(index_col, Sequence)

                    for col in index_col:
                        last = data[offset][col]

                        for row in range(offset + 1, len(data)):
                            if data[row][col] == "" or data[row][col] is None:
                                data[row][col] = last
                            else:
                                last = data[row][col]

            # GH 12292 : error when read one empty column from excel file
            try:
                parser = TextParser(
                    data,
                    names=names,
                    header=header,
                    index_col=index_col,
                    has_index_names=has_index_names,
                    dtype=dtype,
                    true_values=true_values,
                    false_values=false_values,
                    skiprows=skiprows,
                    nrows=nrows,
                    na_values=na_values,
                    skip_blank_lines=False,  # GH 39808
                    parse_dates=parse_dates,
                    date_parser=date_parser,
                    date_format=date_format,
                    thousands=thousands,
                    decimal=decimal,
                    comment=comment,
                    skipfooter=skipfooter,
                    usecols=usecols,
                    dtype_backend=dtype_backend,
                    **kwds,
                )

                output[asheetname] = parser.read(nrows=nrows)

                if header_names:
                    output[asheetname].columns = output[asheetname].columns.set_names(
                        header_names
                    )

            except EmptyDataError:
                # No Data, return an empty DataFrame
                output[asheetname] = DataFrame()

            except Exception as err:
                # Annotate the error with the sheet being parsed for context.
                err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:])
                raise err

        if last_sheetname is None:
            raise ValueError("Sheet name is an empty list")

        if ret_dict:
            return output
        else:
            return output[last_sheetname]
|
925 |
+
|
926 |
+
|
927 |
+
@doc(storage_options=_shared_docs["storage_options"])
|
928 |
+
class ExcelWriter(Generic[_WorkbookT]):
|
929 |
+
"""
|
930 |
+
Class for writing DataFrame objects into excel sheets.
|
931 |
+
|
932 |
+
Default is to use:
|
933 |
+
|
934 |
+
* `xlsxwriter <https://pypi.org/project/XlsxWriter/>`__ for xlsx files if xlsxwriter
|
935 |
+
is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__
|
936 |
+
* `odswriter <https://pypi.org/project/odswriter/>`__ for ods files
|
937 |
+
|
938 |
+
See ``DataFrame.to_excel`` for typical usage.
|
939 |
+
|
940 |
+
The writer should be used as a context manager. Otherwise, call `close()` to save
|
941 |
+
and close any opened file handles.
|
942 |
+
|
943 |
+
Parameters
|
944 |
+
----------
|
945 |
+
path : str or typing.BinaryIO
|
946 |
+
Path to xls or xlsx or ods file.
|
947 |
+
engine : str (optional)
|
948 |
+
Engine to use for writing. If None, defaults to
|
949 |
+
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
|
950 |
+
argument.
|
951 |
+
date_format : str, default None
|
952 |
+
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
|
953 |
+
datetime_format : str, default None
|
954 |
+
Format string for datetime objects written into Excel files.
|
955 |
+
(e.g. 'YYYY-MM-DD HH:MM:SS').
|
956 |
+
mode : {{'w', 'a'}}, default 'w'
|
957 |
+
File mode to use (write or append). Append does not work with fsspec URLs.
|
958 |
+
{storage_options}
|
959 |
+
|
960 |
+
if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'
|
961 |
+
How to behave when trying to write to a sheet that already
|
962 |
+
exists (append mode only).
|
963 |
+
|
964 |
+
* error: raise a ValueError.
|
965 |
+
* new: Create a new sheet, with a name determined by the engine.
|
966 |
+
* replace: Delete the contents of the sheet before writing to it.
|
967 |
+
* overlay: Write contents to the existing sheet without first removing,
|
968 |
+
but possibly over top of, the existing contents.
|
969 |
+
|
970 |
+
.. versionadded:: 1.3.0
|
971 |
+
|
972 |
+
.. versionchanged:: 1.4.0
|
973 |
+
|
974 |
+
Added ``overlay`` option
|
975 |
+
|
976 |
+
engine_kwargs : dict, optional
|
977 |
+
Keyword arguments to be passed into the engine. These will be passed to
|
978 |
+
the following functions of the respective engines:
|
979 |
+
|
980 |
+
* xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
|
981 |
+
* openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
|
982 |
+
* openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
|
983 |
+
* odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
|
984 |
+
|
985 |
+
.. versionadded:: 1.3.0
|
986 |
+
|
987 |
+
Notes
|
988 |
+
-----
|
989 |
+
For compatibility with CSV writers, ExcelWriter serializes lists
|
990 |
+
and dicts to strings before writing.
|
991 |
+
|
992 |
+
Examples
|
993 |
+
--------
|
994 |
+
Default usage:
|
995 |
+
|
996 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
997 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
|
998 |
+
... df.to_excel(writer) # doctest: +SKIP
|
999 |
+
|
1000 |
+
To write to separate sheets in a single file:
|
1001 |
+
|
1002 |
+
>>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP
|
1003 |
+
>>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
1004 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
|
1005 |
+
... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
|
1006 |
+
... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
|
1007 |
+
|
1008 |
+
You can set the date format or datetime format:
|
1009 |
+
|
1010 |
+
>>> from datetime import date, datetime # doctest: +SKIP
|
1011 |
+
>>> df = pd.DataFrame(
|
1012 |
+
... [
|
1013 |
+
... [date(2014, 1, 31), date(1999, 9, 24)],
|
1014 |
+
... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
|
1015 |
+
... ],
|
1016 |
+
... index=["Date", "Datetime"],
|
1017 |
+
... columns=["X", "Y"],
|
1018 |
+
... ) # doctest: +SKIP
|
1019 |
+
>>> with pd.ExcelWriter(
|
1020 |
+
... "path_to_file.xlsx",
|
1021 |
+
... date_format="YYYY-MM-DD",
|
1022 |
+
... datetime_format="YYYY-MM-DD HH:MM:SS"
|
1023 |
+
... ) as writer:
|
1024 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1025 |
+
|
1026 |
+
You can also append to an existing Excel file:
|
1027 |
+
|
1028 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
|
1029 |
+
... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP
|
1030 |
+
|
1031 |
+
Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
|
1032 |
+
already exists:
|
1033 |
+
|
1034 |
+
>>> with ExcelWriter(
|
1035 |
+
... "path_to_file.xlsx",
|
1036 |
+
... mode="a",
|
1037 |
+
... engine="openpyxl",
|
1038 |
+
... if_sheet_exists="replace",
|
1039 |
+
... ) as writer:
|
1040 |
+
... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
|
1041 |
+
|
1042 |
+
You can also write multiple DataFrames to a single sheet. Note that the
|
1043 |
+
``if_sheet_exists`` parameter needs to be set to ``overlay``:
|
1044 |
+
|
1045 |
+
>>> with ExcelWriter("path_to_file.xlsx",
|
1046 |
+
... mode="a",
|
1047 |
+
... engine="openpyxl",
|
1048 |
+
... if_sheet_exists="overlay",
|
1049 |
+
... ) as writer:
|
1050 |
+
... df1.to_excel(writer, sheet_name="Sheet1")
|
1051 |
+
... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP
|
1052 |
+
|
1053 |
+
You can store Excel file in RAM:
|
1054 |
+
|
1055 |
+
>>> import io
|
1056 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
|
1057 |
+
>>> buffer = io.BytesIO()
|
1058 |
+
>>> with pd.ExcelWriter(buffer) as writer:
|
1059 |
+
... df.to_excel(writer)
|
1060 |
+
|
1061 |
+
You can pack Excel file into zip archive:
|
1062 |
+
|
1063 |
+
>>> import zipfile # doctest: +SKIP
|
1064 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
1065 |
+
>>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
|
1066 |
+
... with zf.open("filename.xlsx", "w") as buffer:
|
1067 |
+
... with pd.ExcelWriter(buffer) as writer:
|
1068 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1069 |
+
|
1070 |
+
You can specify additional arguments to the underlying engine:
|
1071 |
+
|
1072 |
+
>>> with pd.ExcelWriter(
|
1073 |
+
... "path_to_file.xlsx",
|
1074 |
+
... engine="xlsxwriter",
|
1075 |
+
... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
|
1076 |
+
... ) as writer:
|
1077 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1078 |
+
|
1079 |
+
In append mode, ``engine_kwargs`` are passed through to
|
1080 |
+
openpyxl's ``load_workbook``:
|
1081 |
+
|
1082 |
+
>>> with pd.ExcelWriter(
|
1083 |
+
... "path_to_file.xlsx",
|
1084 |
+
... engine="openpyxl",
|
1085 |
+
... mode="a",
|
1086 |
+
... engine_kwargs={{"keep_vba": True}}
|
1087 |
+
... ) as writer:
|
1088 |
+
... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
|
1089 |
+
"""
|
1090 |
+
|
1091 |
+
# Defining an ExcelWriter implementation (see abstract methods for more...)
|
1092 |
+
|
1093 |
+
# - Mandatory
|
1094 |
+
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
|
1095 |
+
# --> called to write additional DataFrames to disk
|
1096 |
+
# - ``_supported_extensions`` (tuple of supported extensions), used to
|
1097 |
+
# check that engine supports the given extension.
|
1098 |
+
# - ``_engine`` - string that gives the engine name. Necessary to
|
1099 |
+
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
|
1100 |
+
# lookup.
|
1101 |
+
# - ``save(self)`` --> called to save file to disk
|
1102 |
+
# - Mostly mandatory (i.e. should at least exist)
|
1103 |
+
# - book, cur_sheet, path
|
1104 |
+
|
1105 |
+
# - Optional:
|
1106 |
+
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
|
1107 |
+
# with path as first argument.
|
1108 |
+
|
1109 |
+
# You also need to register the class with ``register_writer()``.
|
1110 |
+
# Technically, ExcelWriter implementations don't need to subclass
|
1111 |
+
# ExcelWriter.
|
1112 |
+
|
1113 |
+
_engine: str
|
1114 |
+
_supported_extensions: tuple[str, ...]
|
1115 |
+
|
1116 |
+
def __new__(
|
1117 |
+
cls,
|
1118 |
+
path: FilePath | WriteExcelBuffer | ExcelWriter,
|
1119 |
+
engine: str | None = None,
|
1120 |
+
date_format: str | None = None,
|
1121 |
+
datetime_format: str | None = None,
|
1122 |
+
mode: str = "w",
|
1123 |
+
storage_options: StorageOptions | None = None,
|
1124 |
+
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
|
1125 |
+
engine_kwargs: dict | None = None,
|
1126 |
+
) -> Self:
|
1127 |
+
# only switch class if generic(ExcelWriter)
|
1128 |
+
if cls is ExcelWriter:
|
1129 |
+
if engine is None or (isinstance(engine, str) and engine == "auto"):
|
1130 |
+
if isinstance(path, str):
|
1131 |
+
ext = os.path.splitext(path)[-1][1:]
|
1132 |
+
else:
|
1133 |
+
ext = "xlsx"
|
1134 |
+
|
1135 |
+
try:
|
1136 |
+
engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
|
1137 |
+
if engine == "auto":
|
1138 |
+
engine = get_default_engine(ext, mode="writer")
|
1139 |
+
except KeyError as err:
|
1140 |
+
raise ValueError(f"No engine for filetype: '{ext}'") from err
|
1141 |
+
|
1142 |
+
# for mypy
|
1143 |
+
assert engine is not None
|
1144 |
+
# error: Incompatible types in assignment (expression has type
|
1145 |
+
# "type[ExcelWriter[Any]]", variable has type "type[Self]")
|
1146 |
+
cls = get_writer(engine) # type: ignore[assignment]
|
1147 |
+
|
1148 |
+
return object.__new__(cls)
|
1149 |
+
|
1150 |
+
# declare external properties you can count on
|
1151 |
+
_path = None
|
1152 |
+
|
1153 |
+
@property
|
1154 |
+
def supported_extensions(self) -> tuple[str, ...]:
|
1155 |
+
"""Extensions that writer engine supports."""
|
1156 |
+
return self._supported_extensions
|
1157 |
+
|
1158 |
+
@property
|
1159 |
+
def engine(self) -> str:
|
1160 |
+
"""Name of engine."""
|
1161 |
+
return self._engine
|
1162 |
+
|
1163 |
+
@property
|
1164 |
+
def sheets(self) -> dict[str, Any]:
|
1165 |
+
"""Mapping of sheet names to sheet objects."""
|
1166 |
+
raise NotImplementedError
|
1167 |
+
|
1168 |
+
@property
|
1169 |
+
def book(self) -> _WorkbookT:
|
1170 |
+
"""
|
1171 |
+
Book instance. Class type will depend on the engine used.
|
1172 |
+
|
1173 |
+
This attribute can be used to access engine-specific features.
|
1174 |
+
"""
|
1175 |
+
raise NotImplementedError
|
1176 |
+
|
1177 |
+
def _write_cells(
|
1178 |
+
self,
|
1179 |
+
cells,
|
1180 |
+
sheet_name: str | None = None,
|
1181 |
+
startrow: int = 0,
|
1182 |
+
startcol: int = 0,
|
1183 |
+
freeze_panes: tuple[int, int] | None = None,
|
1184 |
+
) -> None:
|
1185 |
+
"""
|
1186 |
+
Write given formatted cells into Excel an excel sheet
|
1187 |
+
|
1188 |
+
Parameters
|
1189 |
+
----------
|
1190 |
+
cells : generator
|
1191 |
+
cell of formatted data to save to Excel sheet
|
1192 |
+
sheet_name : str, default None
|
1193 |
+
Name of Excel sheet, if None, then use self.cur_sheet
|
1194 |
+
startrow : upper left cell row to dump data frame
|
1195 |
+
startcol : upper left cell column to dump data frame
|
1196 |
+
freeze_panes: int tuple of length 2
|
1197 |
+
contains the bottom-most row and right-most column to freeze
|
1198 |
+
"""
|
1199 |
+
raise NotImplementedError
|
1200 |
+
|
1201 |
+
def _save(self) -> None:
|
1202 |
+
"""
|
1203 |
+
Save workbook to disk.
|
1204 |
+
"""
|
1205 |
+
raise NotImplementedError
|
1206 |
+
|
1207 |
+
def __init__(
|
1208 |
+
self,
|
1209 |
+
path: FilePath | WriteExcelBuffer | ExcelWriter,
|
1210 |
+
engine: str | None = None,
|
1211 |
+
date_format: str | None = None,
|
1212 |
+
datetime_format: str | None = None,
|
1213 |
+
mode: str = "w",
|
1214 |
+
storage_options: StorageOptions | None = None,
|
1215 |
+
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
|
1216 |
+
engine_kwargs: dict[str, Any] | None = None,
|
1217 |
+
) -> None:
|
1218 |
+
# validate that this engine can handle the extension
|
1219 |
+
if isinstance(path, str):
|
1220 |
+
ext = os.path.splitext(path)[-1]
|
1221 |
+
self.check_extension(ext)
|
1222 |
+
|
1223 |
+
# use mode to open the file
|
1224 |
+
if "b" not in mode:
|
1225 |
+
mode += "b"
|
1226 |
+
# use "a" for the user to append data to excel but internally use "r+" to let
|
1227 |
+
# the excel backend first read the existing file and then write any data to it
|
1228 |
+
mode = mode.replace("a", "r+")
|
1229 |
+
|
1230 |
+
if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
|
1231 |
+
raise ValueError(
|
1232 |
+
f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
|
1233 |
+
"Valid options are 'error', 'new', 'replace' and 'overlay'."
|
1234 |
+
)
|
1235 |
+
if if_sheet_exists and "r+" not in mode:
|
1236 |
+
raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
|
1237 |
+
if if_sheet_exists is None:
|
1238 |
+
if_sheet_exists = "error"
|
1239 |
+
self._if_sheet_exists = if_sheet_exists
|
1240 |
+
|
1241 |
+
# cast ExcelWriter to avoid adding 'if self._handles is not None'
|
1242 |
+
self._handles = IOHandles(
|
1243 |
+
cast(IO[bytes], path), compression={"compression": None}
|
1244 |
+
)
|
1245 |
+
if not isinstance(path, ExcelWriter):
|
1246 |
+
self._handles = get_handle(
|
1247 |
+
path, mode, storage_options=storage_options, is_text=False
|
1248 |
+
)
|
1249 |
+
self._cur_sheet = None
|
1250 |
+
|
1251 |
+
if date_format is None:
|
1252 |
+
self._date_format = "YYYY-MM-DD"
|
1253 |
+
else:
|
1254 |
+
self._date_format = date_format
|
1255 |
+
if datetime_format is None:
|
1256 |
+
self._datetime_format = "YYYY-MM-DD HH:MM:SS"
|
1257 |
+
else:
|
1258 |
+
self._datetime_format = datetime_format
|
1259 |
+
|
1260 |
+
self._mode = mode
|
1261 |
+
|
1262 |
+
@property
def date_format(self) -> str:
    """
    Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
    """
    # Set in __init__; defaults to "YYYY-MM-DD" when no date_format is given.
    return self._date_format
|
1268 |
+
|
1269 |
+
@property
def datetime_format(self) -> str:
    """
    Format string for datetimes written into Excel files
    (e.g. 'YYYY-MM-DD HH:MM:SS').
    """
    # Set in __init__; defaults to "YYYY-MM-DD HH:MM:SS" when no
    # datetime_format is given.
    return self._datetime_format
|
1275 |
+
|
1276 |
+
@property
def if_sheet_exists(self) -> str:
    """
    How to behave when writing to a sheet that already exists in append mode.

    One of 'error', 'new', 'replace' or 'overlay'; validated and defaulted
    to 'error' in __init__.
    """
    return self._if_sheet_exists
|
1282 |
+
|
1283 |
+
def __fspath__(self) -> str:
    """Return the path of the underlying handle, or "" for path-less buffers."""
    handle = self._handles.handle
    # In-memory buffers have no ``name`` attribute; fall back to "".
    return getattr(handle, "name", "")
|
1285 |
+
|
1286 |
+
def _get_sheet_name(self, sheet_name: str | None) -> str:
    """
    Resolve the target sheet name: an explicit argument wins, otherwise
    fall back to the writer's current sheet.
    """
    resolved = sheet_name if sheet_name is not None else self._cur_sheet
    if resolved is None:  # pragma: no cover
        raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
    return resolved
|
1292 |
+
|
1293 |
+
def _value_with_fmt(
    self, val
) -> tuple[
    int | float | bool | str | datetime.datetime | datetime.date, str | None
]:
    """
    Convert numpy types to Python types for the Excel writers.

    Parameters
    ----------
    val : object
        Value to be written into cells

    Returns
    -------
    Tuple with the first element being the converted value and the second
    being an optional format
    """
    # The branch order matters: bools are not treated as integers
    # (pandas' is_integer rejects bools), and datetime.datetime must be
    # tested before datetime.date since it is a subclass.
    if is_integer(val):
        return int(val), None
    if is_float(val):
        return float(val), None
    if is_bool(val):
        return bool(val), None
    if isinstance(val, datetime.datetime):
        return val, self._datetime_format
    if isinstance(val, datetime.date):
        return val, self._date_format
    if isinstance(val, datetime.timedelta):
        # Excel stores durations as fractional days; "0" displays the number.
        return val.total_seconds() / 86400, "0"
    # Anything unrecognized is written as its string representation.
    return str(val), None
|
1330 |
+
|
1331 |
+
@classmethod
def check_extension(cls, ext: str) -> Literal[True]:
    """
    Check the path's extension against the writer's supported extensions.

    Raises ValueError when the extension is not supported by this engine;
    returns True otherwise.
    """
    normalized = ext.removeprefix(".")
    supported = any(normalized in extension for extension in cls._supported_extensions)
    if not supported:
        raise ValueError(f"Invalid extension for engine '{cls.engine}': '{normalized}'")
    return True
|
1342 |
+
|
1343 |
+
# Allow use as a contextmanager
def __enter__(self) -> Self:
    """Enter the runtime context; the writer itself is the context object."""
    return self
|
1346 |
+
|
1347 |
+
def __exit__(
    self,
    exc_type: type[BaseException] | None,
    exc_value: BaseException | None,
    traceback: TracebackType | None,
) -> None:
    """Exit the runtime context: save and close the workbook unconditionally."""
    # close() is called even when an exception is propagating; note that it
    # triggers _save(), which may itself fail on a partially written book.
    self.close()
|
1354 |
+
|
1355 |
+
def close(self) -> None:
    """synonym for save, to make it more file-like"""
    # Flush the in-memory workbook to the handle first, then release the
    # handle (and any resources get_handle acquired for it).
    self._save()
    self._handles.close()
|
1359 |
+
|
1360 |
+
|
1361 |
+
# Magic-number prefixes used by inspect_excel_format to sniff the on-disk
# format of an Excel file without relying on the file extension.
XLS_SIGNATURES = (
    b"\x09\x00\x04\x00\x07\x00\x10\x00",  # BIFF2
    b"\x09\x02\x06\x00\x00\x00\x10\x00",  # BIFF3
    b"\x09\x04\x06\x00\x00\x00\x10\x00",  # BIFF4
    b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1",  # Compound File Binary
)
# ZIP local-file-header signature: xlsx/xlsm/xlsb/ods are all zip containers.
ZIP_SIGNATURE = b"PK\x03\x04"
# Number of bytes to peek from a stream: enough to match the longest signature.
PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))
|
1369 |
+
|
1370 |
+
|
1371 |
+
@doc(storage_options=_shared_docs["storage_options"])
def inspect_excel_format(
    content_or_path: FilePath | ReadBuffer[bytes],
    storage_options: StorageOptions | None = None,
) -> str | None:
    """
    Inspect the path or content of an excel file and get its format.

    Adopted from xlrd: https://github.com/python-excel/xlrd.

    Parameters
    ----------
    content_or_path : str or file-like object
        Path to file or content of file to inspect. May be a URL.
    {storage_options}

    Returns
    -------
    str or None
        Format of file if it can be determined.

    Raises
    ------
    ValueError
        If resulting stream is empty.
    BadZipFile
        If resulting stream does not have an XLS signature and is not a valid zipfile.
    """
    # Raw bytes are wrapped so everything below can treat the input uniformly.
    if isinstance(content_or_path, bytes):
        content_or_path = BytesIO(content_or_path)

    with get_handle(
        content_or_path, "rb", storage_options=storage_options, is_text=False
    ) as handle:
        stream = handle.handle
        stream.seek(0)
        peek = stream.read(PEEK_SIZE)
        if peek is None:
            raise ValueError("stream is empty")
        assert isinstance(peek, bytes)
        # Rewind so the zipfile probe below sees the whole stream.
        stream.seek(0)

        if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
            return "xls"
        if not peek.startswith(ZIP_SIGNATURE):
            return None

        with zipfile.ZipFile(stream) as zf:
            # Workaround for some third party files that use forward slashes
            # and lower case names.
            component_names = {
                name.replace("\\", "/").lower() for name in zf.namelist()
            }

        # The distinguishing member tells zip-based formats apart.
        for marker, fmt in (
            ("xl/workbook.xml", "xlsx"),
            ("xl/workbook.bin", "xlsb"),
            ("content.xml", "ods"),
        ):
            if marker in component_names:
                return fmt
        return "zip"
|
1433 |
+
|
1434 |
+
|
1435 |
+
class ExcelFile:
    """
    Class for parsing tabular Excel sheets into DataFrame objects.

    See read_excel for more documentation.

    Parameters
    ----------
    path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
        A file-like object, xlrd workbook or openpyxl workbook.
        If a string or path object, expected to be a path to a
        .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
    engine : str, default None
        If io is not a buffer or path, this must be set to identify io.
        Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``
        Engine compatibility :

        - ``xlrd`` supports old-style Excel files (.xls).
        - ``openpyxl`` supports newer Excel file formats.
        - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
        - ``pyxlsb`` supports Binary Excel files.
        - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
          and OpenDocument (.ods) file formats.

        .. versionchanged:: 1.2.0

           The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
           now only supports old-style ``.xls`` files.
           When ``engine=None``, the following logic will be
           used to determine the engine:

           - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
             then `odf <https://pypi.org/project/odfpy/>`_ will be used.
           - Otherwise if ``path_or_buffer`` is an xls format,
             ``xlrd`` will be used.
           - Otherwise if ``path_or_buffer`` is in xlsb format,
             `pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.

           .. versionadded:: 1.3.0

           - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
             then ``openpyxl`` will be used.
           - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.

           .. warning::

            Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
            This is not supported, switch to using ``openpyxl`` instead.
    engine_kwargs : dict, optional
        Arbitrary keyword arguments passed to excel engine.

    Examples
    --------
    >>> file = pd.ExcelFile('myfile.xlsx')  # doctest: +SKIP
    >>> with pd.ExcelFile("myfile.xls") as xls:  # doctest: +SKIP
    ...     df1 = pd.read_excel(xls, "Sheet1")  # doctest: +SKIP
    """

    # Reader classes are imported at class-body scope so the names do not
    # leak onto the module namespace.
    from pandas.io.excel._calamine import CalamineReader
    from pandas.io.excel._odfreader import ODFReader
    from pandas.io.excel._openpyxl import OpenpyxlReader
    from pandas.io.excel._pyxlsb import PyxlsbReader
    from pandas.io.excel._xlrd import XlrdReader

    # Registry mapping engine name -> reader class.
    _engines: Mapping[str, Any] = {
        "xlrd": XlrdReader,
        "openpyxl": OpenpyxlReader,
        "odf": ODFReader,
        "pyxlsb": PyxlsbReader,
        "calamine": CalamineReader,
    }

    def __init__(
        self,
        path_or_buffer,
        engine: str | None = None,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        if engine_kwargs is None:
            engine_kwargs = {}

        # Reject unknown engines up front, before touching the input.
        if engine is not None and engine not in self._engines:
            raise ValueError(f"Unknown engine: {engine}")

        # First argument can also be bytes, so create a buffer
        if isinstance(path_or_buffer, bytes):
            path_or_buffer = BytesIO(path_or_buffer)
            # Deprecated path: callers should wrap bytes in BytesIO themselves.
            warnings.warn(
                "Passing bytes to 'read_excel' is deprecated and "
                "will be removed in a future version. To read from a "
                "byte string, wrap it in a `BytesIO` object.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        # Could be a str, ExcelFile, Book, etc.
        self.io = path_or_buffer
        # Always a string
        self._io = stringify_path(path_or_buffer)

        # Determine xlrd version if installed
        if import_optional_dependency("xlrd", errors="ignore") is None:
            xlrd_version = None
        else:
            import xlrd

            xlrd_version = Version(get_version(xlrd))

        if engine is None:
            # Only determine ext if it is needed
            ext: str | None
            # An already-open xlrd Book can only be an xls file; the
            # isinstance check is safe here because xlrd was imported above
            # whenever xlrd_version is not None.
            if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
                ext = "xls"
            else:
                # Sniff the on-disk format from the file's magic bytes.
                ext = inspect_excel_format(
                    content_or_path=path_or_buffer, storage_options=storage_options
                )
                if ext is None:
                    raise ValueError(
                        "Excel file format cannot be determined, you must specify "
                        "an engine manually."
                    )

            # Resolve engine from the per-format pandas option, falling back
            # to the package default when the option is "auto".
            engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
            if engine == "auto":
                engine = get_default_engine(ext, mode="reader")

        assert engine is not None
        self.engine = engine
        self.storage_options = storage_options

        # Instantiate the engine-specific reader; it opens the workbook.
        self._reader = self._engines[engine](
            self._io,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    def __fspath__(self):
        # os.fspath() support: return the stringified input path.
        return self._io

    def parse(
        self,
        sheet_name: str | int | list[int] | list[str] | None = 0,
        header: int | Sequence[int] | None = 0,
        names: SequenceNotStr[Hashable] | range | None = None,
        index_col: int | Sequence[int] | None = None,
        usecols=None,
        converters=None,
        true_values: Iterable[Hashable] | None = None,
        false_values: Iterable[Hashable] | None = None,
        skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
        nrows: int | None = None,
        na_values=None,
        parse_dates: list | dict | bool = False,
        date_parser: Callable | lib.NoDefault = lib.no_default,
        date_format: str | dict[Hashable, str] | None = None,
        thousands: str | None = None,
        comment: str | None = None,
        skipfooter: int = 0,
        dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
        **kwds,
    ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
        """
        Parse specified sheet(s) into a DataFrame.

        Equivalent to read_excel(ExcelFile, ...)  See the read_excel
        docstring for more info on accepted parameters.

        Returns
        -------
        DataFrame or dict of DataFrames
            DataFrame from the passed in Excel file.

        Examples
        --------
        >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
        >>> df.to_excel('myfile.xlsx')  # doctest: +SKIP
        >>> file = pd.ExcelFile('myfile.xlsx')  # doctest: +SKIP
        >>> file.parse()  # doctest: +SKIP
        """
        # Pure delegation to the engine-specific reader.
        return self._reader.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            parse_dates=parse_dates,
            date_parser=date_parser,
            date_format=date_format,
            thousands=thousands,
            comment=comment,
            skipfooter=skipfooter,
            dtype_backend=dtype_backend,
            **kwds,
        )

    @property
    def book(self):
        # The engine's underlying workbook object (type depends on engine).
        return self._reader.book

    @property
    def sheet_names(self):
        # Sheet names as reported by the engine-specific reader.
        return self._reader.sheet_names

    def close(self) -> None:
        """close io if necessary"""
        self._reader.close()

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        self.close()
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py
ADDED
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import (
|
4 |
+
TYPE_CHECKING,
|
5 |
+
cast,
|
6 |
+
)
|
7 |
+
|
8 |
+
import numpy as np
|
9 |
+
|
10 |
+
from pandas._typing import (
|
11 |
+
FilePath,
|
12 |
+
ReadBuffer,
|
13 |
+
Scalar,
|
14 |
+
StorageOptions,
|
15 |
+
)
|
16 |
+
from pandas.compat._optional import import_optional_dependency
|
17 |
+
from pandas.util._decorators import doc
|
18 |
+
|
19 |
+
import pandas as pd
|
20 |
+
from pandas.core.shared_docs import _shared_docs
|
21 |
+
|
22 |
+
from pandas.io.excel._base import BaseExcelReader
|
23 |
+
|
24 |
+
if TYPE_CHECKING:
|
25 |
+
from odf.opendocument import OpenDocument
|
26 |
+
|
27 |
+
from pandas._libs.tslibs.nattype import NaTType
|
28 |
+
|
29 |
+
|
30 |
+
@doc(storage_options=_shared_docs["storage_options"])
class ODFReader(BaseExcelReader["OpenDocument"]):
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Read tables out of OpenDocument formatted files.

        Parameters
        ----------
        filepath_or_buffer : str, path to be parsed or
            an open readable stream.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        # Fail early with a helpful message if odfpy is missing.
        import_optional_dependency("odf")
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[OpenDocument]:
        from odf.opendocument import OpenDocument

        return OpenDocument

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
    ) -> OpenDocument:
        """Open the document with odfpy's loader, forwarding engine kwargs."""
        from odf.opendocument import load

        return load(filepath_or_buffer, **engine_kwargs)

    @property
    def empty_value(self) -> str:
        """Property for compat with other readers."""
        return ""

    @property
    def sheet_names(self) -> list[str]:
        """Return a list of sheet names present in the document"""
        from odf.table import Table

        tables = self.book.getElementsByType(Table)
        return [t.getAttribute("name") for t in tables]

    def get_sheet_by_index(self, index: int):
        """Return the Table element at position ``index`` in the document."""
        from odf.table import Table

        self.raise_if_bad_sheet_by_index(index)
        tables = self.book.getElementsByType(Table)
        return tables[index]

    def get_sheet_by_name(self, name: str):
        """Return the Table element whose name attribute equals ``name``."""
        from odf.table import Table

        self.raise_if_bad_sheet_by_name(name)
        tables = self.book.getElementsByType(Table)

        for table in tables:
            if table.getAttribute("name") == name:
                return table

        # Close the handle before raising so the file is not left open.
        self.close()
        raise ValueError(f"sheet {name} not found")

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar | NaTType]]:
        """
        Parse an ODF Table into a list of lists
        """
        from odf.table import (
            CoveredTableCell,
            TableCell,
            TableRow,
        )

        covered_cell_name = CoveredTableCell().qname
        table_cell_name = TableCell().qname
        cell_names = {covered_cell_name, table_cell_name}

        sheet_rows = sheet.getElementsByType(TableRow)
        empty_rows = 0
        max_row_len = 0

        table: list[list[Scalar | NaTType]] = []

        for sheet_row in sheet_rows:
            # Only (covered) table cells carry data; skip other child nodes.
            sheet_cells = [
                x
                for x in sheet_row.childNodes
                if hasattr(x, "qname") and x.qname in cell_names
            ]
            empty_cells = 0
            table_row: list[Scalar | NaTType] = []

            for sheet_cell in sheet_cells:
                if sheet_cell.qname == table_cell_name:
                    value = self._get_cell_value(sheet_cell)
                else:
                    # Covered cells (under a merged region) read as empty.
                    value = self.empty_value

                column_repeat = self._get_column_repeat(sheet_cell)

                # Queue up empty values, writing only if content succeeds them
                if value == self.empty_value:
                    empty_cells += column_repeat
                else:
                    table_row.extend([self.empty_value] * empty_cells)
                    empty_cells = 0
                    table_row.extend([value] * column_repeat)

            if max_row_len < len(table_row):
                max_row_len = len(table_row)

            row_repeat = self._get_row_repeat(sheet_row)
            if len(table_row) == 0:
                # Defer empty rows the same way empty cells are deferred.
                empty_rows += row_repeat
            else:
                # add blank rows to our table
                table.extend([[self.empty_value]] * empty_rows)
                empty_rows = 0
                # NOTE: the same table_row list object is appended row_repeat
                # times; later padding extends that one shared object, which
                # keeps the repeated rows identical.
                table.extend(table_row for _ in range(row_repeat))
            if file_rows_needed is not None and len(table) >= file_rows_needed:
                break

        # Make our table square
        for row in table:
            if len(row) < max_row_len:
                row.extend([self.empty_value] * (max_row_len - len(row)))

        return table

    def _get_row_repeat(self, row) -> int:
        """
        Return number of times this row was repeated
        Repeating an empty row appeared to be a common way
        of representing sparse rows in the table.
        """
        from odf.namespaces import TABLENS

        return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))

    def _get_column_repeat(self, cell) -> int:
        # Same run-length convention as rows, but for repeated columns.
        from odf.namespaces import TABLENS

        return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))

    def _get_cell_value(self, cell) -> Scalar | NaTType:
        """Convert an ODF cell to a Python scalar based on its value-type."""
        from odf.namespaces import OFFICENS

        if str(cell) == "#N/A":
            return np.nan

        cell_type = cell.attributes.get((OFFICENS, "value-type"))
        if cell_type == "boolean":
            if str(cell) == "TRUE":
                return True
            return False
        if cell_type is None:
            return self.empty_value
        elif cell_type == "float":
            # GH5394
            cell_value = float(cell.attributes.get((OFFICENS, "value")))
            # Collapse whole-number floats to int to match other readers.
            val = int(cell_value)
            if val == cell_value:
                return val
            return cell_value
        elif cell_type == "percentage":
            cell_value = cell.attributes.get((OFFICENS, "value"))
            return float(cell_value)
        elif cell_type == "string":
            return self._get_cell_string_value(cell)
        elif cell_type == "currency":
            cell_value = cell.attributes.get((OFFICENS, "value"))
            return float(cell_value)
        elif cell_type == "date":
            cell_value = cell.attributes.get((OFFICENS, "date-value"))
            return pd.Timestamp(cell_value)
        elif cell_type == "time":
            stamp = pd.Timestamp(str(cell))
            # cast needed here because Scalar doesn't include datetime.time
            return cast(Scalar, stamp.time())
        else:
            self.close()
            raise ValueError(f"Unrecognized type {cell_type}")

    def _get_cell_string_value(self, cell) -> str:
        """
        Find and decode OpenDocument text:s tags that represent
        a run length encoded sequence of space characters.
        """
        from odf.element import Element
        from odf.namespaces import TEXTNS
        from odf.office import Annotation
        from odf.text import S

        office_annotation = Annotation().qname
        text_s = S().qname

        value = []

        for fragment in cell.childNodes:
            if isinstance(fragment, Element):
                if fragment.qname == text_s:
                    # text:s encodes a run of spaces; "c" holds the count.
                    spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
                    value.append(" " * spaces)
                elif fragment.qname == office_annotation:
                    # Cell comments are not part of the cell's value.
                    continue
                else:
                    # recursive impl needed in case of nested fragments
                    # with multiple spaces
                    # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704
                    value.append(self._get_cell_string_value(fragment))
            else:
                value.append(str(fragment).strip("\n"))
        return "".join(value)
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# pyright: reportMissingImports=false
|
2 |
+
from __future__ import annotations
|
3 |
+
|
4 |
+
from typing import TYPE_CHECKING
|
5 |
+
|
6 |
+
from pandas.compat._optional import import_optional_dependency
|
7 |
+
from pandas.util._decorators import doc
|
8 |
+
|
9 |
+
from pandas.core.shared_docs import _shared_docs
|
10 |
+
|
11 |
+
from pandas.io.excel._base import BaseExcelReader
|
12 |
+
|
13 |
+
if TYPE_CHECKING:
|
14 |
+
from pyxlsb import Workbook
|
15 |
+
|
16 |
+
from pandas._typing import (
|
17 |
+
FilePath,
|
18 |
+
ReadBuffer,
|
19 |
+
Scalar,
|
20 |
+
StorageOptions,
|
21 |
+
)
|
22 |
+
|
23 |
+
|
24 |
+
class PyxlsbReader(BaseExcelReader["Workbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using pyxlsb engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object, or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("pyxlsb")
        # This will call load_workbook on the filepath or buffer
        # And set the result to the book-attribute
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Workbook]:
        from pyxlsb import Workbook

        return Workbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
    ) -> Workbook:
        """Open the xlsb workbook with pyxlsb, forwarding engine kwargs."""
        from pyxlsb import open_workbook

        # TODO: hack in buffer capability
        # This might need some modifications to the Pyxlsb library
        # Actual work for opening it is in xlsbpackage.py, line 20-ish

        return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self) -> list[str]:
        # pyxlsb exposes sheet names directly on the workbook.
        return self.book.sheets

    def get_sheet_by_name(self, name: str):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.get_sheet(name)

    def get_sheet_by_index(self, index: int):
        self.raise_if_bad_sheet_by_index(index)
        # pyxlsb sheets are indexed from 1 onwards
        # There's a fix for this in the source, but the pypi package doesn't have it
        return self.book.get_sheet(index + 1)

    def _convert_cell(self, cell) -> Scalar:
        """Convert a pyxlsb cell value to the scalar pandas expects."""
        # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
        # This means that there is no way to read datetime types from an xlsb file yet
        if cell.v is None:
            return ""  # Prevents non-named columns from not showing up as Unnamed: i
        if isinstance(cell.v, float):
            # Collapse whole-number floats to int to match other readers.
            val = int(cell.v)
            if val == cell.v:
                return val
            else:
                return float(cell.v)

        return cell.v

    def get_sheet_data(
        self,
        sheet,
        file_rows_needed: int | None = None,
    ) -> list[list[Scalar]]:
        """Materialize a pyxlsb sheet into a rectangular list of lists."""
        data: list[list[Scalar]] = []
        previous_row_number = -1
        # When sparse=True the rows can have different lengths and empty rows are
        # not returned. The cells are namedtuples of row, col, value (r, c, v).
        for row in sheet.rows(sparse=True):
            row_number = row[0].r
            converted_row = [self._convert_cell(cell) for cell in row]
            while converted_row and converted_row[-1] == "":
                # trim trailing empty elements
                converted_row.pop()
            if converted_row:
                # Re-insert the empty rows that sparse iteration skipped.
                data.extend([[]] * (row_number - previous_row_number - 1))
                data.append(converted_row)
                previous_row_number = row_number
            if file_rows_needed is not None and len(data) >= file_rows_needed:
                break
        if data:
            # extend rows to max_width
            max_width = max(len(data_row) for data_row in data)
            if min(len(data_row) for data_row in data) < max_width:
                empty_cell: list[Scalar] = [""]
                data = [
                    data_row + (max_width - len(data_row)) * empty_cell
                    for data_row in data
                ]
        return data
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_util.py
ADDED
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from collections.abc import (
|
4 |
+
Hashable,
|
5 |
+
Iterable,
|
6 |
+
MutableMapping,
|
7 |
+
Sequence,
|
8 |
+
)
|
9 |
+
from typing import (
|
10 |
+
TYPE_CHECKING,
|
11 |
+
Any,
|
12 |
+
Callable,
|
13 |
+
Literal,
|
14 |
+
TypeVar,
|
15 |
+
overload,
|
16 |
+
)
|
17 |
+
|
18 |
+
from pandas.compat._optional import import_optional_dependency
|
19 |
+
|
20 |
+
from pandas.core.dtypes.common import (
|
21 |
+
is_integer,
|
22 |
+
is_list_like,
|
23 |
+
)
|
24 |
+
|
25 |
+
if TYPE_CHECKING:
|
26 |
+
from pandas.io.excel._base import ExcelWriter
|
27 |
+
|
28 |
+
ExcelWriter_t = type[ExcelWriter]
|
29 |
+
usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])
|
30 |
+
|
31 |
+
_writers: MutableMapping[str, ExcelWriter_t] = {}
|
32 |
+
|
33 |
+
|
34 |
+
def register_writer(klass: ExcelWriter_t) -> None:
    """
    Add an engine to the excel writer registry.

    You must use this method to integrate with ``to_excel``.

    Parameters
    ----------
    klass : ExcelWriter
        The writer class to register. Its ``_engine`` attribute is used as
        the registry key, overwriting any previously registered writer for
        the same engine name.
    """
    if not callable(klass):
        raise ValueError("Can only register callables as engines")
    engine_name = klass._engine
    _writers[engine_name] = klass
|
48 |
+
|
49 |
+
|
50 |
+
def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:
    """
    Return the default reader/writer for the given extension.

    Parameters
    ----------
    ext : str
        The excel file extension for which to get the default engine.
    mode : str {'reader', 'writer'}
        Whether to get the default engine for reading or writing.
        Either 'reader' or 'writer'

    Returns
    -------
    str
        The default engine for the extension.
    """
    reader_defaults = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "xls": "xlrd",
        "ods": "odf",
    }
    writer_defaults = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "ods": "odf",
    }
    assert mode in ["reader", "writer"]
    if mode == "reader":
        return reader_defaults[ext]
    # Writer path: prefer xlsxwriter over openpyxl if it is installed.
    xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")
    if xlsxwriter:
        writer_defaults["xlsx"] = "xlsxwriter"
    return writer_defaults[ext]
|
89 |
+
|
90 |
+
|
91 |
+
def get_writer(engine_name: str) -> ExcelWriter_t:
    """Return the registered ExcelWriter class for ``engine_name``.

    Raises ValueError (chained from the KeyError) when no writer has been
    registered under that name.
    """
    try:
        klass = _writers[engine_name]
    except KeyError as err:
        raise ValueError(f"No Excel writer '{engine_name}'") from err
    return klass
|
96 |
+
|
97 |
+
|
98 |
+
def _excel2num(x: str) -> int:
|
99 |
+
"""
|
100 |
+
Convert Excel column name like 'AB' to 0-based column index.
|
101 |
+
|
102 |
+
Parameters
|
103 |
+
----------
|
104 |
+
x : str
|
105 |
+
The Excel column name to convert to a 0-based column index.
|
106 |
+
|
107 |
+
Returns
|
108 |
+
-------
|
109 |
+
num : int
|
110 |
+
The column index corresponding to the name.
|
111 |
+
|
112 |
+
Raises
|
113 |
+
------
|
114 |
+
ValueError
|
115 |
+
Part of the Excel column name was invalid.
|
116 |
+
"""
|
117 |
+
index = 0
|
118 |
+
|
119 |
+
for c in x.upper().strip():
|
120 |
+
cp = ord(c)
|
121 |
+
|
122 |
+
if cp < ord("A") or cp > ord("Z"):
|
123 |
+
raise ValueError(f"Invalid column name: {x}")
|
124 |
+
|
125 |
+
index = index * 26 + cp - ord("A") + 1
|
126 |
+
|
127 |
+
return index - 1
|
128 |
+
|
129 |
+
|
130 |
+
def _range2cols(areas: str) -> list[int]:
    """
    Convert comma separated list of column names and ranges to indices.

    Parameters
    ----------
    areas : str
        A string containing a sequence of column ranges (or areas).

    Returns
    -------
    cols : list
        A list of 0-based column indices.

    Examples
    --------
    >>> _range2cols('A:E')
    [0, 1, 2, 3, 4]
    >>> _range2cols('A,C,Z:AB')
    [0, 2, 25, 26, 27]
    """
    indices: list[int] = []

    for area in areas.split(","):
        if ":" in area:
            # An inclusive letter range such as "A:E".
            endpoints = area.split(":")
            start = _excel2num(endpoints[0])
            stop = _excel2num(endpoints[1])
            indices.extend(range(start, stop + 1))
        else:
            indices.append(_excel2num(area))

    return indices
|
161 |
+
|
162 |
+
|
163 |
+
@overload
def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
    ...


@overload
def maybe_convert_usecols(usecols: list[str]) -> list[str]:
    ...


@overload
def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
    ...


@overload
def maybe_convert_usecols(usecols: None) -> None:
    ...


def maybe_convert_usecols(
    usecols: str | list[int] | list[str] | usecols_func | None,
) -> None | list[int] | list[str] | usecols_func:
    """
    Convert `usecols` into a compatible format for parsing in `parsers.py`.

    Parameters
    ----------
    usecols : object
        The use-columns object to potentially convert.

    Returns
    -------
    converted : object
        The compatible format of `usecols`: a string of Excel-style column
        letters/ranges is expanded to a list of 0-based indices; lists,
        callables, and None pass through unchanged.
    """
    if usecols is None:
        return None

    if is_integer(usecols):
        raise ValueError(
            "Passing an integer for `usecols` is no longer supported. "
            "Please pass in a list of int from 0 to `usecols` inclusive instead."
        )

    if isinstance(usecols, str):
        # Excel-style specification, e.g. "A:C,F".
        return _range2cols(usecols)

    return usecols
|
212 |
+
|
213 |
+
|
214 |
+
@overload
def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
    ...


@overload
def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
    ...


def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
    """
    Validate a ``freeze_panes`` argument for the Excel writers.

    Returns True when ``freeze_panes`` is a valid ``(row, column)`` pair of
    integers, False when it is None (meaning: do not apply freeze panes to
    the output sheet), and raises ValueError otherwise.
    """
    if freeze_panes is None:
        # freeze_panes wasn't specified, return False so it won't be applied
        # to the output sheet.
        return False

    is_valid_pair = len(freeze_panes) == 2 and all(
        isinstance(item, int) for item in freeze_panes
    )
    if is_valid_pair:
        return True

    raise ValueError(
        "freeze_panes must be of form (row, column) "
        "where row and column are integers"
    )
|
239 |
+
|
240 |
+
|
241 |
+
def fill_mi_header(
    row: list[Hashable], control_row: list[bool]
) -> tuple[list[Hashable], list[bool]]:
    """
    Forward fill blank entries in row but only inside the same parent index.

    Used for creating headers in Multiindex.

    Parameters
    ----------
    row : list
        List of items in a single row. Modified in place.
    control_row : list of bool
        Helps to determine if particular column is in same parent index as the
        previous value. Used to stop propagation of empty cells between
        different indexes. Modified in place.

    Returns
    -------
    Returns changed row and control_row
    """
    fill_value = row[0]
    for idx, cell in enumerate(row[1:], start=1):
        # A False control flag means this column belongs to a different
        # parent index, so restart the fill value from the current cell.
        if not control_row[idx]:
            fill_value = cell

        if cell == "" or cell is None:
            row[idx] = fill_value
        else:
            # Non-blank entry: mark the boundary and carry this value forward.
            control_row[idx] = False
            fill_value = cell

    return row, control_row
|
274 |
+
|
275 |
+
|
276 |
+
def pop_header_name(
    row: list[Hashable], index_col: int | Sequence[int]
) -> tuple[Hashable | None, list[Hashable]]:
    """
    Pop the header name for MultiIndex parsing.

    Parameters
    ----------
    row : list
        The data row to parse for the header name.
    index_col : int, list
        The index columns for our data. Assumed to be non-null.

    Returns
    -------
    header_name : str
        The extracted header name (None when the cell is an empty string).
    trimmed_row : list
        The original data row with the header name removed.
    """
    # The header name lives in the right-most index column.
    if is_list_like(index_col):
        assert isinstance(index_col, Iterable)
        pos = max(index_col)
    else:
        assert not isinstance(index_col, Iterable)
        pos = index_col

    name = row[pos]
    if name == "":
        name = None

    # Blank out the popped slot rather than shrinking the row, so column
    # positions stay aligned.
    return name, row[:pos] + [""] + row[pos + 1 :]
|
308 |
+
|
309 |
+
|
310 |
+
def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
    """
    Used to combine two sources of kwargs for the backend engine.

    Use of kwargs is deprecated, this function is solely for use in 1.3 and should
    be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs
    or kwargs must be None or empty respectively.

    Parameters
    ----------
    engine_kwargs: dict
        kwargs to be passed through to the engine.
    kwargs: dict
        kwargs to be passed through to the engine (deprecated)

    Returns
    -------
    dict
        engine_kwargs combined with kwargs; on duplicate keys, the entry
        from ``kwargs`` wins.
    """
    # Copy so the caller's engine_kwargs dict is never mutated.
    result = {} if engine_kwargs is None else engine_kwargs.copy()
    result.update(kwargs)
    return result
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from datetime import time
|
4 |
+
import math
|
5 |
+
from typing import TYPE_CHECKING
|
6 |
+
|
7 |
+
import numpy as np
|
8 |
+
|
9 |
+
from pandas.compat._optional import import_optional_dependency
|
10 |
+
from pandas.util._decorators import doc
|
11 |
+
|
12 |
+
from pandas.core.shared_docs import _shared_docs
|
13 |
+
|
14 |
+
from pandas.io.excel._base import BaseExcelReader
|
15 |
+
|
16 |
+
if TYPE_CHECKING:
|
17 |
+
from xlrd import Book
|
18 |
+
|
19 |
+
from pandas._typing import (
|
20 |
+
Scalar,
|
21 |
+
StorageOptions,
|
22 |
+
)
|
23 |
+
|
24 |
+
|
25 |
+
class XlrdReader(BaseExcelReader["Book"]):
    """Excel reader backed by the ``xlrd`` package (``.xls`` support)."""

    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using xlrd engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
        # Fail fast with an actionable message when xlrd is not installed.
        import_optional_dependency("xlrd", extra=err_msg)
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Book]:
        # Imported lazily so importing pandas does not require xlrd.
        from xlrd import Book

        return Book

    def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book:
        from xlrd import open_workbook

        # File-like inputs are read fully into memory and handed to xlrd as
        # raw contents; paths are opened by xlrd itself.
        if hasattr(filepath_or_buffer, "read"):
            data = filepath_or_buffer.read()
            return open_workbook(file_contents=data, **engine_kwargs)
        else:
            return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self):
        # Worksheet names as reported by the xlrd Book.
        return self.book.sheet_names()

    def get_sheet_by_name(self, name):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.sheet_by_name(name)

    def get_sheet_by_index(self, index):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.sheet_by_index(index)

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar]]:
        """
        Convert an xlrd sheet into a list of rows of Python scalars.

        Parameters
        ----------
        sheet : xlrd sheet object
            The worksheet to read.
        file_rows_needed : int, optional
            When given, read at most this many rows instead of the whole
            sheet.
        """
        from xlrd import (
            XL_CELL_BOOLEAN,
            XL_CELL_DATE,
            XL_CELL_ERROR,
            XL_CELL_NUMBER,
            xldate,
        )

        # Workbook datemode selects the serial-date epoch (1900 vs 1904).
        epoch1904 = self.book.datemode

        def _parse_cell(cell_contents, cell_typ):
            """
            converts the contents of the cell into a pandas appropriate object
            """
            if cell_typ == XL_CELL_DATE:
                # Use the newer xlrd datetime handling.
                try:
                    cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
                except OverflowError:
                    # Serial value outside datetime range: return it raw.
                    return cell_contents

                # Excel doesn't distinguish between dates and time,
                # so we treat dates on the epoch as times only.
                # Also, Excel supports 1900 and 1904 epochs.
                year = (cell_contents.timetuple())[0:3]
                if (not epoch1904 and year == (1899, 12, 31)) or (
                    epoch1904 and year == (1904, 1, 1)
                ):
                    cell_contents = time(
                        cell_contents.hour,
                        cell_contents.minute,
                        cell_contents.second,
                        cell_contents.microsecond,
                    )

            elif cell_typ == XL_CELL_ERROR:
                # Error cells become missing values.
                cell_contents = np.nan
            elif cell_typ == XL_CELL_BOOLEAN:
                cell_contents = bool(cell_contents)
            elif cell_typ == XL_CELL_NUMBER:
                # GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
                if math.isfinite(cell_contents):
                    # GH54564 - don't attempt to convert NaN/Inf
                    val = int(cell_contents)
                    if val == cell_contents:
                        cell_contents = val
            return cell_contents

        data = []

        nrows = sheet.nrows
        if file_rows_needed is not None:
            nrows = min(nrows, file_rows_needed)
        for i in range(nrows):
            row = [
                _parse_cell(value, typ)
                for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
            ]
            data.append(row)

        return data
|
env-llmeval/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py
ADDED
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import json
|
4 |
+
from typing import (
|
5 |
+
TYPE_CHECKING,
|
6 |
+
Any,
|
7 |
+
)
|
8 |
+
|
9 |
+
from pandas.io.excel._base import ExcelWriter
|
10 |
+
from pandas.io.excel._util import (
|
11 |
+
combine_kwargs,
|
12 |
+
validate_freeze_panes,
|
13 |
+
)
|
14 |
+
|
15 |
+
if TYPE_CHECKING:
|
16 |
+
from pandas._typing import (
|
17 |
+
ExcelWriterIfSheetExists,
|
18 |
+
FilePath,
|
19 |
+
StorageOptions,
|
20 |
+
WriteExcelBuffer,
|
21 |
+
)
|
22 |
+
|
23 |
+
|
24 |
+
class _XlsxStyler:
    """Translate openpyxl-style nested style dicts into xlsxwriter format kwargs."""

    # Map from openpyxl-oriented styles to flatter xlsxwriter representation
    # Ordering necessary for both determinism and because some are keyed by
    # prefixes of others.
    STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
        "font": [
            (("name",), "font_name"),
            (("sz",), "font_size"),
            (("size",), "font_size"),
            (("color", "rgb"), "font_color"),
            (("color",), "font_color"),
            (("b",), "bold"),
            (("bold",), "bold"),
            (("i",), "italic"),
            (("italic",), "italic"),
            (("u",), "underline"),
            (("underline",), "underline"),
            (("strike",), "font_strikeout"),
            (("vertAlign",), "font_script"),
            (("vertalign",), "font_script"),
        ],
        "number_format": [(("format_code",), "num_format"), ((), "num_format")],
        "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
        "alignment": [
            (("horizontal",), "align"),
            (("vertical",), "valign"),
            (("text_rotation",), "rotation"),
            (("wrap_text",), "text_wrap"),
            (("indent",), "indent"),
            (("shrink_to_fit",), "shrink"),
        ],
        "fill": [
            (("patternType",), "pattern"),
            (("patterntype",), "pattern"),
            (("fill_type",), "pattern"),
            (("start_color", "rgb"), "fg_color"),
            (("fgColor", "rgb"), "fg_color"),
            (("fgcolor", "rgb"), "fg_color"),
            (("start_color",), "fg_color"),
            (("fgColor",), "fg_color"),
            (("fgcolor",), "fg_color"),
            (("end_color", "rgb"), "bg_color"),
            (("bgColor", "rgb"), "bg_color"),
            (("bgcolor", "rgb"), "bg_color"),
            (("end_color",), "bg_color"),
            (("bgColor",), "bg_color"),
            (("bgcolor",), "bg_color"),
        ],
        "border": [
            (("color", "rgb"), "border_color"),
            (("color",), "border_color"),
            (("style",), "border"),
            (("top", "color", "rgb"), "top_color"),
            (("top", "color"), "top_color"),
            (("top", "style"), "top"),
            (("top",), "top"),
            (("right", "color", "rgb"), "right_color"),
            (("right", "color"), "right_color"),
            (("right", "style"), "right"),
            (("right",), "right"),
            (("bottom", "color", "rgb"), "bottom_color"),
            (("bottom", "color"), "bottom_color"),
            (("bottom", "style"), "bottom"),
            (("bottom",), "bottom"),
            (("left", "color", "rgb"), "left_color"),
            (("left", "color"), "left_color"),
            (("left", "style"), "left"),
            (("left",), "left"),
        ],
    }

    @classmethod
    def convert(cls, style_dict, num_format_str=None):
        """
        converts a style_dict to an xlsxwriter format dict

        Parameters
        ----------
        style_dict : style dictionary to convert
        num_format_str : optional number format string
        """
        # Create a XlsxWriter format object.
        props = {}

        if num_format_str is not None:
            props["num_format"] = num_format_str

        if style_dict is None:
            return props

        if "borders" in style_dict:
            # Accept the legacy "borders" key as an alias for "border";
            # copy first so the caller's dict is not mutated.
            style_dict = style_dict.copy()
            style_dict["border"] = style_dict.pop("borders")

        for style_group_key, style_group in style_dict.items():
            for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
                # src is a sequence of keys into a nested dict
                # dst is a flat key
                if dst in props:
                    # First mapping wins; later (less specific) entries for
                    # the same dst are ignored.
                    continue
                v = style_group
                for k in src:
                    try:
                        v = v[k]
                    except (KeyError, TypeError):
                        break
                else:
                    # Only reached when every key in src resolved.
                    props[dst] = v

        if isinstance(props.get("pattern"), str):
            # TODO: support other fill patterns
            props["pattern"] = 0 if props["pattern"] == "none" else 1

        for k in ["border", "top", "right", "bottom", "left"]:
            if isinstance(props.get(k), str):
                try:
                    # Translate an openpyxl border-style name into the
                    # corresponding xlsxwriter integer (its list position).
                    props[k] = [
                        "none",
                        "thin",
                        "medium",
                        "dashed",
                        "dotted",
                        "thick",
                        "double",
                        "hair",
                        "mediumDashed",
                        "dashDot",
                        "mediumDashDot",
                        "dashDotDot",
                        "mediumDashDotDot",
                        "slantDashDot",
                    ].index(props[k])
                except ValueError:
                    # Unknown style name: fall back to 2 ("medium").
                    props[k] = 2

        if isinstance(props.get("font_script"), str):
            props["font_script"] = ["baseline", "superscript", "subscript"].index(
                props["font_script"]
            )

        if isinstance(props.get("underline"), str):
            props["underline"] = {
                "none": 0,
                "single": 1,
                "double": 2,
                "singleAccounting": 33,
                "doubleAccounting": 34,
            }[props["underline"]]

        # GH 30107 - xlsxwriter uses different name
        if props.get("valign") == "center":
            props["valign"] = "vcenter"

        return props
|
178 |
+
|
179 |
+
|
180 |
+
class XlsxWriter(ExcelWriter):
    """ExcelWriter implementation using the ``xlsxwriter`` package (.xlsx only)."""

    _engine = "xlsxwriter"
    _supported_extensions = (".xlsx",)

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook

        # Merge deprecated **kwargs into engine_kwargs (kwargs wins).
        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        if mode == "a":
            # xlsxwriter can only create new workbooks, not modify existing ones.
            raise ValueError("Append mode is not supported with xlsxwriter!")

        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        try:
            self._book = Workbook(self._handles.handle, **engine_kwargs)
        except TypeError:
            # Close the handle opened by super().__init__ before re-raising,
            # so invalid engine_kwargs do not leak an open file handle.
            self._handles.handle.close()
            raise

    @property
    def book(self):
        """
        Book instance of class xlsxwriter.Workbook.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        # Mapping of sheet name -> worksheet object, as kept by the Workbook.
        result = self.book.sheetnames
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        # xlsxwriter writes the file when the workbook is closed.
        self.book.close()

    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        # Reuse an existing worksheet of the same name, otherwise create it.
        wks = self.book.get_worksheet_by_name(sheet_name)
        if wks is None:
            wks = self.book.add_worksheet(sheet_name)

        # Cache of serialized style -> xlsxwriter Format object, so each
        # distinct style is converted only once per call.
        style_dict = {"null": None}

        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            # Key the style cache on the JSON form of the style plus any
            # value-derived number format.
            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__init__.py
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# ruff: noqa: TCH004
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # import modules that have public classes/functions
    from pandas.io.formats import style

# and mark only those modules as public
# (the TYPE_CHECKING guard keeps the import lazy at runtime; the noqa above
# suppresses the lint rule that would flag this pattern)
__all__ = ["style"]
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (307 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc
ADDED
Binary file (4.52 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc
ADDED
Binary file (1.91 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc
ADDED
Binary file (10.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc
ADDED
Binary file (9.88 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc
ADDED
Binary file (24.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc
ADDED
Binary file (57.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc
ADDED
Binary file (16 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc
ADDED
Binary file (36.5 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc
ADDED
Binary file (16.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc
ADDED
Binary file (6.48 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc
ADDED
Binary file (137 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc
ADDED
Binary file (75.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc
ADDED
Binary file (14.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/_color_data.py
ADDED
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# GH37967: Enable the use of CSS named colors, as defined in
|
2 |
+
# matplotlib.colors.CSS4_COLORS, when exporting to Excel.
|
3 |
+
# This data has been copied here, instead of being imported from matplotlib,
|
4 |
+
# not to have ``to_excel`` methods require matplotlib.
|
5 |
+
# source: matplotlib._color_data (3.3.3)
|
6 |
+
from __future__ import annotations
|
7 |
+
|
8 |
+
# Mapping of CSS4 color name -> RRGGBB hex string (no leading '#').
CSS4_COLORS: dict[str, str] = {
    "aliceblue": "F0F8FF",
    "antiquewhite": "FAEBD7",
    "aqua": "00FFFF",
    "aquamarine": "7FFFD4",
    "azure": "F0FFFF",
    "beige": "F5F5DC",
    "bisque": "FFE4C4",
    "black": "000000",
    "blanchedalmond": "FFEBCD",
    "blue": "0000FF",
    "blueviolet": "8A2BE2",
    "brown": "A52A2A",
    "burlywood": "DEB887",
    "cadetblue": "5F9EA0",
    "chartreuse": "7FFF00",
    "chocolate": "D2691E",
    "coral": "FF7F50",
    "cornflowerblue": "6495ED",
    "cornsilk": "FFF8DC",
    "crimson": "DC143C",
    "cyan": "00FFFF",
    "darkblue": "00008B",
    "darkcyan": "008B8B",
    "darkgoldenrod": "B8860B",
    "darkgray": "A9A9A9",
    "darkgreen": "006400",
    "darkgrey": "A9A9A9",
    "darkkhaki": "BDB76B",
    "darkmagenta": "8B008B",
    "darkolivegreen": "556B2F",
    "darkorange": "FF8C00",
    "darkorchid": "9932CC",
    "darkred": "8B0000",
    "darksalmon": "E9967A",
    "darkseagreen": "8FBC8F",
    "darkslateblue": "483D8B",
    "darkslategray": "2F4F4F",
    "darkslategrey": "2F4F4F",
    "darkturquoise": "00CED1",
    "darkviolet": "9400D3",
    "deeppink": "FF1493",
    "deepskyblue": "00BFFF",
    "dimgray": "696969",
    "dimgrey": "696969",
    "dodgerblue": "1E90FF",
    "firebrick": "B22222",
    "floralwhite": "FFFAF0",
    "forestgreen": "228B22",
    "fuchsia": "FF00FF",
    "gainsboro": "DCDCDC",
    "ghostwhite": "F8F8FF",
    "gold": "FFD700",
    "goldenrod": "DAA520",
    "gray": "808080",
    "green": "008000",
    "greenyellow": "ADFF2F",
    "grey": "808080",
    "honeydew": "F0FFF0",
    "hotpink": "FF69B4",
    "indianred": "CD5C5C",
    "indigo": "4B0082",
    "ivory": "FFFFF0",
    "khaki": "F0E68C",
    "lavender": "E6E6FA",
    "lavenderblush": "FFF0F5",
    "lawngreen": "7CFC00",
    "lemonchiffon": "FFFACD",
    "lightblue": "ADD8E6",
    "lightcoral": "F08080",
    "lightcyan": "E0FFFF",
    "lightgoldenrodyellow": "FAFAD2",
    "lightgray": "D3D3D3",
    "lightgreen": "90EE90",
    "lightgrey": "D3D3D3",
    "lightpink": "FFB6C1",
    "lightsalmon": "FFA07A",
    "lightseagreen": "20B2AA",
    "lightskyblue": "87CEFA",
    "lightslategray": "778899",
    "lightslategrey": "778899",
    "lightsteelblue": "B0C4DE",
    "lightyellow": "FFFFE0",
    "lime": "00FF00",
    "limegreen": "32CD32",
    "linen": "FAF0E6",
    "magenta": "FF00FF",
    "maroon": "800000",
    "mediumaquamarine": "66CDAA",
    "mediumblue": "0000CD",
    "mediumorchid": "BA55D3",
    "mediumpurple": "9370DB",
    "mediumseagreen": "3CB371",
    "mediumslateblue": "7B68EE",
    "mediumspringgreen": "00FA9A",
    "mediumturquoise": "48D1CC",
    "mediumvioletred": "C71585",
    "midnightblue": "191970",
    "mintcream": "F5FFFA",
    "mistyrose": "FFE4E1",
    "moccasin": "FFE4B5",
    "navajowhite": "FFDEAD",
    "navy": "000080",
    "oldlace": "FDF5E6",
    "olive": "808000",
    "olivedrab": "6B8E23",
    "orange": "FFA500",
    "orangered": "FF4500",
    "orchid": "DA70D6",
    "palegoldenrod": "EEE8AA",
    "palegreen": "98FB98",
    "paleturquoise": "AFEEEE",
    "palevioletred": "DB7093",
    "papayawhip": "FFEFD5",
    "peachpuff": "FFDAB9",
    "peru": "CD853F",
    "pink": "FFC0CB",
    "plum": "DDA0DD",
    "powderblue": "B0E0E6",
    "purple": "800080",
    "rebeccapurple": "663399",
    "red": "FF0000",
    "rosybrown": "BC8F8F",
    "royalblue": "4169E1",
    "saddlebrown": "8B4513",
    "salmon": "FA8072",
    "sandybrown": "F4A460",
    "seagreen": "2E8B57",
    "seashell": "FFF5EE",
    "sienna": "A0522D",
    "silver": "C0C0C0",
    "skyblue": "87CEEB",
    "slateblue": "6A5ACD",
    "slategray": "708090",
    "slategrey": "708090",
    "snow": "FFFAFA",
    "springgreen": "00FF7F",
    "steelblue": "4682B4",
    "tan": "D2B48C",
    "teal": "008080",
    "thistle": "D8BFD8",
    "tomato": "FF6347",
    "turquoise": "40E0D0",
    "violet": "EE82EE",
    "wheat": "F5DEB3",
    "white": "FFFFFF",
    "whitesmoke": "F5F5F5",
    "yellow": "FFFF00",
    "yellowgreen": "9ACD32",
}
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/console.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Internal module for console introspection
|
3 |
+
"""
|
4 |
+
from __future__ import annotations
|
5 |
+
|
6 |
+
from shutil import get_terminal_size
|
7 |
+
|
8 |
+
|
9 |
+
def get_console_size() -> tuple[int | None, int | None]:
|
10 |
+
"""
|
11 |
+
Return console size as tuple = (width, height).
|
12 |
+
|
13 |
+
Returns (None,None) in non-interactive session.
|
14 |
+
"""
|
15 |
+
from pandas import get_option
|
16 |
+
|
17 |
+
display_width = get_option("display.width")
|
18 |
+
display_height = get_option("display.max_rows")
|
19 |
+
|
20 |
+
# Consider
|
21 |
+
# interactive shell terminal, can detect term size
|
22 |
+
# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term
|
23 |
+
# size non-interactive script, should disregard term size
|
24 |
+
|
25 |
+
# in addition
|
26 |
+
# width,height have default values, but setting to 'None' signals
|
27 |
+
# should use Auto-Detection, But only in interactive shell-terminal.
|
28 |
+
# Simple. yeah.
|
29 |
+
|
30 |
+
if in_interactive_session():
|
31 |
+
if in_ipython_frontend():
|
32 |
+
# sane defaults for interactive non-shell terminal
|
33 |
+
# match default for width,height in config_init
|
34 |
+
from pandas._config.config import get_default_val
|
35 |
+
|
36 |
+
terminal_width = get_default_val("display.width")
|
37 |
+
terminal_height = get_default_val("display.max_rows")
|
38 |
+
else:
|
39 |
+
# pure terminal
|
40 |
+
terminal_width, terminal_height = get_terminal_size()
|
41 |
+
else:
|
42 |
+
terminal_width, terminal_height = None, None
|
43 |
+
|
44 |
+
# Note if the User sets width/Height to None (auto-detection)
|
45 |
+
# and we're in a script (non-inter), this will return (None,None)
|
46 |
+
# caller needs to deal.
|
47 |
+
return display_width or terminal_width, display_height or terminal_height
|
48 |
+
|
49 |
+
|
50 |
+
# ----------------------------------------------------------------------
|
51 |
+
# Detect our environment
|
52 |
+
|
53 |
+
|
54 |
+
def in_interactive_session() -> bool:
|
55 |
+
"""
|
56 |
+
Check if we're running in an interactive shell.
|
57 |
+
|
58 |
+
Returns
|
59 |
+
-------
|
60 |
+
bool
|
61 |
+
True if running under python/ipython interactive shell.
|
62 |
+
"""
|
63 |
+
from pandas import get_option
|
64 |
+
|
65 |
+
def check_main():
|
66 |
+
try:
|
67 |
+
import __main__ as main
|
68 |
+
except ModuleNotFoundError:
|
69 |
+
return get_option("mode.sim_interactive")
|
70 |
+
return not hasattr(main, "__file__") or get_option("mode.sim_interactive")
|
71 |
+
|
72 |
+
try:
|
73 |
+
# error: Name '__IPYTHON__' is not defined
|
74 |
+
return __IPYTHON__ or check_main() # type: ignore[name-defined]
|
75 |
+
except NameError:
|
76 |
+
return check_main()
|
77 |
+
|
78 |
+
|
79 |
+
def in_ipython_frontend() -> bool:
|
80 |
+
"""
|
81 |
+
Check if we're inside an IPython zmq frontend.
|
82 |
+
|
83 |
+
Returns
|
84 |
+
-------
|
85 |
+
bool
|
86 |
+
"""
|
87 |
+
try:
|
88 |
+
# error: Name 'get_ipython' is not defined
|
89 |
+
ip = get_ipython() # type: ignore[name-defined]
|
90 |
+
return "zmq" in str(type(ip)).lower()
|
91 |
+
except NameError:
|
92 |
+
pass
|
93 |
+
|
94 |
+
return False
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/css.py
ADDED
@@ -0,0 +1,421 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Utilities for interpreting CSS from Stylers for formatting non-HTML outputs.
|
3 |
+
"""
|
4 |
+
from __future__ import annotations
|
5 |
+
|
6 |
+
import re
|
7 |
+
from typing import (
|
8 |
+
TYPE_CHECKING,
|
9 |
+
Callable,
|
10 |
+
)
|
11 |
+
import warnings
|
12 |
+
|
13 |
+
from pandas.errors import CSSWarning
|
14 |
+
from pandas.util._exceptions import find_stack_level
|
15 |
+
|
16 |
+
if TYPE_CHECKING:
|
17 |
+
from collections.abc import (
|
18 |
+
Generator,
|
19 |
+
Iterable,
|
20 |
+
Iterator,
|
21 |
+
)
|
22 |
+
|
23 |
+
|
24 |
+
def _side_expander(prop_fmt: str) -> Callable:
|
25 |
+
"""
|
26 |
+
Wrapper to expand shorthand property into top, right, bottom, left properties
|
27 |
+
|
28 |
+
Parameters
|
29 |
+
----------
|
30 |
+
side : str
|
31 |
+
The border side to expand into properties
|
32 |
+
|
33 |
+
Returns
|
34 |
+
-------
|
35 |
+
function: Return to call when a 'border(-{side}): {value}' string is encountered
|
36 |
+
"""
|
37 |
+
|
38 |
+
def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
|
39 |
+
"""
|
40 |
+
Expand shorthand property into side-specific property (top, right, bottom, left)
|
41 |
+
|
42 |
+
Parameters
|
43 |
+
----------
|
44 |
+
prop (str): CSS property name
|
45 |
+
value (str): String token for property
|
46 |
+
|
47 |
+
Yields
|
48 |
+
------
|
49 |
+
Tuple (str, str): Expanded property, value
|
50 |
+
"""
|
51 |
+
tokens = value.split()
|
52 |
+
try:
|
53 |
+
mapping = self.SIDE_SHORTHANDS[len(tokens)]
|
54 |
+
except KeyError:
|
55 |
+
warnings.warn(
|
56 |
+
f'Could not expand "{prop}: {value}"',
|
57 |
+
CSSWarning,
|
58 |
+
stacklevel=find_stack_level(),
|
59 |
+
)
|
60 |
+
return
|
61 |
+
for key, idx in zip(self.SIDES, mapping):
|
62 |
+
yield prop_fmt.format(key), tokens[idx]
|
63 |
+
|
64 |
+
return expand
|
65 |
+
|
66 |
+
|
67 |
+
def _border_expander(side: str = "") -> Callable:
|
68 |
+
"""
|
69 |
+
Wrapper to expand 'border' property into border color, style, and width properties
|
70 |
+
|
71 |
+
Parameters
|
72 |
+
----------
|
73 |
+
side : str
|
74 |
+
The border side to expand into properties
|
75 |
+
|
76 |
+
Returns
|
77 |
+
-------
|
78 |
+
function: Return to call when a 'border(-{side}): {value}' string is encountered
|
79 |
+
"""
|
80 |
+
if side != "":
|
81 |
+
side = f"-{side}"
|
82 |
+
|
83 |
+
def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
|
84 |
+
"""
|
85 |
+
Expand border into color, style, and width tuples
|
86 |
+
|
87 |
+
Parameters
|
88 |
+
----------
|
89 |
+
prop : str
|
90 |
+
CSS property name passed to styler
|
91 |
+
value : str
|
92 |
+
Value passed to styler for property
|
93 |
+
|
94 |
+
Yields
|
95 |
+
------
|
96 |
+
Tuple (str, str): Expanded property, value
|
97 |
+
"""
|
98 |
+
tokens = value.split()
|
99 |
+
if len(tokens) == 0 or len(tokens) > 3:
|
100 |
+
warnings.warn(
|
101 |
+
f'Too many tokens provided to "{prop}" (expected 1-3)',
|
102 |
+
CSSWarning,
|
103 |
+
stacklevel=find_stack_level(),
|
104 |
+
)
|
105 |
+
|
106 |
+
# TODO: Can we use current color as initial value to comply with CSS standards?
|
107 |
+
border_declarations = {
|
108 |
+
f"border{side}-color": "black",
|
109 |
+
f"border{side}-style": "none",
|
110 |
+
f"border{side}-width": "medium",
|
111 |
+
}
|
112 |
+
for token in tokens:
|
113 |
+
if token.lower() in self.BORDER_STYLES:
|
114 |
+
border_declarations[f"border{side}-style"] = token
|
115 |
+
elif any(ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS):
|
116 |
+
border_declarations[f"border{side}-width"] = token
|
117 |
+
else:
|
118 |
+
border_declarations[f"border{side}-color"] = token
|
119 |
+
# TODO: Warn user if item entered more than once (e.g. "border: red green")
|
120 |
+
|
121 |
+
# Per CSS, "border" will reset previous "border-*" definitions
|
122 |
+
yield from self.atomize(border_declarations.items())
|
123 |
+
|
124 |
+
return expand
|
125 |
+
|
126 |
+
|
127 |
+
class CSSResolver:
|
128 |
+
"""
|
129 |
+
A callable for parsing and resolving CSS to atomic properties.
|
130 |
+
"""
|
131 |
+
|
132 |
+
UNIT_RATIOS = {
|
133 |
+
"pt": ("pt", 1),
|
134 |
+
"em": ("em", 1),
|
135 |
+
"rem": ("pt", 12),
|
136 |
+
"ex": ("em", 0.5),
|
137 |
+
# 'ch':
|
138 |
+
"px": ("pt", 0.75),
|
139 |
+
"pc": ("pt", 12),
|
140 |
+
"in": ("pt", 72),
|
141 |
+
"cm": ("in", 1 / 2.54),
|
142 |
+
"mm": ("in", 1 / 25.4),
|
143 |
+
"q": ("mm", 0.25),
|
144 |
+
"!!default": ("em", 0),
|
145 |
+
}
|
146 |
+
|
147 |
+
FONT_SIZE_RATIOS = UNIT_RATIOS.copy()
|
148 |
+
FONT_SIZE_RATIOS.update(
|
149 |
+
{
|
150 |
+
"%": ("em", 0.01),
|
151 |
+
"xx-small": ("rem", 0.5),
|
152 |
+
"x-small": ("rem", 0.625),
|
153 |
+
"small": ("rem", 0.8),
|
154 |
+
"medium": ("rem", 1),
|
155 |
+
"large": ("rem", 1.125),
|
156 |
+
"x-large": ("rem", 1.5),
|
157 |
+
"xx-large": ("rem", 2),
|
158 |
+
"smaller": ("em", 1 / 1.2),
|
159 |
+
"larger": ("em", 1.2),
|
160 |
+
"!!default": ("em", 1),
|
161 |
+
}
|
162 |
+
)
|
163 |
+
|
164 |
+
MARGIN_RATIOS = UNIT_RATIOS.copy()
|
165 |
+
MARGIN_RATIOS.update({"none": ("pt", 0)})
|
166 |
+
|
167 |
+
BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()
|
168 |
+
BORDER_WIDTH_RATIOS.update(
|
169 |
+
{
|
170 |
+
"none": ("pt", 0),
|
171 |
+
"thick": ("px", 4),
|
172 |
+
"medium": ("px", 2),
|
173 |
+
"thin": ("px", 1),
|
174 |
+
# Default: medium only if solid
|
175 |
+
}
|
176 |
+
)
|
177 |
+
|
178 |
+
BORDER_STYLES = [
|
179 |
+
"none",
|
180 |
+
"hidden",
|
181 |
+
"dotted",
|
182 |
+
"dashed",
|
183 |
+
"solid",
|
184 |
+
"double",
|
185 |
+
"groove",
|
186 |
+
"ridge",
|
187 |
+
"inset",
|
188 |
+
"outset",
|
189 |
+
"mediumdashdot",
|
190 |
+
"dashdotdot",
|
191 |
+
"hair",
|
192 |
+
"mediumdashdotdot",
|
193 |
+
"dashdot",
|
194 |
+
"slantdashdot",
|
195 |
+
"mediumdashed",
|
196 |
+
]
|
197 |
+
|
198 |
+
SIDE_SHORTHANDS = {
|
199 |
+
1: [0, 0, 0, 0],
|
200 |
+
2: [0, 1, 0, 1],
|
201 |
+
3: [0, 1, 2, 1],
|
202 |
+
4: [0, 1, 2, 3],
|
203 |
+
}
|
204 |
+
|
205 |
+
SIDES = ("top", "right", "bottom", "left")
|
206 |
+
|
207 |
+
CSS_EXPANSIONS = {
|
208 |
+
**{
|
209 |
+
(f"border-{prop}" if prop else "border"): _border_expander(prop)
|
210 |
+
for prop in ["", "top", "right", "bottom", "left"]
|
211 |
+
},
|
212 |
+
**{
|
213 |
+
f"border-{prop}": _side_expander(f"border-{{:s}}-{prop}")
|
214 |
+
for prop in ["color", "style", "width"]
|
215 |
+
},
|
216 |
+
"margin": _side_expander("margin-{:s}"),
|
217 |
+
"padding": _side_expander("padding-{:s}"),
|
218 |
+
}
|
219 |
+
|
220 |
+
def __call__(
|
221 |
+
self,
|
222 |
+
declarations: str | Iterable[tuple[str, str]],
|
223 |
+
inherited: dict[str, str] | None = None,
|
224 |
+
) -> dict[str, str]:
|
225 |
+
"""
|
226 |
+
The given declarations to atomic properties.
|
227 |
+
|
228 |
+
Parameters
|
229 |
+
----------
|
230 |
+
declarations_str : str | Iterable[tuple[str, str]]
|
231 |
+
A CSS string or set of CSS declaration tuples
|
232 |
+
e.g. "font-weight: bold; background: blue" or
|
233 |
+
{("font-weight", "bold"), ("background", "blue")}
|
234 |
+
inherited : dict, optional
|
235 |
+
Atomic properties indicating the inherited style context in which
|
236 |
+
declarations_str is to be resolved. ``inherited`` should already
|
237 |
+
be resolved, i.e. valid output of this method.
|
238 |
+
|
239 |
+
Returns
|
240 |
+
-------
|
241 |
+
dict
|
242 |
+
Atomic CSS 2.2 properties.
|
243 |
+
|
244 |
+
Examples
|
245 |
+
--------
|
246 |
+
>>> resolve = CSSResolver()
|
247 |
+
>>> inherited = {'font-family': 'serif', 'font-weight': 'bold'}
|
248 |
+
>>> out = resolve('''
|
249 |
+
... border-color: BLUE RED;
|
250 |
+
... font-size: 1em;
|
251 |
+
... font-size: 2em;
|
252 |
+
... font-weight: normal;
|
253 |
+
... font-weight: inherit;
|
254 |
+
... ''', inherited)
|
255 |
+
>>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE
|
256 |
+
[('border-bottom-color', 'blue'),
|
257 |
+
('border-left-color', 'red'),
|
258 |
+
('border-right-color', 'red'),
|
259 |
+
('border-top-color', 'blue'),
|
260 |
+
('font-family', 'serif'),
|
261 |
+
('font-size', '24pt'),
|
262 |
+
('font-weight', 'bold')]
|
263 |
+
"""
|
264 |
+
if isinstance(declarations, str):
|
265 |
+
declarations = self.parse(declarations)
|
266 |
+
props = dict(self.atomize(declarations))
|
267 |
+
if inherited is None:
|
268 |
+
inherited = {}
|
269 |
+
|
270 |
+
props = self._update_initial(props, inherited)
|
271 |
+
props = self._update_font_size(props, inherited)
|
272 |
+
return self._update_other_units(props)
|
273 |
+
|
274 |
+
def _update_initial(
|
275 |
+
self,
|
276 |
+
props: dict[str, str],
|
277 |
+
inherited: dict[str, str],
|
278 |
+
) -> dict[str, str]:
|
279 |
+
# 1. resolve inherited, initial
|
280 |
+
for prop, val in inherited.items():
|
281 |
+
if prop not in props:
|
282 |
+
props[prop] = val
|
283 |
+
|
284 |
+
new_props = props.copy()
|
285 |
+
for prop, val in props.items():
|
286 |
+
if val == "inherit":
|
287 |
+
val = inherited.get(prop, "initial")
|
288 |
+
|
289 |
+
if val in ("initial", None):
|
290 |
+
# we do not define a complete initial stylesheet
|
291 |
+
del new_props[prop]
|
292 |
+
else:
|
293 |
+
new_props[prop] = val
|
294 |
+
return new_props
|
295 |
+
|
296 |
+
def _update_font_size(
|
297 |
+
self,
|
298 |
+
props: dict[str, str],
|
299 |
+
inherited: dict[str, str],
|
300 |
+
) -> dict[str, str]:
|
301 |
+
# 2. resolve relative font size
|
302 |
+
if props.get("font-size"):
|
303 |
+
props["font-size"] = self.size_to_pt(
|
304 |
+
props["font-size"],
|
305 |
+
self._get_font_size(inherited),
|
306 |
+
conversions=self.FONT_SIZE_RATIOS,
|
307 |
+
)
|
308 |
+
return props
|
309 |
+
|
310 |
+
def _get_font_size(self, props: dict[str, str]) -> float | None:
|
311 |
+
if props.get("font-size"):
|
312 |
+
font_size_string = props["font-size"]
|
313 |
+
return self._get_float_font_size_from_pt(font_size_string)
|
314 |
+
return None
|
315 |
+
|
316 |
+
def _get_float_font_size_from_pt(self, font_size_string: str) -> float:
|
317 |
+
assert font_size_string.endswith("pt")
|
318 |
+
return float(font_size_string.rstrip("pt"))
|
319 |
+
|
320 |
+
def _update_other_units(self, props: dict[str, str]) -> dict[str, str]:
|
321 |
+
font_size = self._get_font_size(props)
|
322 |
+
# 3. TODO: resolve other font-relative units
|
323 |
+
for side in self.SIDES:
|
324 |
+
prop = f"border-{side}-width"
|
325 |
+
if prop in props:
|
326 |
+
props[prop] = self.size_to_pt(
|
327 |
+
props[prop],
|
328 |
+
em_pt=font_size,
|
329 |
+
conversions=self.BORDER_WIDTH_RATIOS,
|
330 |
+
)
|
331 |
+
|
332 |
+
for prop in [f"margin-{side}", f"padding-{side}"]:
|
333 |
+
if prop in props:
|
334 |
+
# TODO: support %
|
335 |
+
props[prop] = self.size_to_pt(
|
336 |
+
props[prop],
|
337 |
+
em_pt=font_size,
|
338 |
+
conversions=self.MARGIN_RATIOS,
|
339 |
+
)
|
340 |
+
return props
|
341 |
+
|
342 |
+
def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS) -> str:
|
343 |
+
def _error():
|
344 |
+
warnings.warn(
|
345 |
+
f"Unhandled size: {repr(in_val)}",
|
346 |
+
CSSWarning,
|
347 |
+
stacklevel=find_stack_level(),
|
348 |
+
)
|
349 |
+
return self.size_to_pt("1!!default", conversions=conversions)
|
350 |
+
|
351 |
+
match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val)
|
352 |
+
if match is None:
|
353 |
+
return _error()
|
354 |
+
|
355 |
+
val, unit = match.groups()
|
356 |
+
if val == "":
|
357 |
+
# hack for 'large' etc.
|
358 |
+
val = 1
|
359 |
+
else:
|
360 |
+
try:
|
361 |
+
val = float(val)
|
362 |
+
except ValueError:
|
363 |
+
return _error()
|
364 |
+
|
365 |
+
while unit != "pt":
|
366 |
+
if unit == "em":
|
367 |
+
if em_pt is None:
|
368 |
+
unit = "rem"
|
369 |
+
else:
|
370 |
+
val *= em_pt
|
371 |
+
unit = "pt"
|
372 |
+
continue
|
373 |
+
|
374 |
+
try:
|
375 |
+
unit, mul = conversions[unit]
|
376 |
+
except KeyError:
|
377 |
+
return _error()
|
378 |
+
val *= mul
|
379 |
+
|
380 |
+
val = round(val, 5)
|
381 |
+
if int(val) == val:
|
382 |
+
size_fmt = f"{int(val):d}pt"
|
383 |
+
else:
|
384 |
+
size_fmt = f"{val:f}pt"
|
385 |
+
return size_fmt
|
386 |
+
|
387 |
+
def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]:
|
388 |
+
for prop, value in declarations:
|
389 |
+
prop = prop.lower()
|
390 |
+
value = value.lower()
|
391 |
+
if prop in self.CSS_EXPANSIONS:
|
392 |
+
expand = self.CSS_EXPANSIONS[prop]
|
393 |
+
yield from expand(self, prop, value)
|
394 |
+
else:
|
395 |
+
yield prop, value
|
396 |
+
|
397 |
+
def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]:
|
398 |
+
"""
|
399 |
+
Generates (prop, value) pairs from declarations.
|
400 |
+
|
401 |
+
In a future version may generate parsed tokens from tinycss/tinycss2
|
402 |
+
|
403 |
+
Parameters
|
404 |
+
----------
|
405 |
+
declarations_str : str
|
406 |
+
"""
|
407 |
+
for decl in declarations_str.split(";"):
|
408 |
+
if not decl.strip():
|
409 |
+
continue
|
410 |
+
prop, sep, val = decl.partition(":")
|
411 |
+
prop = prop.strip().lower()
|
412 |
+
# TODO: don't lowercase case sensitive parts of values (strings)
|
413 |
+
val = val.strip().lower()
|
414 |
+
if sep:
|
415 |
+
yield prop, val
|
416 |
+
else:
|
417 |
+
warnings.warn(
|
418 |
+
f"Ill-formatted attribute: expected a colon in {repr(decl)}",
|
419 |
+
CSSWarning,
|
420 |
+
stacklevel=find_stack_level(),
|
421 |
+
)
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/excel.py
ADDED
@@ -0,0 +1,962 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Utilities for conversion to writer-agnostic Excel representation.
|
3 |
+
"""
|
4 |
+
from __future__ import annotations
|
5 |
+
|
6 |
+
from collections.abc import (
|
7 |
+
Hashable,
|
8 |
+
Iterable,
|
9 |
+
Mapping,
|
10 |
+
Sequence,
|
11 |
+
)
|
12 |
+
import functools
|
13 |
+
import itertools
|
14 |
+
import re
|
15 |
+
from typing import (
|
16 |
+
TYPE_CHECKING,
|
17 |
+
Any,
|
18 |
+
Callable,
|
19 |
+
cast,
|
20 |
+
)
|
21 |
+
import warnings
|
22 |
+
|
23 |
+
import numpy as np
|
24 |
+
|
25 |
+
from pandas._libs.lib import is_list_like
|
26 |
+
from pandas.util._decorators import doc
|
27 |
+
from pandas.util._exceptions import find_stack_level
|
28 |
+
|
29 |
+
from pandas.core.dtypes import missing
|
30 |
+
from pandas.core.dtypes.common import (
|
31 |
+
is_float,
|
32 |
+
is_scalar,
|
33 |
+
)
|
34 |
+
|
35 |
+
from pandas import (
|
36 |
+
DataFrame,
|
37 |
+
Index,
|
38 |
+
MultiIndex,
|
39 |
+
PeriodIndex,
|
40 |
+
)
|
41 |
+
import pandas.core.common as com
|
42 |
+
from pandas.core.shared_docs import _shared_docs
|
43 |
+
|
44 |
+
from pandas.io.formats._color_data import CSS4_COLORS
|
45 |
+
from pandas.io.formats.css import (
|
46 |
+
CSSResolver,
|
47 |
+
CSSWarning,
|
48 |
+
)
|
49 |
+
from pandas.io.formats.format import get_level_lengths
|
50 |
+
from pandas.io.formats.printing import pprint_thing
|
51 |
+
|
52 |
+
if TYPE_CHECKING:
|
53 |
+
from pandas._typing import (
|
54 |
+
FilePath,
|
55 |
+
IndexLabel,
|
56 |
+
StorageOptions,
|
57 |
+
WriteExcelBuffer,
|
58 |
+
)
|
59 |
+
|
60 |
+
from pandas import ExcelWriter
|
61 |
+
|
62 |
+
|
63 |
+
class ExcelCell:
|
64 |
+
__fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")
|
65 |
+
__slots__ = __fields__
|
66 |
+
|
67 |
+
def __init__(
|
68 |
+
self,
|
69 |
+
row: int,
|
70 |
+
col: int,
|
71 |
+
val,
|
72 |
+
style=None,
|
73 |
+
mergestart: int | None = None,
|
74 |
+
mergeend: int | None = None,
|
75 |
+
) -> None:
|
76 |
+
self.row = row
|
77 |
+
self.col = col
|
78 |
+
self.val = val
|
79 |
+
self.style = style
|
80 |
+
self.mergestart = mergestart
|
81 |
+
self.mergeend = mergeend
|
82 |
+
|
83 |
+
|
84 |
+
class CssExcelCell(ExcelCell):
|
85 |
+
def __init__(
|
86 |
+
self,
|
87 |
+
row: int,
|
88 |
+
col: int,
|
89 |
+
val,
|
90 |
+
style: dict | None,
|
91 |
+
css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None,
|
92 |
+
css_row: int,
|
93 |
+
css_col: int,
|
94 |
+
css_converter: Callable | None,
|
95 |
+
**kwargs,
|
96 |
+
) -> None:
|
97 |
+
if css_styles and css_converter:
|
98 |
+
# Use dict to get only one (case-insensitive) declaration per property
|
99 |
+
declaration_dict = {
|
100 |
+
prop.lower(): val for prop, val in css_styles[css_row, css_col]
|
101 |
+
}
|
102 |
+
# Convert to frozenset for order-invariant caching
|
103 |
+
unique_declarations = frozenset(declaration_dict.items())
|
104 |
+
style = css_converter(unique_declarations)
|
105 |
+
|
106 |
+
super().__init__(row=row, col=col, val=val, style=style, **kwargs)
|
107 |
+
|
108 |
+
|
109 |
+
class CSSToExcelConverter:
|
110 |
+
"""
|
111 |
+
A callable for converting CSS declarations to ExcelWriter styles
|
112 |
+
|
113 |
+
Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
|
114 |
+
focusing on font styling, backgrounds, borders and alignment.
|
115 |
+
|
116 |
+
Operates by first computing CSS styles in a fairly generic
|
117 |
+
way (see :meth:`compute_css`) then determining Excel style
|
118 |
+
properties from CSS properties (see :meth:`build_xlstyle`).
|
119 |
+
|
120 |
+
Parameters
|
121 |
+
----------
|
122 |
+
inherited : str, optional
|
123 |
+
CSS declarations understood to be the containing scope for the
|
124 |
+
CSS processed by :meth:`__call__`.
|
125 |
+
"""
|
126 |
+
|
127 |
+
NAMED_COLORS = CSS4_COLORS
|
128 |
+
|
129 |
+
VERTICAL_MAP = {
|
130 |
+
"top": "top",
|
131 |
+
"text-top": "top",
|
132 |
+
"middle": "center",
|
133 |
+
"baseline": "bottom",
|
134 |
+
"bottom": "bottom",
|
135 |
+
"text-bottom": "bottom",
|
136 |
+
# OpenXML also has 'justify', 'distributed'
|
137 |
+
}
|
138 |
+
|
139 |
+
BOLD_MAP = {
|
140 |
+
"bold": True,
|
141 |
+
"bolder": True,
|
142 |
+
"600": True,
|
143 |
+
"700": True,
|
144 |
+
"800": True,
|
145 |
+
"900": True,
|
146 |
+
"normal": False,
|
147 |
+
"lighter": False,
|
148 |
+
"100": False,
|
149 |
+
"200": False,
|
150 |
+
"300": False,
|
151 |
+
"400": False,
|
152 |
+
"500": False,
|
153 |
+
}
|
154 |
+
|
155 |
+
ITALIC_MAP = {
|
156 |
+
"normal": False,
|
157 |
+
"italic": True,
|
158 |
+
"oblique": True,
|
159 |
+
}
|
160 |
+
|
161 |
+
FAMILY_MAP = {
|
162 |
+
"serif": 1, # roman
|
163 |
+
"sans-serif": 2, # swiss
|
164 |
+
"cursive": 4, # script
|
165 |
+
"fantasy": 5, # decorative
|
166 |
+
}
|
167 |
+
|
168 |
+
BORDER_STYLE_MAP = {
|
169 |
+
style.lower(): style
|
170 |
+
for style in [
|
171 |
+
"dashed",
|
172 |
+
"mediumDashDot",
|
173 |
+
"dashDotDot",
|
174 |
+
"hair",
|
175 |
+
"dotted",
|
176 |
+
"mediumDashDotDot",
|
177 |
+
"double",
|
178 |
+
"dashDot",
|
179 |
+
"slantDashDot",
|
180 |
+
"mediumDashed",
|
181 |
+
]
|
182 |
+
}
|
183 |
+
|
184 |
+
# NB: Most of the methods here could be classmethods, as only __init__
|
185 |
+
# and __call__ make use of instance attributes. We leave them as
|
186 |
+
# instancemethods so that users can easily experiment with extensions
|
187 |
+
# without monkey-patching.
|
188 |
+
inherited: dict[str, str] | None
|
189 |
+
|
190 |
+
def __init__(self, inherited: str | None = None) -> None:
|
191 |
+
if inherited is not None:
|
192 |
+
self.inherited = self.compute_css(inherited)
|
193 |
+
else:
|
194 |
+
self.inherited = None
|
195 |
+
# We should avoid cache on the __call__ method.
|
196 |
+
# Otherwise once the method __call__ has been called
|
197 |
+
# garbage collection no longer deletes the instance.
|
198 |
+
self._call_cached = functools.cache(self._call_uncached)
|
199 |
+
|
200 |
+
compute_css = CSSResolver()
|
201 |
+
|
202 |
+
def __call__(
|
203 |
+
self, declarations: str | frozenset[tuple[str, str]]
|
204 |
+
) -> dict[str, dict[str, str]]:
|
205 |
+
"""
|
206 |
+
Convert CSS declarations to ExcelWriter style.
|
207 |
+
|
208 |
+
Parameters
|
209 |
+
----------
|
210 |
+
declarations : str | frozenset[tuple[str, str]]
|
211 |
+
CSS string or set of CSS declaration tuples.
|
212 |
+
e.g. "font-weight: bold; background: blue" or
|
213 |
+
{("font-weight", "bold"), ("background", "blue")}
|
214 |
+
|
215 |
+
Returns
|
216 |
+
-------
|
217 |
+
xlstyle : dict
|
218 |
+
A style as interpreted by ExcelWriter when found in
|
219 |
+
ExcelCell.style.
|
220 |
+
"""
|
221 |
+
return self._call_cached(declarations)
|
222 |
+
|
223 |
+
def _call_uncached(
|
224 |
+
self, declarations: str | frozenset[tuple[str, str]]
|
225 |
+
) -> dict[str, dict[str, str]]:
|
226 |
+
properties = self.compute_css(declarations, self.inherited)
|
227 |
+
return self.build_xlstyle(properties)
|
228 |
+
|
229 |
+
def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]:
|
230 |
+
out = {
|
231 |
+
"alignment": self.build_alignment(props),
|
232 |
+
"border": self.build_border(props),
|
233 |
+
"fill": self.build_fill(props),
|
234 |
+
"font": self.build_font(props),
|
235 |
+
"number_format": self.build_number_format(props),
|
236 |
+
}
|
237 |
+
|
238 |
+
# TODO: handle cell width and height: needs support in pandas.io.excel
|
239 |
+
|
240 |
+
def remove_none(d: dict[str, str | None]) -> None:
|
241 |
+
"""Remove key where value is None, through nested dicts"""
|
242 |
+
for k, v in list(d.items()):
|
243 |
+
if v is None:
|
244 |
+
del d[k]
|
245 |
+
elif isinstance(v, dict):
|
246 |
+
remove_none(v)
|
247 |
+
if not v:
|
248 |
+
del d[k]
|
249 |
+
|
250 |
+
remove_none(out)
|
251 |
+
return out
|
252 |
+
|
253 |
+
def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]:
|
254 |
+
# TODO: text-indent, padding-left -> alignment.indent
|
255 |
+
return {
|
256 |
+
"horizontal": props.get("text-align"),
|
257 |
+
"vertical": self._get_vertical_alignment(props),
|
258 |
+
"wrap_text": self._get_is_wrap_text(props),
|
259 |
+
}
|
260 |
+
|
261 |
+
def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None:
|
262 |
+
vertical_align = props.get("vertical-align")
|
263 |
+
if vertical_align:
|
264 |
+
return self.VERTICAL_MAP.get(vertical_align)
|
265 |
+
return None
|
266 |
+
|
267 |
+
def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None:
|
268 |
+
if props.get("white-space") is None:
|
269 |
+
return None
|
270 |
+
return bool(props["white-space"] not in ("nowrap", "pre", "pre-line"))
|
271 |
+
|
272 |
+
def build_border(
|
273 |
+
self, props: Mapping[str, str]
|
274 |
+
) -> dict[str, dict[str, str | None]]:
|
275 |
+
return {
|
276 |
+
side: {
|
277 |
+
"style": self._border_style(
|
278 |
+
props.get(f"border-{side}-style"),
|
279 |
+
props.get(f"border-{side}-width"),
|
280 |
+
self.color_to_excel(props.get(f"border-{side}-color")),
|
281 |
+
),
|
282 |
+
"color": self.color_to_excel(props.get(f"border-{side}-color")),
|
283 |
+
}
|
284 |
+
for side in ["top", "right", "bottom", "left"]
|
285 |
+
}
|
286 |
+
|
287 |
+
def _border_style(self, style: str | None, width: str | None, color: str | None):
    """
    Resolve a CSS (style, width, color) border triple to an openxml style name.

    Returns
    -------
    str or None
        ``None`` removes "border" from the style dictionary entirely;
        ``"none"`` keeps the key with no visible line; otherwise one of the
        openxml names listed below.
    """
    # convert styles and widths to openxml, one of:
    # 'dashDot'
    # 'dashDotDot'
    # 'dashed'
    # 'dotted'
    # 'double'
    # 'hair'
    # 'medium'
    # 'mediumDashDot'
    # 'mediumDashDotDot'
    # 'mediumDashed'
    # 'slantDashDot'
    # 'thick'
    # 'thin'
    if width is None and style is None and color is None:
        # Return None will remove "border" from style dictionary
        return None

    if width is None and style is None:
        # Return "none" will keep "border" in style dictionary
        return "none"

    if style in ("none", "hidden"):
        return "none"

    # Bucket the CSS width into hair/thin/medium/thick (None -> invisible).
    width_name = self._get_width_name(width)
    if width_name is None:
        return "none"

    if style in (None, "groove", "ridge", "inset", "outset", "solid"):
        # not handled
        return width_name

    if style == "double":
        return "double"
    if style == "dotted":
        if width_name in ("hair", "thin"):
            return "dotted"
        return "mediumDashDotDot"
    if style == "dashed":
        if width_name in ("hair", "thin"):
            return "dashed"
        return "mediumDashed"
    elif style in self.BORDER_STYLE_MAP:
        # Excel-specific styles
        return self.BORDER_STYLE_MAP[style]
    else:
        # Unknown CSS style keyword: warn but still render an invisible border.
        warnings.warn(
            f"Unhandled border style format: {repr(style)}",
            CSSWarning,
            stacklevel=find_stack_level(),
        )
        return "none"
|
341 |
+
|
342 |
+
def _get_width_name(self, width_input: str | None) -> str | None:
    """Bucket a CSS border width (in pt) into Excel's thin/medium/thick names."""
    pts = self._width_to_float(width_input)
    if pts < 1e-5:
        # Effectively zero width: no visible border at all.
        return None
    if pts < 1.3:
        return "thin"
    if pts < 2.8:
        return "medium"
    return "thick"
|
351 |
+
|
352 |
+
def _width_to_float(self, width: str | None) -> float:
    """Parse a CSS width in points; a missing width defaults to 2pt ("medium")."""
    return self._pt_to_float("2pt" if width is None else width)
|
356 |
+
|
357 |
+
def _pt_to_float(self, pt_string: str) -> float:
|
358 |
+
assert pt_string.endswith("pt")
|
359 |
+
return float(pt_string.rstrip("pt"))
|
360 |
+
|
361 |
+
def build_fill(self, props: Mapping[str, str]):
    """Translate CSS ``background-color`` into an Excel solid fill, if any."""
    # TODO: perhaps allow for special properties
    #       -excel-pattern-bgcolor and -excel-pattern-type
    fill_color = props.get("background-color")
    if fill_color in (None, "transparent", "none"):
        # No fill: returning None drops "fill" from the style dictionary.
        return None
    return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"}
|
367 |
+
|
368 |
+
def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]:
|
369 |
+
fc = props.get("number-format")
|
370 |
+
fc = fc.replace("§", ";") if isinstance(fc, str) else fc
|
371 |
+
return {"format_code": fc}
|
372 |
+
|
373 |
+
def build_font(
    self, props: Mapping[str, str]
) -> dict[str, bool | float | str | None]:
    """Assemble the Excel font specification from CSS font/text properties."""
    font_names = self._get_font_names(props)
    decoration = self._get_decoration(props)
    primary_name = font_names[0] if font_names else None
    underline = "single" if "underline" in decoration else None
    strike = ("line-through" in decoration) or None
    return {
        "name": primary_name,
        "family": self._select_font_family(font_names),
        "size": self._get_font_size(props),
        "bold": self._get_is_bold(props),
        "italic": self._get_is_italic(props),
        "underline": underline,
        "strike": strike,
        "color": self.color_to_excel(props.get("color")),
        # shadow if nonzero digit before shadow color
        "shadow": self._get_shadow(props),
    }
|
390 |
+
|
391 |
+
def _get_is_bold(self, props: Mapping[str, str]) -> bool | None:
    """Map CSS ``font-weight`` through BOLD_MAP; None when unspecified."""
    weight = props.get("font-weight")
    return self.BOLD_MAP.get(weight) if weight else None
|
396 |
+
|
397 |
+
def _get_is_italic(self, props: Mapping[str, str]) -> bool | None:
    """Map CSS ``font-style`` through ITALIC_MAP; None when unspecified."""
    font_style = props.get("font-style")
    return self.ITALIC_MAP.get(font_style) if font_style else None
|
402 |
+
|
403 |
+
def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]:
|
404 |
+
decoration = props.get("text-decoration")
|
405 |
+
if decoration is not None:
|
406 |
+
return decoration.split()
|
407 |
+
else:
|
408 |
+
return ()
|
409 |
+
|
410 |
+
def _get_underline(self, decoration: Sequence[str]) -> str | None:
|
411 |
+
if "underline" in decoration:
|
412 |
+
return "single"
|
413 |
+
return None
|
414 |
+
|
415 |
+
def _get_shadow(self, props: Mapping[str, str]) -> bool | None:
|
416 |
+
if "text-shadow" in props:
|
417 |
+
return bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))
|
418 |
+
return None
|
419 |
+
|
420 |
+
def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]:
|
421 |
+
font_names_tmp = re.findall(
|
422 |
+
r"""(?x)
|
423 |
+
(
|
424 |
+
"(?:[^"]|\\")+"
|
425 |
+
|
|
426 |
+
'(?:[^']|\\')+'
|
427 |
+
|
|
428 |
+
[^'",]+
|
429 |
+
)(?=,|\s*$)
|
430 |
+
""",
|
431 |
+
props.get("font-family", ""),
|
432 |
+
)
|
433 |
+
|
434 |
+
font_names = []
|
435 |
+
for name in font_names_tmp:
|
436 |
+
if name[:1] == '"':
|
437 |
+
name = name[1:-1].replace('\\"', '"')
|
438 |
+
elif name[:1] == "'":
|
439 |
+
name = name[1:-1].replace("\\'", "'")
|
440 |
+
else:
|
441 |
+
name = name.strip()
|
442 |
+
if name:
|
443 |
+
font_names.append(name)
|
444 |
+
return font_names
|
445 |
+
|
446 |
+
def _get_font_size(self, props: Mapping[str, str]) -> float | None:
    """Convert CSS ``font-size`` (expected in pt) to a float, if present."""
    size = props.get("font-size")
    return None if size is None else self._pt_to_float(size)
|
451 |
+
|
452 |
+
def _select_font_family(self, font_names: Sequence[str]) -> int | None:
    """Return the Excel font-family number for the first recognized name."""
    family = None
    for font_name in font_names:
        family = self.FAMILY_MAP.get(font_name)
        if family:
            # First name with a truthy mapping wins.
            break

    return family
|
460 |
+
|
461 |
+
def color_to_excel(self, val: str | None) -> str | None:
    """Convert a CSS color (hash-hex or named) to an Excel RGB hex string."""
    if val is None:
        return None

    if self._is_hex_color(val):
        return self._convert_hex_to_excel(val)

    named = self.NAMED_COLORS.get(val)
    if named is not None:
        return named

    # Unknown color keyword: warn and emit no color rather than failing.
    warnings.warn(
        f"Unhandled color format: {repr(val)}",
        CSSWarning,
        stacklevel=find_stack_level(),
    )
    return None
|
477 |
+
|
478 |
+
def _is_hex_color(self, color_string: str) -> bool:
|
479 |
+
return bool(color_string.startswith("#"))
|
480 |
+
|
481 |
+
def _convert_hex_to_excel(self, color_string: str) -> str:
    """Expand #RGB / #RRGGBB CSS hex to Excel's uppercase RRGGBB form."""
    code = color_string.lstrip("#")
    if self._is_shorthand_color(color_string):
        # Duplicate each shorthand digit: #abc -> AABBCC.
        code = code[0] * 2 + code[1] * 2 + code[2] * 2
    return code.upper()
|
487 |
+
|
488 |
+
def _is_shorthand_color(self, color_string: str) -> bool:
|
489 |
+
"""Check if color code is shorthand.
|
490 |
+
|
491 |
+
#FFF is a shorthand as opposed to full #FFFFFF.
|
492 |
+
"""
|
493 |
+
code = color_string.lstrip("#")
|
494 |
+
if len(code) == 3:
|
495 |
+
return True
|
496 |
+
elif len(code) == 6:
|
497 |
+
return False
|
498 |
+
else:
|
499 |
+
raise ValueError(f"Unexpected color {color_string}")
|
500 |
+
|
501 |
+
|
502 |
+
class ExcelFormatter:
    """
    Class for formatting a DataFrame to a list of ExcelCells,

    Parameters
    ----------
    df : DataFrame or Styler
    na_rep: na representation
    float_format : str, default None
        Format string for floating point numbers
    cols : sequence, optional
        Columns to write
    header : bool or sequence of str, default True
        Write out column names. If a list of string is given it is
        assumed to be aliases for the column names
    index : bool, default True
        output row names (index)
    index_label : str or sequence, default None
        Column label for index column(s) if desired. If None is given, and
        `header` and `index` are True, then the index names are used. A
        sequence should be given if the DataFrame uses MultiIndex.
    merge_cells : bool, default False
        Format MultiIndex and Hierarchical Rows as merged cells.
    inf_rep : str, default `'inf'`
        representation for np.inf values (which aren't representable in Excel)
        A `'-'` sign will be added in front of -inf.
    style_converter : callable, optional
        This translates Styler styles (CSS) into ExcelWriter styles.
        Defaults to ``CSSToExcelConverter()``.
        It should have signature css_declarations string -> excel style.
        This is only called for body cells.
    """

    # Hard sheet-size limits of the xlsx format.
    max_rows = 2**20
    max_cols = 2**14

    def __init__(
        self,
        df,
        na_rep: str = "",
        float_format: str | None = None,
        cols: Sequence[Hashable] | None = None,
        header: Sequence[Hashable] | bool = True,
        index: bool = True,
        index_label: IndexLabel | None = None,
        merge_cells: bool = False,
        inf_rep: str = "inf",
        style_converter: Callable | None = None,
    ) -> None:
        self.rowcounter = 0
        self.na_rep = na_rep
        if not isinstance(df, DataFrame):
            # Assume a Styler: materialize its styles, then unwrap the data.
            self.styler = df
            self.styler._compute()  # calculate applied styles
            df = df.data
            if style_converter is None:
                style_converter = CSSToExcelConverter()
            self.style_converter: Callable | None = style_converter
        else:
            self.styler = None
            self.style_converter = None
        self.df = df
        if cols is not None:
            # all missing, raise
            if not len(Index(cols).intersection(df.columns)):
                raise KeyError("passed columns are not ALL present in dataframe")

            if len(Index(cols).intersection(df.columns)) != len(set(cols)):
                # Deprecated in GH#17295, enforced in 1.0.0
                raise KeyError("Not all names specified in 'columns' are found")

            self.df = df.reindex(columns=cols)

        self.columns = self.df.columns
        self.float_format = float_format
        self.index = index
        self.index_label = index_label
        self.header = header
        self.merge_cells = merge_cells
        self.inf_rep = inf_rep

    @property
    def header_style(self) -> dict[str, dict[str, str | bool]]:
        """Default style applied to header and index cells."""
        return {
            "font": {"bold": True},
            "borders": {
                "top": "thin",
                "right": "thin",
                "bottom": "thin",
                "left": "thin",
            },
            "alignment": {"horizontal": "center", "vertical": "top"},
        }

    def _format_value(self, val):
        """Apply na_rep/inf_rep/float_format to a scalar cell value.

        Raises
        ------
        ValueError
            If the value is a timezone-aware datetime (not writable to Excel).
        """
        if is_scalar(val) and missing.isna(val):
            val = self.na_rep
        elif is_float(val):
            if missing.isposinf_scalar(val):
                val = self.inf_rep
            elif missing.isneginf_scalar(val):
                val = f"-{self.inf_rep}"
            elif self.float_format is not None:
                val = float(self.float_format % val)
        if getattr(val, "tzinfo", None) is not None:
            raise ValueError(
                "Excel does not support datetimes with "
                "timezones. Please ensure that datetimes "
                "are timezone unaware before writing to Excel."
            )
        return val

    def _format_header_mi(self) -> Iterable[ExcelCell]:
        """Yield header cells for MultiIndex columns (merged or dotted form)."""
        if self.columns.nlevels > 1:
            if not self.index:
                raise NotImplementedError(
                    "Writing to Excel with MultiIndex columns and no "
                    "index ('index'=False) is not yet implemented."
                )

        if not (self._has_aliases or self.header):
            return

        columns = self.columns
        level_strs = columns._format_multi(
            sparsify=self.merge_cells, include_names=False
        )
        level_lengths = get_level_lengths(level_strs)
        coloffset = 0
        lnum = 0

        if self.index and isinstance(self.df.index, MultiIndex):
            # Shift headers right past the extra index-level columns.
            coloffset = len(self.df.index[0]) - 1

        if self.merge_cells:
            # Format multi-index as a merged cells.
            for lnum, name in enumerate(columns.names):
                yield ExcelCell(
                    row=lnum,
                    col=coloffset,
                    val=name,
                    style=self.header_style,
                )

            for lnum, (spans, levels, level_codes) in enumerate(
                zip(level_lengths, columns.levels, columns.codes)
            ):
                values = levels.take(level_codes)
                for i, span_val in spans.items():
                    mergestart, mergeend = None, None
                    if span_val > 1:
                        mergestart, mergeend = lnum, coloffset + i + span_val
                    yield CssExcelCell(
                        row=lnum,
                        col=coloffset + i + 1,
                        val=values[i],
                        style=self.header_style,
                        css_styles=getattr(self.styler, "ctx_columns", None),
                        css_row=lnum,
                        css_col=i,
                        css_converter=self.style_converter,
                        mergestart=mergestart,
                        mergeend=mergeend,
                    )
        else:
            # Format in legacy format with dots to indicate levels.
            for i, values in enumerate(zip(*level_strs)):
                v = ".".join(map(pprint_thing, values))
                yield CssExcelCell(
                    row=lnum,
                    col=coloffset + i + 1,
                    val=v,
                    style=self.header_style,
                    css_styles=getattr(self.styler, "ctx_columns", None),
                    css_row=lnum,
                    css_col=i,
                    css_converter=self.style_converter,
                )

        self.rowcounter = lnum

    def _format_header_regular(self) -> Iterable[ExcelCell]:
        """Yield header cells for flat (non-MultiIndex) columns."""
        if self._has_aliases or self.header:
            coloffset = 0

            if self.index:
                # Leave room on the left for the index column(s).
                coloffset = 1
                if isinstance(self.df.index, MultiIndex):
                    coloffset = len(self.df.index.names)

            colnames = self.columns
            if self._has_aliases:
                self.header = cast(Sequence, self.header)
                if len(self.header) != len(self.columns):
                    raise ValueError(
                        f"Writing {len(self.columns)} cols "
                        f"but got {len(self.header)} aliases"
                    )
                colnames = self.header

            for colindex, colname in enumerate(colnames):
                yield CssExcelCell(
                    row=self.rowcounter,
                    col=colindex + coloffset,
                    val=colname,
                    style=self.header_style,
                    css_styles=getattr(self.styler, "ctx_columns", None),
                    css_row=0,
                    css_col=colindex,
                    css_converter=self.style_converter,
                )

    def _format_header(self) -> Iterable[ExcelCell]:
        """Yield all header cells, plus an index-names row when applicable."""
        gen: Iterable[ExcelCell]

        if isinstance(self.columns, MultiIndex):
            gen = self._format_header_mi()
        else:
            gen = self._format_header_regular()

        gen2: Iterable[ExcelCell] = ()

        if self.df.index.names:
            row = [x if x is not None else "" for x in self.df.index.names] + [
                ""
            ] * len(self.columns)
            # all(...) replaces the original reduce(lambda x, y: x and y, ...)
            # with identical truthiness. NOTE(review): the "" padding for the
            # data columns makes this False whenever columns exist — behavior
            # preserved as-is.
            if all(x != "" for x in row):
                gen2 = (
                    ExcelCell(self.rowcounter, colindex, val, self.header_style)
                    for colindex, val in enumerate(row)
                )
                self.rowcounter += 1
        return itertools.chain(gen, gen2)

    def _format_body(self) -> Iterable[ExcelCell]:
        """Dispatch to hierarchical or regular row formatting."""
        if isinstance(self.df.index, MultiIndex):
            return self._format_hierarchical_rows()
        else:
            return self._format_regular_rows()

    def _format_regular_rows(self) -> Iterable[ExcelCell]:
        """Yield index cells (if any) and body cells for a flat index."""
        if self._has_aliases or self.header:
            self.rowcounter += 1

        # output index and index_label?
        if self.index:
            # check aliases
            # if list only take first as this is not a MultiIndex
            if self.index_label and isinstance(
                self.index_label, (list, tuple, np.ndarray, Index)
            ):
                index_label = self.index_label[0]
            # if string good to go
            elif self.index_label and isinstance(self.index_label, str):
                index_label = self.index_label
            else:
                index_label = self.df.index.names[0]

            if isinstance(self.columns, MultiIndex):
                self.rowcounter += 1

            if index_label and self.header is not False:
                yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style)

            # write index_values
            index_values = self.df.index
            if isinstance(self.df.index, PeriodIndex):
                # Excel cannot hold Periods; convert to timestamps.
                index_values = self.df.index.to_timestamp()

            for idx, idxval in enumerate(index_values):
                yield CssExcelCell(
                    row=self.rowcounter + idx,
                    col=0,
                    val=idxval,
                    style=self.header_style,
                    css_styles=getattr(self.styler, "ctx_index", None),
                    css_row=idx,
                    css_col=0,
                    css_converter=self.style_converter,
                )
            coloffset = 1
        else:
            coloffset = 0

        yield from self._generate_body(coloffset)

    def _format_hierarchical_rows(self) -> Iterable[ExcelCell]:
        """Yield index cells and body cells for a MultiIndex index."""
        if self._has_aliases or self.header:
            self.rowcounter += 1

        gcolidx = 0

        if self.index:
            index_labels = self.df.index.names
            # check for aliases
            if self.index_label and isinstance(
                self.index_label, (list, tuple, np.ndarray, Index)
            ):
                index_labels = self.index_label

            # MultiIndex columns require an extra row
            # with index names (blank if None) for
            # unambiguous round-trip, unless not merging,
            # in which case the names all go on one row Issue #11328
            if isinstance(self.columns, MultiIndex) and self.merge_cells:
                self.rowcounter += 1

            # if index labels are not empty go ahead and dump
            if com.any_not_none(*index_labels) and self.header is not False:
                for cidx, name in enumerate(index_labels):
                    yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style)

            if self.merge_cells:
                # Format hierarchical rows as merged cells.
                level_strs = self.df.index._format_multi(
                    sparsify=True, include_names=False
                )
                level_lengths = get_level_lengths(level_strs)

                for spans, levels, level_codes in zip(
                    level_lengths, self.df.index.levels, self.df.index.codes
                ):
                    values = levels.take(
                        level_codes,
                        allow_fill=levels._can_hold_na,
                        fill_value=levels._na_value,
                    )

                    for i, span_val in spans.items():
                        mergestart, mergeend = None, None
                        if span_val > 1:
                            mergestart = self.rowcounter + i + span_val - 1
                            mergeend = gcolidx
                        yield CssExcelCell(
                            row=self.rowcounter + i,
                            col=gcolidx,
                            val=values[i],
                            style=self.header_style,
                            css_styles=getattr(self.styler, "ctx_index", None),
                            css_row=i,
                            css_col=gcolidx,
                            css_converter=self.style_converter,
                            mergestart=mergestart,
                            mergeend=mergeend,
                        )
                    gcolidx += 1

            else:
                # Format hierarchical rows with non-merged values.
                for indexcolvals in zip(*self.df.index):
                    for idx, indexcolval in enumerate(indexcolvals):
                        yield CssExcelCell(
                            row=self.rowcounter + idx,
                            col=gcolidx,
                            val=indexcolval,
                            style=self.header_style,
                            css_styles=getattr(self.styler, "ctx_index", None),
                            css_row=idx,
                            css_col=gcolidx,
                            css_converter=self.style_converter,
                        )
                    gcolidx += 1

        yield from self._generate_body(gcolidx)

    @property
    def _has_aliases(self) -> bool:
        """Whether the aliases for column names are present."""
        return is_list_like(self.header)

    def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:
        """Yield one CssExcelCell per data value, column by column."""
        # Write the body of the frame data series by series.
        for colidx in range(len(self.columns)):
            series = self.df.iloc[:, colidx]
            for i, val in enumerate(series):
                yield CssExcelCell(
                    row=self.rowcounter + i,
                    col=colidx + coloffset,
                    val=val,
                    style=None,
                    css_styles=getattr(self.styler, "ctx", None),
                    css_row=i,
                    css_col=colidx,
                    css_converter=self.style_converter,
                )

    def get_formatted_cells(self) -> Iterable[ExcelCell]:
        """Yield every header and body cell with scalar formatting applied."""
        for cell in itertools.chain(self._format_header(), self._format_body()):
            cell.val = self._format_value(cell.val)
            yield cell

    @doc(storage_options=_shared_docs["storage_options"])
    def write(
        self,
        writer: FilePath | WriteExcelBuffer | ExcelWriter,
        sheet_name: str = "Sheet1",
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
        engine: str | None = None,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        writer : path-like, file-like, or ExcelWriter object
            File path or existing ExcelWriter
        sheet_name : str, default 'Sheet1'
            Name of sheet which will contain DataFrame
        startrow :
            upper left cell row to dump data frame
        startcol :
            upper left cell column to dump data frame
        freeze_panes : tuple of integer (length 2), default None
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen
        engine : string, default None
            write engine to use if writer is a path - you can also set this
            via the options ``io.excel.xlsx.writer``,
            or ``io.excel.xlsm.writer``.

        {storage_options}

        engine_kwargs: dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        from pandas.io.excel import ExcelWriter

        num_rows, num_cols = self.df.shape
        if num_rows > self.max_rows or num_cols > self.max_cols:
            raise ValueError(
                f"This sheet is too large! Your sheet size is: {num_rows}, {num_cols} "
                f"Max sheet size is: {self.max_rows}, {self.max_cols}"
            )

        if engine_kwargs is None:
            engine_kwargs = {}

        formatted_cells = self.get_formatted_cells()
        if isinstance(writer, ExcelWriter):
            # Caller owns the writer's lifecycle; do not close it here.
            need_save = False
        else:
            writer = ExcelWriter(
                writer,
                engine=engine,
                storage_options=storage_options,
                engine_kwargs=engine_kwargs,
            )
            need_save = True

        try:
            writer._write_cells(
                formatted_cells,
                sheet_name,
                startrow=startrow,
                startcol=startcol,
                freeze_panes=freeze_panes,
            )
        finally:
            # make sure to close opened file handles
            if need_save:
                writer.close()
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/format.py
ADDED
@@ -0,0 +1,2058 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Internal module for formatting output data in csv, html, xml,
|
3 |
+
and latex files. This module also applies to display formatting.
|
4 |
+
"""
|
5 |
+
from __future__ import annotations
|
6 |
+
|
7 |
+
from collections.abc import (
|
8 |
+
Generator,
|
9 |
+
Hashable,
|
10 |
+
Mapping,
|
11 |
+
Sequence,
|
12 |
+
)
|
13 |
+
from contextlib import contextmanager
|
14 |
+
from csv import QUOTE_NONE
|
15 |
+
from decimal import Decimal
|
16 |
+
from functools import partial
|
17 |
+
from io import StringIO
|
18 |
+
import math
|
19 |
+
import re
|
20 |
+
from shutil import get_terminal_size
|
21 |
+
from typing import (
|
22 |
+
TYPE_CHECKING,
|
23 |
+
Any,
|
24 |
+
Callable,
|
25 |
+
Final,
|
26 |
+
cast,
|
27 |
+
)
|
28 |
+
|
29 |
+
import numpy as np
|
30 |
+
|
31 |
+
from pandas._config.config import (
|
32 |
+
get_option,
|
33 |
+
set_option,
|
34 |
+
)
|
35 |
+
|
36 |
+
from pandas._libs import lib
|
37 |
+
from pandas._libs.missing import NA
|
38 |
+
from pandas._libs.tslibs import (
|
39 |
+
NaT,
|
40 |
+
Timedelta,
|
41 |
+
Timestamp,
|
42 |
+
)
|
43 |
+
from pandas._libs.tslibs.nattype import NaTType
|
44 |
+
|
45 |
+
from pandas.core.dtypes.common import (
|
46 |
+
is_complex_dtype,
|
47 |
+
is_float,
|
48 |
+
is_integer,
|
49 |
+
is_list_like,
|
50 |
+
is_numeric_dtype,
|
51 |
+
is_scalar,
|
52 |
+
)
|
53 |
+
from pandas.core.dtypes.dtypes import (
|
54 |
+
CategoricalDtype,
|
55 |
+
DatetimeTZDtype,
|
56 |
+
ExtensionDtype,
|
57 |
+
)
|
58 |
+
from pandas.core.dtypes.missing import (
|
59 |
+
isna,
|
60 |
+
notna,
|
61 |
+
)
|
62 |
+
|
63 |
+
from pandas.core.arrays import (
|
64 |
+
Categorical,
|
65 |
+
DatetimeArray,
|
66 |
+
ExtensionArray,
|
67 |
+
TimedeltaArray,
|
68 |
+
)
|
69 |
+
from pandas.core.arrays.string_ import StringDtype
|
70 |
+
from pandas.core.base import PandasObject
|
71 |
+
import pandas.core.common as com
|
72 |
+
from pandas.core.indexes.api import (
|
73 |
+
Index,
|
74 |
+
MultiIndex,
|
75 |
+
PeriodIndex,
|
76 |
+
ensure_index,
|
77 |
+
)
|
78 |
+
from pandas.core.indexes.datetimes import DatetimeIndex
|
79 |
+
from pandas.core.indexes.timedeltas import TimedeltaIndex
|
80 |
+
from pandas.core.reshape.concat import concat
|
81 |
+
|
82 |
+
from pandas.io.common import (
|
83 |
+
check_parent_directory,
|
84 |
+
stringify_path,
|
85 |
+
)
|
86 |
+
from pandas.io.formats import printing
|
87 |
+
|
88 |
+
if TYPE_CHECKING:
|
89 |
+
from pandas._typing import (
|
90 |
+
ArrayLike,
|
91 |
+
Axes,
|
92 |
+
ColspaceArgType,
|
93 |
+
ColspaceType,
|
94 |
+
CompressionOptions,
|
95 |
+
FilePath,
|
96 |
+
FloatFormatType,
|
97 |
+
FormattersType,
|
98 |
+
IndexLabel,
|
99 |
+
SequenceNotStr,
|
100 |
+
StorageOptions,
|
101 |
+
WriteBuffer,
|
102 |
+
)
|
103 |
+
|
104 |
+
from pandas import (
|
105 |
+
DataFrame,
|
106 |
+
Series,
|
107 |
+
)
|
108 |
+
|
109 |
+
|
# Shared docstring fragments. ``common_docstring`` is %-interpolated into the
# docstrings of DataFrame.to_string / to_html / to_latex; the placeholders
# (%(col_space)s, %(header)s, ...) are filled in per output format.
common_docstring: Final = """
        Parameters
        ----------
        buf : str, Path or StringIO-like, optional, default None
            Buffer to write to. If None, the output is returned as a string.
        columns : array-like, optional, default None
            The subset of columns to write. Writes all columns by default.
        col_space : %(col_space_type)s, optional
            %(col_space)s.
        header : %(header_type)s, optional
            %(header)s.
        index : bool, optional, default True
            Whether to print index (row) labels.
        na_rep : str, optional, default 'NaN'
            String representation of ``NaN`` to use.
        formatters : list, tuple or dict of one-param. functions, optional
            Formatter functions to apply to columns' elements by position or
            name.
            The result of each function must be a unicode string.
            List/tuple must be of length equal to the number of columns.
        float_format : one-parameter function, optional, default None
            Formatter function to apply to columns' elements if they are
            floats. This function must return a unicode string and will be
            applied only to the non-``NaN`` elements, with ``NaN`` being
            handled by ``na_rep``.
        sparsify : bool, optional, default True
            Set to False for a DataFrame with a hierarchical index to print
            every multiindex key at each row.
        index_names : bool, optional, default True
            Prints the names of the indexes.
        justify : str, default None
            How to justify the column labels. If None uses the option from
            the print configuration (controlled by set_option), 'right' out
            of the box. Valid values are

            * left
            * right
            * center
            * justify
            * justify-all
            * start
            * end
            * inherit
            * match-parent
            * initial
            * unset.
        max_rows : int, optional
            Maximum number of rows to display in the console.
        max_cols : int, optional
            Maximum number of columns to display in the console.
        show_dimensions : bool, default False
            Display DataFrame dimensions (number of rows by number of columns).
        decimal : str, default '.'
            Character recognized as decimal separator, e.g. ',' in Europe.
    """

# Accepted values for the ``justify`` argument above (CSS text-align keywords).
VALID_JUSTIFY_PARAMETERS = (
    "left",
    "right",
    "center",
    "justify",
    "justify-all",
    "start",
    "end",
    "inherit",
    "match-parent",
    "initial",
    "unset",
)

# Shared "Returns" section appended after ``common_docstring`` in the
# to_string-family docstrings.
return_docstring: Final = """
    Returns
    -------
    str or None
        If buf is None, returns the result as a string. Otherwise returns
        None.
    """
187 |
+
|
188 |
+
|
class SeriesFormatter:
    """
    Implement the main logic of Series.to_string, which underlies
    Series.__repr__.
    """

    def __init__(
        self,
        series: Series,
        *,
        length: bool | str = True,
        header: bool = True,
        index: bool = True,
        na_rep: str = "NaN",
        name: bool = False,
        float_format: str | None = None,
        dtype: bool = True,
        max_rows: int | None = None,
        min_rows: int | None = None,
    ) -> None:
        self.series = series
        self.buf = StringIO()
        self.name = name
        self.na_rep = na_rep
        self.header = header
        # ``length`` may be True/False or the string "truncate" (show the
        # length only when the output was vertically truncated).
        self.length = length
        self.index = index
        self.max_rows = max_rows
        self.min_rows = min_rows

        # Fall back to the global display option when no explicit
        # float_format was supplied.
        if float_format is None:
            float_format = get_option("display.float_format")
        self.float_format = float_format
        self.dtype = dtype
        self.adj = printing.get_adjustment()

        # Populates tr_series / tr_row_num / is_truncated_vertically.
        self._chk_truncate()

    def _chk_truncate(self) -> None:
        """Slice ``self.series`` down to the rows that will be displayed."""
        self.tr_row_num: int | None

        min_rows = self.min_rows
        max_rows = self.max_rows
        # truncation determined by max_rows, actual truncated number of rows
        # used below by min_rows
        is_truncated_vertically = max_rows and (len(self.series) > max_rows)
        series = self.series
        if is_truncated_vertically:
            max_rows = cast(int, max_rows)
            if min_rows:
                # if min_rows is set (not None or 0), set max_rows to minimum
                # of both
                max_rows = min(min_rows, max_rows)
            if max_rows == 1:
                row_num = max_rows
                series = series.iloc[:max_rows]
            else:
                # Show the first and last ``row_num`` rows around the "..." row.
                row_num = max_rows // 2
                series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
            self.tr_row_num = row_num
        else:
            self.tr_row_num = None
        self.tr_series = series
        self.is_truncated_vertically = is_truncated_vertically

    def _get_footer(self) -> str:
        """Build the footer line(s): Freq, Name, Length, dtype, category info."""
        name = self.series.name
        footer = ""

        index = self.series.index
        if (
            isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex))
            and index.freq is not None
        ):
            footer += f"Freq: {index.freqstr}"

        if self.name is not False and name is not None:
            if footer:
                footer += ", "

            series_name = printing.pprint_thing(name, escape_chars=("\t", "\r", "\n"))
            footer += f"Name: {series_name}"

        if self.length is True or (
            self.length == "truncate" and self.is_truncated_vertically
        ):
            if footer:
                footer += ", "
            footer += f"Length: {len(self.series)}"

        if self.dtype is not False and self.dtype is not None:
            dtype_name = getattr(self.tr_series.dtype, "name", None)
            if dtype_name:
                if footer:
                    footer += ", "
                footer += f"dtype: {printing.pprint_thing(dtype_name)}"

        # level infos are added to the end and in a new line, like it is done
        # for Categoricals
        if isinstance(self.tr_series.dtype, CategoricalDtype):
            level_info = self.tr_series._values._get_repr_footer()
            if footer:
                footer += "\n"
            footer += level_info

        return str(footer)

    def _get_formatted_values(self) -> list[str]:
        """Format the (possibly truncated) values as a list of strings."""
        return format_array(
            self.tr_series._values,
            None,
            float_format=self.float_format,
            na_rep=self.na_rep,
            leading_space=self.index,
        )

    def to_string(self) -> str:
        """Render the Series as a console-friendly string."""
        series = self.tr_series
        footer = self._get_footer()

        if len(series) == 0:
            return f"{type(self.series).__name__}([], {footer})"

        index = series.index
        have_header = _has_names(index)
        if isinstance(index, MultiIndex):
            fmt_index = index._format_multi(include_names=True, sparsify=None)
            adj = printing.get_adjustment()
            fmt_index = adj.adjoin(2, *fmt_index).split("\n")
        else:
            fmt_index = index._format_flat(include_name=True)
        fmt_values = self._get_formatted_values()

        if self.is_truncated_vertically:
            n_header_rows = 0
            row_num = self.tr_row_num
            row_num = cast(int, row_num)
            # Width of the last row of the top half determines how the
            # ellipsis row is padded.
            width = self.adj.len(fmt_values[row_num - 1])
            if width > 3:
                dot_str = "..."
            else:
                dot_str = ".."
            # Series uses mode=center because it has single value columns
            # DataFrame uses mode=left
            dot_str = self.adj.justify([dot_str], width, mode="center")[0]
            fmt_values.insert(row_num + n_header_rows, dot_str)
            fmt_index.insert(row_num + 1, "")

        if self.index:
            # fmt_index[0] is the (possible) index header; rows start at [1:].
            result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
        else:
            result = self.adj.adjoin(3, fmt_values)

        if self.header and have_header:
            result = fmt_index[0] + "\n" + result

        if footer:
            result += "\n" + footer

        return str("".join(result))
349 |
+
|
350 |
+
|
def get_dataframe_repr_params() -> dict[str, Any]:
    """Get the parameters used to repr(dataFrame) calls using DataFrame.to_string.

    Supplying these parameters to DataFrame.to_string is equivalent to calling
    ``repr(DataFrame)``. This is useful if you want to adjust the repr output.

    .. versionadded:: 1.4.0

    Example
    -------
    >>> import pandas as pd
    >>>
    >>> df = pd.DataFrame([[1, 2], [3, 4]])
    >>> repr_params = pd.io.formats.format.get_dataframe_repr_params()
    >>> repr(df) == df.to_string(**repr_params)
    True
    """
    from pandas.io.formats import console

    # Map each to_string keyword onto the display option that backs it.
    option_for_keyword = {
        "max_rows": "display.max_rows",
        "min_rows": "display.min_rows",
        "max_cols": "display.max_columns",
        "max_colwidth": "display.max_colwidth",
        "show_dimensions": "display.show_dimensions",
    }
    repr_params: dict[str, Any] = {
        keyword: get_option(option) for keyword, option in option_for_keyword.items()
    }
    # Only constrain the line width when the repr may span multiple lines.
    repr_params["line_width"] = (
        console.get_console_size()[0]
        if get_option("display.expand_frame_repr")
        else None
    )
    return repr_params
382 |
+
|
383 |
+
|
def get_series_repr_params() -> dict[str, Any]:
    """Get the parameters used to repr(Series) calls using Series.to_string.

    Supplying these parameters to Series.to_string is equivalent to calling
    ``repr(series)``. This is useful if you want to adjust the series repr output.

    .. versionadded:: 1.4.0

    Example
    -------
    >>> import pandas as pd
    >>>
    >>> ser = pd.Series([1, 2, 3, 4])
    >>> repr_params = pd.io.formats.format.get_series_repr_params()
    >>> repr(ser) == ser.to_string(**repr_params)
    True
    """
    _, terminal_height = get_terminal_size()
    configured_max = get_option("display.max_rows")
    # A configured max of 0 means "fit the terminal": both bounds fall back
    # to the terminal height.
    if configured_max == 0:
        max_rows = terminal_height
        min_rows = terminal_height
    else:
        max_rows = configured_max
        min_rows = get_option("display.min_rows")

    return {
        "name": True,
        "dtype": True,
        "min_rows": min_rows,
        "max_rows": max_rows,
        "length": get_option("display.show_dimensions"),
    }
413 |
+
|
414 |
+
|
class DataFrameFormatter:
    """
    Class for processing dataframe formatting options and data.

    Used by DataFrame.to_string, which backs DataFrame.__repr__.
    """

    # Append the shared parameter/return documentation so the rendered
    # __doc__ matches the to_string-family docstrings.
    __doc__ = __doc__ if __doc__ else ""
    __doc__ += common_docstring + return_docstring

    def __init__(
        self,
        frame: DataFrame,
        columns: Axes | None = None,
        col_space: ColspaceArgType | None = None,
        header: bool | SequenceNotStr[str] = True,
        index: bool = True,
        na_rep: str = "NaN",
        formatters: FormattersType | None = None,
        justify: str | None = None,
        float_format: FloatFormatType | None = None,
        sparsify: bool | None = None,
        index_names: bool = True,
        max_rows: int | None = None,
        min_rows: int | None = None,
        max_cols: int | None = None,
        show_dimensions: bool | str = False,
        decimal: str = ".",
        bold_rows: bool = False,
        escape: bool = True,
    ) -> None:
        self.frame = frame
        # NOTE: _initialize_columns may reassign self.frame (column subset),
        # so it must run before anything that reads self.frame.columns.
        self.columns = self._initialize_columns(columns)
        self.col_space = self._initialize_colspace(col_space)
        self.header = header
        self.index = index
        self.na_rep = na_rep
        self.formatters = self._initialize_formatters(formatters)
        self.justify = self._initialize_justify(justify)
        self.float_format = float_format
        self.sparsify = self._initialize_sparsify(sparsify)
        self.show_index_names = index_names
        self.decimal = decimal
        self.bold_rows = bold_rows
        self.escape = escape
        self.max_rows = max_rows
        self.min_rows = min_rows
        self.max_cols = max_cols
        self.show_dimensions = show_dimensions

        self.max_cols_fitted = self._calc_max_cols_fitted()
        self.max_rows_fitted = self._calc_max_rows_fitted()

        # tr_frame is the (possibly truncated) frame actually rendered.
        self.tr_frame = self.frame
        self.truncate()
        self.adj = printing.get_adjustment()

    def get_strcols(self) -> list[list[str]]:
        """
        Render a DataFrame to a list of columns (as lists of strings).
        """
        strcols = self._get_strcols_without_index()

        if self.index:
            str_index = self._get_formatted_index(self.tr_frame)
            strcols.insert(0, str_index)

        return strcols

    @property
    def should_show_dimensions(self) -> bool:
        # True when requested unconditionally, or requested only-on-truncation
        # and the frame actually was truncated.
        return self.show_dimensions is True or (
            self.show_dimensions == "truncate" and self.is_truncated
        )

    @property
    def is_truncated(self) -> bool:
        return bool(self.is_truncated_horizontally or self.is_truncated_vertically)

    @property
    def is_truncated_horizontally(self) -> bool:
        return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted))

    @property
    def is_truncated_vertically(self) -> bool:
        return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted))

    @property
    def dimensions_info(self) -> str:
        # Trailing "[N rows x M columns]" line, always for the FULL frame.
        return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]"

    @property
    def has_index_names(self) -> bool:
        return _has_names(self.frame.index)

    @property
    def has_column_names(self) -> bool:
        return _has_names(self.frame.columns)

    @property
    def show_row_idx_names(self) -> bool:
        return all((self.has_index_names, self.index, self.show_index_names))

    @property
    def show_col_idx_names(self) -> bool:
        return all((self.has_column_names, self.show_index_names, self.header))

    @property
    def max_rows_displayed(self) -> int:
        # max_rows of None/0 means "no limit" -> whole frame.
        return min(self.max_rows or len(self.frame), len(self.frame))

    def _initialize_sparsify(self, sparsify: bool | None) -> bool:
        # None defers to the global display option.
        if sparsify is None:
            return get_option("display.multi_sparse")
        return sparsify

    def _initialize_formatters(
        self, formatters: FormattersType | None
    ) -> FormattersType:
        """Validate ``formatters``; list/tuple length must match the columns."""
        if formatters is None:
            return {}
        elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict):
            return formatters
        else:
            raise ValueError(
                f"Formatters length({len(formatters)}) should match "
                f"DataFrame number of columns({len(self.frame.columns)})"
            )

    def _initialize_justify(self, justify: str | None) -> str:
        # None defers to the global display option.
        if justify is None:
            return get_option("display.colheader_justify")
        else:
            return justify

    def _initialize_columns(self, columns: Axes | None) -> Index:
        """Resolve the columns to render, subsetting self.frame if needed."""
        if columns is not None:
            cols = ensure_index(columns)
            self.frame = self.frame[cols]
            return cols
        else:
            return self.frame.columns

    def _initialize_colspace(self, col_space: ColspaceArgType | None) -> ColspaceType:
        """Normalize ``col_space`` to a mapping of column -> width."""
        result: ColspaceType

        if col_space is None:
            result = {}
        elif isinstance(col_space, (int, str)):
            # Scalar applies to every column; "" is the index-column default.
            result = {"": col_space}
            result.update({column: col_space for column in self.frame.columns})
        elif isinstance(col_space, Mapping):
            for column in col_space.keys():
                if column not in self.frame.columns and column != "":
                    raise ValueError(
                        f"Col_space is defined for an unknown column: {column}"
                    )
            result = col_space
        else:
            # Positional sequence: must align 1:1 with the columns.
            if len(self.frame.columns) != len(col_space):
                raise ValueError(
                    f"Col_space length({len(col_space)}) should match "
                    f"DataFrame number of columns({len(self.frame.columns)})"
                )
            result = dict(zip(self.frame.columns, col_space))
        return result

    def _calc_max_cols_fitted(self) -> int | None:
        """Number of columns fitting the screen."""
        if not self._is_in_terminal():
            return self.max_cols

        width, _ = get_terminal_size()
        if self._is_screen_narrow(width):
            return width
        else:
            return self.max_cols

    def _calc_max_rows_fitted(self) -> int | None:
        """Number of rows with data fitting the screen."""
        max_rows: int | None

        if self._is_in_terminal():
            _, height = get_terminal_size()
            if self.max_rows == 0:
                # rows available to fill with actual data
                return height - self._get_number_of_auxiliary_rows()

            if self._is_screen_short(height):
                max_rows = height
            else:
                max_rows = self.max_rows
        else:
            max_rows = self.max_rows

        return self._adjust_max_rows(max_rows)

    def _adjust_max_rows(self, max_rows: int | None) -> int | None:
        """Adjust max_rows using display logic.

        See description here:
        https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options

        GH #37359
        """
        if max_rows:
            if (len(self.frame) > max_rows) and self.min_rows:
                # if truncated, set max_rows showed to min_rows
                max_rows = min(self.min_rows, max_rows)
        return max_rows

    def _is_in_terminal(self) -> bool:
        """Check if the output is to be shown in terminal."""
        return bool(self.max_cols == 0 or self.max_rows == 0)

    def _is_screen_narrow(self, max_width) -> bool:
        return bool(self.max_cols == 0 and len(self.frame.columns) > max_width)

    def _is_screen_short(self, max_height) -> bool:
        return bool(self.max_rows == 0 and len(self.frame) > max_height)

    def _get_number_of_auxiliary_rows(self) -> int:
        """Get number of rows occupied by prompt, dots and dimension info."""
        dot_row = 1
        prompt_row = 1
        num_rows = dot_row + prompt_row

        if self.show_dimensions:
            num_rows += len(self.dimensions_info.splitlines())

        if self.header:
            num_rows += 1

        return num_rows

    def truncate(self) -> None:
        """
        Check whether the frame should be truncated. If so, slice the frame up.
        """
        if self.is_truncated_horizontally:
            self._truncate_horizontally()

        if self.is_truncated_vertically:
            self._truncate_vertically()

    def _truncate_horizontally(self) -> None:
        """Remove columns, which are not to be displayed and adjust formatters.

        Attributes affected:
            - tr_frame
            - formatters
            - tr_col_num
        """
        assert self.max_cols_fitted is not None
        col_num = self.max_cols_fitted // 2
        if col_num >= 1:
            # Keep col_num columns from each side of the "..." column.
            left = self.tr_frame.iloc[:, :col_num]
            right = self.tr_frame.iloc[:, -col_num:]
            self.tr_frame = concat((left, right), axis=1)

            # truncate formatter
            if isinstance(self.formatters, (list, tuple)):
                self.formatters = [
                    *self.formatters[:col_num],
                    *self.formatters[-col_num:],
                ]
        else:
            col_num = cast(int, self.max_cols)
            self.tr_frame = self.tr_frame.iloc[:, :col_num]
        self.tr_col_num = col_num

    def _truncate_vertically(self) -> None:
        """Remove rows, which are not to be displayed.

        Attributes affected:
            - tr_frame
            - tr_row_num
        """
        assert self.max_rows_fitted is not None
        row_num = self.max_rows_fitted // 2
        if row_num >= 1:
            # Keep row_num rows from the top and the bottom.
            _len = len(self.tr_frame)
            _slice = np.hstack([np.arange(row_num), np.arange(_len - row_num, _len)])
            self.tr_frame = self.tr_frame.iloc[_slice]
        else:
            row_num = cast(int, self.max_rows)
            self.tr_frame = self.tr_frame.iloc[:row_num, :]
        self.tr_row_num = row_num

    def _get_strcols_without_index(self) -> list[list[str]]:
        """Format all data columns (header + values), excluding the index."""
        strcols: list[list[str]] = []

        if not is_list_like(self.header) and not self.header:
            # No header requested: only the formatted values.
            for i, c in enumerate(self.tr_frame):
                fmt_values = self.format_col(i)
                fmt_values = _make_fixed_width(
                    strings=fmt_values,
                    justify=self.justify,
                    minimum=int(self.col_space.get(c, 0)),
                    adj=self.adj,
                )
                strcols.append(fmt_values)
            return strcols

        if is_list_like(self.header):
            # cast here since can't be bool if is_list_like
            self.header = cast(list[str], self.header)
            if len(self.header) != len(self.columns):
                raise ValueError(
                    f"Writing {len(self.columns)} cols "
                    f"but got {len(self.header)} aliases"
                )
            str_columns = [[label] for label in self.header]
        else:
            str_columns = self._get_formatted_column_labels(self.tr_frame)

        if self.show_row_idx_names:
            # Extra blank header row aligning with the index-name row.
            for x in str_columns:
                x.append("")

        for i, c in enumerate(self.tr_frame):
            cheader = str_columns[i]
            header_colwidth = max(
                int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader)
            )
            fmt_values = self.format_col(i)
            fmt_values = _make_fixed_width(
                fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
            )

            max_len = max(*(self.adj.len(x) for x in fmt_values), header_colwidth)
            cheader = self.adj.justify(cheader, max_len, mode=self.justify)
            strcols.append(cheader + fmt_values)

        return strcols

    def format_col(self, i: int) -> list[str]:
        """Format the values of column ``i`` of the truncated frame."""
        frame = self.tr_frame
        formatter = self._get_formatter(i)
        return format_array(
            frame.iloc[:, i]._values,
            formatter,
            float_format=self.float_format,
            na_rep=self.na_rep,
            space=self.col_space.get(frame.columns[i]),
            decimal=self.decimal,
            leading_space=self.index,
        )

    def _get_formatter(self, i: str | int) -> Callable | None:
        """Look up the user formatter for column ``i`` (position or label)."""
        if isinstance(self.formatters, (list, tuple)):
            if is_integer(i):
                i = cast(int, i)
                return self.formatters[i]
            else:
                return None
        else:
            # dict of formatters: translate a positional key to its label,
            # unless the integer itself is a column label.
            if is_integer(i) and i not in self.columns:
                i = self.columns[i]
            return self.formatters.get(i, None)

    def _get_formatted_column_labels(self, frame: DataFrame) -> list[list[str]]:
        """Format the column labels, one list of header lines per column."""
        from pandas.core.indexes.multi import sparsify_labels

        columns = frame.columns

        if isinstance(columns, MultiIndex):
            fmt_columns = columns._format_multi(sparsify=False, include_names=False)
            fmt_columns = list(zip(*fmt_columns))
            dtypes = self.frame.dtypes._values

            # if we have a Float level, they don't use leading space at all
            restrict_formatting = any(level.is_floating for level in columns.levels)
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))

            def space_format(x, y):
                if (
                    y not in self.formatters
                    and need_leadsp[x]
                    and not restrict_formatting
                ):
                    return " " + y
                return y

            str_columns_tuple = list(
                zip(*([space_format(x, y) for y in x] for x in fmt_columns))
            )
            if self.sparsify and len(str_columns_tuple):
                str_columns_tuple = sparsify_labels(str_columns_tuple)

            str_columns = [list(x) for x in zip(*str_columns_tuple)]
        else:
            fmt_columns = columns._format_flat(include_name=False)
            dtypes = self.frame.dtypes
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
            # Numeric columns get a leading space so headers line up with the
            # sign position of the values (unless a user formatter is set).
            str_columns = [
                [" " + x if not self._get_formatter(i) and need_leadsp[x] else x]
                for i, x in enumerate(fmt_columns)
            ]
        return str_columns

    def _get_formatted_index(self, frame: DataFrame) -> list[str]:
        # Note: this is only used by to_string() and to_latex(), not by
        # to_html(). so safe to cast col_space here.
        col_space = {k: cast(int, v) for k, v in self.col_space.items()}
        index = frame.index
        columns = frame.columns
        fmt = self._get_formatter("__index__")

        if isinstance(index, MultiIndex):
            fmt_index = index._format_multi(
                sparsify=self.sparsify,
                include_names=self.show_row_idx_names,
                formatter=fmt,
            )
        else:
            fmt_index = [
                index._format_flat(include_name=self.show_row_idx_names, formatter=fmt)
            ]

        fmt_index = [
            tuple(
                _make_fixed_width(
                    list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj
                )
            )
            for x in fmt_index
        ]

        adjoined = self.adj.adjoin(1, *fmt_index).split("\n")

        # empty space for columns
        if self.show_col_idx_names:
            col_header = [str(x) for x in self._get_column_name_list()]
        else:
            col_header = [""] * columns.nlevels

        if self.header:
            return col_header + adjoined
        else:
            return adjoined

    def _get_column_name_list(self) -> list[Hashable]:
        """Column index name(s), with None rendered as the empty string."""
        names: list[Hashable] = []
        columns = self.frame.columns
        if isinstance(columns, MultiIndex):
            names.extend("" if name is None else name for name in columns.names)
        else:
            names.append("" if columns.name is None else columns.name)
        return names
866 |
+
|
867 |
+
|
868 |
+
class DataFrameRenderer:
|
869 |
+
"""Class for creating dataframe output in multiple formats.
|
870 |
+
|
871 |
+
Called in pandas.core.generic.NDFrame:
|
872 |
+
- to_csv
|
873 |
+
- to_latex
|
874 |
+
|
875 |
+
Called in pandas.core.frame.DataFrame:
|
876 |
+
- to_html
|
877 |
+
- to_string
|
878 |
+
|
879 |
+
Parameters
|
880 |
+
----------
|
881 |
+
fmt : DataFrameFormatter
|
882 |
+
Formatter with the formatting options.
|
883 |
+
"""
|
884 |
+
|
    def __init__(self, fmt: DataFrameFormatter) -> None:
        # Single source of truth for formatting options and the (possibly
        # truncated) frame, shared by every output format below.
        self.fmt = fmt
887 |
+
|
    def to_html(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        encoding: str | None = None,
        classes: str | list | tuple | None = None,
        notebook: bool = False,
        border: int | bool | None = None,
        table_id: str | None = None,
        render_links: bool = False,
    ) -> str | None:
        """
        Render a DataFrame to a html table.

        Parameters
        ----------
        buf : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a string ``write()`` function. If None, the result is
            returned as a string.
        encoding : str, default "utf-8"
            Set character encoding.
        classes : str or list-like
            classes to include in the `class` attribute of the opening
            ``<table>`` tag, in addition to the default "dataframe".
        notebook : {True, False}, optional, default False
            Whether the generated HTML is for IPython Notebook.
        border : int
            A ``border=border`` attribute is included in the opening
            ``<table>`` tag. Default ``pd.options.display.html.border``.
        table_id : str, optional
            A css id is included in the opening `<table>` tag if specified.
        render_links : bool, default False
            Convert URLs to HTML links.
        """
        from pandas.io.formats.html import (
            HTMLFormatter,
            NotebookFormatter,
        )

        # Notebook output wraps the table in IPython-specific markup.
        Klass = NotebookFormatter if notebook else HTMLFormatter

        html_formatter = Klass(
            self.fmt,
            classes=classes,
            border=border,
            table_id=table_id,
            render_links=render_links,
        )
        string = html_formatter.to_string()
        return save_to_buffer(string, buf=buf, encoding=encoding)
938 |
+
|
939 |
+
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        encoding: str | None = None,
        line_width: int | None = None,
    ) -> str | None:
        """
        Render a DataFrame to a console-friendly tabular output.

        Parameters
        ----------
        buf : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a string ``write()`` function. If None, the result is
            returned as a string.
        encoding : str, default "utf-8"
            Set character encoding.
        line_width : int, optional
            Width to wrap a line in characters.

        Returns
        -------
        str or None
            The rendered text when ``buf`` is None, otherwise None.
        """
        from pandas.io.formats.string import StringFormatter

        string_formatter = StringFormatter(self.fmt, line_width=line_width)
        string = string_formatter.to_string()
        return save_to_buffer(string, buf=buf, encoding=encoding)
|
964 |
+
|
965 |
+
    def to_csv(
        self,
        path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        encoding: str | None = None,
        sep: str = ",",
        columns: Sequence[Hashable] | None = None,
        index_label: IndexLabel | None = None,
        mode: str = "w",
        compression: CompressionOptions = "infer",
        quoting: int | None = None,
        quotechar: str = '"',
        lineterminator: str | None = None,
        chunksize: int | None = None,
        date_format: str | None = None,
        doublequote: bool = True,
        escapechar: str | None = None,
        errors: str = "strict",
        storage_options: StorageOptions | None = None,
    ) -> str | None:
        """
        Render dataframe as comma-separated file.

        Returns
        -------
        str or None
            The CSV text when ``path_or_buf`` is None, otherwise None.
        """
        from pandas.io.formats.csvs import CSVFormatter

        # When no target is given, write into an in-memory buffer and
        # return its contents as a string at the end.
        if path_or_buf is None:
            created_buffer = True
            path_or_buf = StringIO()
        else:
            created_buffer = False

        csv_formatter = CSVFormatter(
            path_or_buf=path_or_buf,
            lineterminator=lineterminator,
            sep=sep,
            encoding=encoding,
            errors=errors,
            compression=compression,
            quoting=quoting,
            cols=columns,
            index_label=index_label,
            mode=mode,
            chunksize=chunksize,
            quotechar=quotechar,
            date_format=date_format,
            doublequote=doublequote,
            escapechar=escapechar,
            storage_options=storage_options,
            formatter=self.fmt,
        )
        csv_formatter.save()

        if created_buffer:
            assert isinstance(path_or_buf, StringIO)
            content = path_or_buf.getvalue()
            path_or_buf.close()
            return content

        return None
|
1023 |
+
|
1024 |
+
|
1025 |
+
def save_to_buffer(
    string: str,
    buf: FilePath | WriteBuffer[str] | None = None,
    encoding: str | None = None,
) -> str | None:
    """
    Perform serialization. Write to buf or return as string if buf is None.

    Parameters
    ----------
    string : str
        Already-rendered output to persist.
    buf : str, path object, file-like object, or None, default None
        Target to write to; None means "return the string".
    encoding : str, optional
        Only honoured when ``buf`` is a file name (see ``_get_buffer``).
    """
    with _get_buffer(buf, encoding=encoding) as fd:
        fd.write(string)
        if buf is None:
            # error: "WriteBuffer[str]" has no attribute "getvalue"
            return fd.getvalue()  # type: ignore[attr-defined]
        return None
|
1039 |
+
|
1040 |
+
|
1041 |
+
@contextmanager
def _get_buffer(
    buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None
) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]:
    """
    Context manager to open, yield and close buffer for filenames or Path-like
    objects, otherwise yield buf unchanged.
    """
    if buf is not None:
        buf = stringify_path(buf)
    else:
        buf = StringIO()

    if encoding is None:
        encoding = "utf-8"
    elif not isinstance(buf, str):
        # encoding only makes sense when this function opens the file itself
        raise ValueError("buf is not a file name and encoding is specified.")

    if hasattr(buf, "write"):
        # Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str],
        # StringIO]", expected type "Union[WriteBuffer[str], StringIO]")
        yield buf  # type: ignore[misc]
    elif isinstance(buf, str):
        check_parent_directory(str(buf))
        with open(buf, "w", encoding=encoding, newline="") as f:
            # GH#30034 open instead of codecs.open prevents a file leak
            # if we have an invalid encoding argument.
            # newline="" is needed to roundtrip correctly on
            # windows test_to_latex_filename
            yield f
    else:
        raise TypeError("buf is not a file name and it has no write method")
|
1073 |
+
|
1074 |
+
|
1075 |
+
# ----------------------------------------------------------------------
|
1076 |
+
# Array formatters
|
1077 |
+
|
1078 |
+
|
1079 |
+
def format_array(
    values: ArrayLike,
    formatter: Callable | None,
    float_format: FloatFormatType | None = None,
    na_rep: str = "NaN",
    digits: int | None = None,
    space: str | int | None = None,
    justify: str = "right",
    decimal: str = ".",
    leading_space: bool | None = True,
    quoting: int | None = None,
    fallback_formatter: Callable | None = None,
) -> list[str]:
    """
    Format an array for printing.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        The array to render.
    formatter : callable, optional
        Per-element formatting function; overrides the default behavior.
    float_format : str or callable, optional
        Formatter for floats; defaults to ``display.float_format``.
    na_rep : str, default "NaN"
        String representation of missing values.
    digits : int, optional
        Precision; defaults to ``display.precision``.
    space : str or int, optional
        Minimum width; defaults to 12.
    justify : str, default "right"
        Justification mode for the fixed-width adjustment.
    decimal : str, default "."
        Decimal separator.
    leading_space : bool, optional, default True
        Whether the array should be formatted with a leading space.
        When an array is used as a column of a Series or DataFrame, we do want
        the leading space to pad between columns.

        When formatting an Index subclass
        (e.g. IntervalIndex._get_values_for_csv), we don't want the
        leading space since it should be left-aligned.
    quoting : int, optional
        csv-module quoting level.
    fallback_formatter : callable, optional
        Used only when ``formatter`` is None (e.g. an ExtensionArray's
        own ``_formatter``).

    Returns
    -------
    List[str]
    """
    # Dispatch on dtype to the appropriate specialised formatter class.
    fmt_klass: type[_GenericArrayFormatter]
    if lib.is_np_dtype(values.dtype, "M"):
        fmt_klass = _Datetime64Formatter
        values = cast(DatetimeArray, values)
    elif isinstance(values.dtype, DatetimeTZDtype):
        fmt_klass = _Datetime64TZFormatter
        values = cast(DatetimeArray, values)
    elif lib.is_np_dtype(values.dtype, "m"):
        fmt_klass = _Timedelta64Formatter
        values = cast(TimedeltaArray, values)
    elif isinstance(values.dtype, ExtensionDtype):
        fmt_klass = _ExtensionArrayFormatter
    elif lib.is_np_dtype(values.dtype, "fc"):
        fmt_klass = FloatArrayFormatter
    elif lib.is_np_dtype(values.dtype, "iu"):
        fmt_klass = _IntArrayFormatter
    else:
        fmt_klass = _GenericArrayFormatter

    if space is None:
        space = 12

    if float_format is None:
        float_format = get_option("display.float_format")

    if digits is None:
        digits = get_option("display.precision")

    fmt_obj = fmt_klass(
        values,
        digits=digits,
        na_rep=na_rep,
        float_format=float_format,
        formatter=formatter,
        space=space,
        justify=justify,
        decimal=decimal,
        leading_space=leading_space,
        quoting=quoting,
        fallback_formatter=fallback_formatter,
    )

    return fmt_obj.get_result()
|
1162 |
+
|
1163 |
+
|
1164 |
+
class _GenericArrayFormatter:
    """Default element-wise string formatter; dtype-specific subclasses override
    ``_format_strings``."""

    def __init__(
        self,
        values: ArrayLike,
        digits: int = 7,
        formatter: Callable | None = None,
        na_rep: str = "NaN",
        space: str | int = 12,
        float_format: FloatFormatType | None = None,
        justify: str = "right",
        decimal: str = ".",
        quoting: int | None = None,
        fixed_width: bool = True,
        leading_space: bool | None = True,
        fallback_formatter: Callable | None = None,
    ) -> None:
        self.values = values
        self.digits = digits
        self.na_rep = na_rep
        self.space = space
        self.formatter = formatter
        self.float_format = float_format
        self.justify = justify
        self.decimal = decimal
        self.quoting = quoting
        self.fixed_width = fixed_width
        self.leading_space = leading_space
        self.fallback_formatter = fallback_formatter

    def get_result(self) -> list[str]:
        # Format each element, then pad/justify to a common display width.
        fmt_values = self._format_strings()
        return _make_fixed_width(fmt_values, self.justify)

    def _format_strings(self) -> list[str]:
        # Resolve the float formatter: explicit argument, then the
        # display.float_format option, then a trimmed fixed-point default.
        if self.float_format is None:
            float_format = get_option("display.float_format")
            if float_format is None:
                precision = get_option("display.precision")
                float_format = lambda x: _trim_zeros_single_float(
                    f"{x: .{precision:d}f}"
                )
        else:
            float_format = self.float_format

        # Element formatter precedence: explicit > fallback > pprint_thing.
        if self.formatter is not None:
            formatter = self.formatter
        elif self.fallback_formatter is not None:
            formatter = self.fallback_formatter
        else:
            quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE
            formatter = partial(
                printing.pprint_thing,
                escape_chars=("\t", "\r", "\n"),
                quote_strings=quote_strings,
            )

        def _format(x):
            # Missing values get special representations before na_rep.
            if self.na_rep is not None and is_scalar(x) and isna(x):
                if x is None:
                    return "None"
                elif x is NA:
                    return str(NA)
                elif lib.is_float(x) and np.isinf(x):
                    # TODO(3.0): this will be unreachable when use_inf_as_na
                    # deprecation is enforced
                    return str(x)
                elif x is NaT or isinstance(x, (np.datetime64, np.timedelta64)):
                    return "NaT"
                return self.na_rep
            elif isinstance(x, PandasObject):
                return str(x)
            elif isinstance(x, StringDtype):
                return repr(x)
            else:
                # object dtype
                return str(formatter(x))

        vals = self.values
        if not isinstance(vals, np.ndarray):
            raise TypeError(
                "ExtensionArray formatting should use _ExtensionArrayFormatter"
            )
        inferred = lib.map_infer(vals, is_float)
        is_float_type = (
            inferred
            # vals may have 2 or more dimensions
            & np.all(notna(vals), axis=tuple(range(1, len(vals.shape))))
        )
        leading_space = self.leading_space
        if leading_space is None:
            # Default to a leading space only when floats are present.
            leading_space = is_float_type.any()

        fmt_values = []
        for i, v in enumerate(vals):
            if (not is_float_type[i] or self.formatter is not None) and leading_space:
                fmt_values.append(f" {_format(v)}")
            elif is_float_type[i]:
                fmt_values.append(float_format(v))
            else:
                if leading_space is False:
                    # False specifically, so that the default is
                    # to include a space if we get here.
                    tpl = "{v}"
                else:
                    tpl = " {v}"
                fmt_values.append(tpl.format(v=_format(v)))

        return fmt_values
|
1272 |
+
|
1273 |
+
|
1274 |
+
class FloatArrayFormatter(_GenericArrayFormatter):
    """Formatter for float and complex ndarrays, with fixed-width trimming and
    a fallback to scientific notation for extreme magnitudes."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # float_format is expected to be a string
        # formatter should be used to pass a function
        if self.float_format is not None and self.formatter is None:
            # GH21625, GH22270
            self.fixed_width = False
            if callable(self.float_format):
                self.formatter = self.float_format
                self.float_format = None

    def _value_formatter(
        self,
        float_format: FloatFormatType | None = None,
        threshold: float | None = None,
    ) -> Callable:
        """Returns a function to be applied on each value to format it"""
        # the float_format parameter supersedes self.float_format
        if float_format is None:
            float_format = self.float_format

        # we are going to compose different functions, to first convert to
        # a string, then replace the decimal symbol, and finally chop according
        # to the threshold

        # when there is no float_format, we use str instead of '%g'
        # because str(0.0) = '0.0' while '%g' % 0.0 = '0'
        if float_format:

            def base_formatter(v):
                assert float_format is not None  # for mypy
                # error: "str" not callable
                # error: Unexpected keyword argument "value" for "__call__" of
                # "EngFormatter"
                return (
                    float_format(value=v)  # type: ignore[operator,call-arg]
                    if notna(v)
                    else self.na_rep
                )

        else:

            def base_formatter(v):
                return str(v) if notna(v) else self.na_rep

        if self.decimal != ".":
            # Only the first "." is the decimal point; e.g. exponents keep theirs.
            def decimal_formatter(v):
                return base_formatter(v).replace(".", self.decimal, 1)

        else:
            decimal_formatter = base_formatter

        if threshold is None:
            return decimal_formatter

        def formatter(value):
            if notna(value):
                if abs(value) > threshold:
                    return decimal_formatter(value)
                else:
                    # values at or below the chop threshold display as 0
                    return decimal_formatter(0.0)
            else:
                return self.na_rep

        return formatter

    def get_result_as_array(self) -> np.ndarray:
        """
        Returns the float values converted into strings using
        the parameters given at initialisation, as a numpy array
        """

        def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str):
            mask = isna(values)
            formatted = np.array(
                [
                    formatter(val) if not m else na_rep
                    for val, m in zip(values.ravel(), mask.ravel())
                ]
            ).reshape(values.shape)
            return formatted

        def format_complex_with_na_rep(
            values: ArrayLike, formatter: Callable, na_rep: str
        ):
            # Format real and imaginary parts independently so that a NaN in
            # only one part is rendered with na_rep in just that part.
            real_values = np.real(values).ravel()  # type: ignore[arg-type]
            imag_values = np.imag(values).ravel()  # type: ignore[arg-type]
            real_mask, imag_mask = isna(real_values), isna(imag_values)
            formatted_lst = []
            for val, real_val, imag_val, re_isna, im_isna in zip(
                values.ravel(),
                real_values,
                imag_values,
                real_mask,
                imag_mask,
            ):
                if not re_isna and not im_isna:
                    formatted_lst.append(formatter(val))
                elif not re_isna:  # xxx+nanj
                    formatted_lst.append(f"{formatter(real_val)}+{na_rep}j")
                elif not im_isna:  # nan[+/-]xxxj
                    # The imaginary part may either start with a "-" or a space
                    imag_formatted = formatter(imag_val).strip()
                    if imag_formatted.startswith("-"):
                        formatted_lst.append(f"{na_rep}{imag_formatted}j")
                    else:
                        formatted_lst.append(f"{na_rep}+{imag_formatted}j")
                else:  # nan+nanj
                    formatted_lst.append(f"{na_rep}+{na_rep}j")
            return np.array(formatted_lst).reshape(values.shape)

        if self.formatter is not None:
            return format_with_na_rep(self.values, self.formatter, self.na_rep)

        if self.fixed_width:
            threshold = get_option("display.chop_threshold")
        else:
            threshold = None

        # if we have a fixed_width, we'll need to try different float_format
        def format_values_with(float_format):
            formatter = self._value_formatter(float_format, threshold)

            # default formatter leaves a space to the left when formatting
            # floats, must be consistent for left-justifying NaNs (GH #25061)
            na_rep = " " + self.na_rep if self.justify == "left" else self.na_rep

            # different formatting strategies for complex and non-complex data
            # need to distinguish complex and float NaNs (GH #53762)
            values = self.values
            is_complex = is_complex_dtype(values)

            # separate the wheat from the chaff
            if is_complex:
                values = format_complex_with_na_rep(values, formatter, na_rep)
            else:
                values = format_with_na_rep(values, formatter, na_rep)

            if self.fixed_width:
                if is_complex:
                    result = _trim_zeros_complex(values, self.decimal)
                else:
                    result = _trim_zeros_float(values, self.decimal)
                return np.asarray(result, dtype="object")

            return values

        # There is a special default string when we are fixed-width
        # The default is otherwise to use str instead of a formatting string
        float_format: FloatFormatType | None
        if self.float_format is None:
            if self.fixed_width:
                if self.leading_space is True:
                    fmt_str = "{value: .{digits:d}f}"
                else:
                    fmt_str = "{value:.{digits:d}f}"
                float_format = partial(fmt_str.format, digits=self.digits)
            else:
                float_format = self.float_format
        else:
            float_format = lambda value: self.float_format % value

        formatted_values = format_values_with(float_format)

        if not self.fixed_width:
            return formatted_values

        # we need do convert to engineering format if some values are too small
        # and would appear as 0, or if some values are too big and take too
        # much space

        if len(formatted_values) > 0:
            maxlen = max(len(x) for x in formatted_values)
            too_long = maxlen > self.digits + 6
        else:
            too_long = False

        abs_vals = np.abs(self.values)
        # this is pretty arbitrary for now
        # large values: more that 8 characters including decimal symbol
        # and first digit, hence > 1e6
        has_large_values = (abs_vals > 1e6).any()
        has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any()

        if has_small_values or (too_long and has_large_values):
            # Re-format everything in scientific notation.
            if self.leading_space is True:
                fmt_str = "{value: .{digits:d}e}"
            else:
                fmt_str = "{value:.{digits:d}e}"
            float_format = partial(fmt_str.format, digits=self.digits)
            formatted_values = format_values_with(float_format)

        return formatted_values

    def _format_strings(self) -> list[str]:
        return list(self.get_result_as_array())
|
1473 |
+
|
1474 |
+
|
1475 |
+
class _IntArrayFormatter(_GenericArrayFormatter):
    """Formatter for integer ndarrays: plain ``%d`` rendering, with an optional
    leading space for sign alignment."""

    def _format_strings(self) -> list[str]:
        # Fix: the original wrote ``f"{x:d}".format(x=x)`` — calling
        # ``str.format`` on an already-formatted f-string is a no-op
        # (the result contains no braces) and only obscures intent.
        if self.leading_space is False:
            formatter_str = lambda x: f"{x:d}"
        else:
            # " d" leaves room for a minus sign so columns line up.
            formatter_str = lambda x: f"{x: d}"
        formatter = self.formatter or formatter_str
        fmt_values = [formatter(x) for x in self.values]
        return fmt_values
|
1484 |
+
|
1485 |
+
|
1486 |
+
class _Datetime64Formatter(_GenericArrayFormatter):
    # Formatter for timezone-naive datetime64 arrays.
    values: DatetimeArray

    def __init__(
        self,
        values: DatetimeArray,
        nat_rep: str = "NaT",
        date_format: None = None,
        **kwargs,
    ) -> None:
        super().__init__(values, **kwargs)
        self.nat_rep = nat_rep
        self.date_format = date_format

    def _format_strings(self) -> list[str]:
        """We by definition DO NOT have a TZ."""
        values = self.values

        if self.formatter is not None:
            return [self.formatter(x) for x in values]

        # Delegate to the array's native-type formatting (C-optimised path).
        fmt_values = values._format_native_types(
            na_rep=self.nat_rep, date_format=self.date_format
        )
        return fmt_values.tolist()
|
1511 |
+
|
1512 |
+
|
1513 |
+
class _ExtensionArrayFormatter(_GenericArrayFormatter):
    # Formatter for ExtensionArray-backed data: converts to an object ndarray
    # and re-dispatches through format_array.
    values: ExtensionArray

    def _format_strings(self) -> list[str]:
        values = self.values

        formatter = self.formatter
        fallback_formatter = None
        if formatter is None:
            # Let the ExtensionArray supply its own element formatter.
            fallback_formatter = values._formatter(boxed=True)

        if isinstance(values, Categorical):
            # Categorical is special for now, so that we can preserve tzinfo
            array = values._internal_get_values()
        else:
            array = np.asarray(values, dtype=object)

        fmt_values = format_array(
            array,
            formatter,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            decimal=self.decimal,
            leading_space=self.leading_space,
            quoting=self.quoting,
            fallback_formatter=fallback_formatter,
        )
        return fmt_values
|
1544 |
+
|
1545 |
+
|
1546 |
+
def format_percentiles(
    percentiles: (np.ndarray | Sequence[float]),
) -> list[str]:
    """
    Outputs rounded and formatted percentiles.

    Parameters
    ----------
    percentiles : list-like, containing floats from interval [0,1]

    Returns
    -------
    formatted : list of strings

    Notes
    -----
    Rounding precision is chosen so that: (1) if any two elements of
    ``percentiles`` differ, they remain different after rounding
    (2) no entry is *rounded* to 0% or 100%.
    Any non-integer is always rounded to at least 1 decimal place.

    Examples
    --------
    Keeps all entries different after rounding:

    >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
    ['1.999%', '2.001%', '50%', '66.667%', '99.99%']

    No element is rounded to 0% or 100% (unless already equal to it).
    Duplicates are allowed:

    >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
    ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
    """
    percentiles = np.asarray(percentiles)

    # It checks for np.nan as well
    if (
        not is_numeric_dtype(percentiles)
        or not np.all(percentiles >= 0)
        or not np.all(percentiles <= 1)
    ):
        raise ValueError("percentiles should all be in the interval [0,1]")

    percentiles = 100 * percentiles
    prec = get_precision(percentiles)
    percentiles_round_type = percentiles.round(prec).astype(int)

    # Entries that are (nearly) whole percentages are rendered without decimals.
    int_idx = np.isclose(percentiles_round_type, percentiles)

    if np.all(int_idx):
        out = percentiles_round_type.astype(str)
        return [i + "%" for i in out]

    # Recompute precision from the unique values so duplicates don't force
    # extra decimal places.
    unique_pcts = np.unique(percentiles)
    prec = get_precision(unique_pcts)
    out = np.empty_like(percentiles, dtype=object)
    out[int_idx] = percentiles[int_idx].round().astype(int).astype(str)

    out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
    return [i + "%" for i in out]
|
1607 |
+
|
1608 |
+
|
1609 |
+
def get_precision(array: np.ndarray | Sequence[float]) -> int:
    """Choose the decimal precision needed to keep all adjacent entries of a
    sorted percentage array distinct after rounding, and to keep endpoints
    from rounding to 0 or 100.

    Returns at least 1.
    """
    first, last = array[0], array[-1]
    # Guard gaps against the 0% / 100% boundaries (skipped when already there).
    lead = first if first > 0 else None
    trail = 100 - last if last < 100 else None
    gaps = np.abs(np.ediff1d(array, to_begin=lead, to_end=trail))
    precision = -np.floor(np.log10(np.min(gaps))).astype(int)
    return max(1, precision)
|
1617 |
+
|
1618 |
+
|
1619 |
+
def _format_datetime64(x: NaTType | Timestamp, nat_rep: str = "NaT") -> str:
|
1620 |
+
if x is NaT:
|
1621 |
+
return nat_rep
|
1622 |
+
|
1623 |
+
# Timestamp.__str__ falls back to datetime.datetime.__str__ = isoformat(sep=' ')
|
1624 |
+
# so it already uses string formatting rather than strftime (faster).
|
1625 |
+
return str(x)
|
1626 |
+
|
1627 |
+
|
1628 |
+
def _format_datetime64_dateonly(
|
1629 |
+
x: NaTType | Timestamp,
|
1630 |
+
nat_rep: str = "NaT",
|
1631 |
+
date_format: str | None = None,
|
1632 |
+
) -> str:
|
1633 |
+
if isinstance(x, NaTType):
|
1634 |
+
return nat_rep
|
1635 |
+
|
1636 |
+
if date_format:
|
1637 |
+
return x.strftime(date_format)
|
1638 |
+
else:
|
1639 |
+
# Timestamp._date_repr relies on string formatting (faster than strftime)
|
1640 |
+
return x._date_repr
|
1641 |
+
|
1642 |
+
|
1643 |
+
def get_format_datetime64(
    is_dates_only: bool, nat_rep: str = "NaT", date_format: str | None = None
) -> Callable:
    """Build a callable mapping a datetime64 scalar to its display string.

    When ``is_dates_only`` is True, the time component is dropped and
    ``date_format`` is honoured; otherwise the full timestamp is rendered.
    """
    if not is_dates_only:
        return lambda x: _format_datetime64(x, nat_rep=nat_rep)
    return lambda x: _format_datetime64_dateonly(
        x, nat_rep=nat_rep, date_format=date_format
    )
|
1655 |
+
|
1656 |
+
|
1657 |
+
class _Datetime64TZFormatter(_Datetime64Formatter):
    # Formatter for timezone-aware datetime64 arrays.
    values: DatetimeArray

    def _format_strings(self) -> list[str]:
        """We by definition have a TZ."""
        ido = self.values._is_dates_only
        # Convert to object dtype so each element is a Timestamp carrying tzinfo.
        values = self.values.astype(object)
        formatter = self.formatter or get_format_datetime64(
            ido, date_format=self.date_format
        )
        fmt_values = [formatter(x) for x in values]

        return fmt_values
|
1670 |
+
|
1671 |
+
|
1672 |
+
class _Timedelta64Formatter(_GenericArrayFormatter):
    # Formatter for timedelta64 arrays.
    values: TimedeltaArray

    def __init__(
        self,
        values: TimedeltaArray,
        nat_rep: str = "NaT",
        **kwargs,
    ) -> None:
        # TODO: nat_rep is never passed, na_rep is.
        super().__init__(values, **kwargs)
        self.nat_rep = nat_rep

    def _format_strings(self) -> list[str]:
        # Build one shared formatter for the whole array so every element
        # uses the same day/sub-day format.
        formatter = self.formatter or get_format_timedelta64(
            self.values, nat_rep=self.nat_rep, box=False
        )
        return [formatter(x) for x in self.values]
|
1690 |
+
|
1691 |
+
|
1692 |
+
def get_format_timedelta64(
    values: TimedeltaArray,
    nat_rep: str | float = "NaT",
    box: bool = False,
) -> Callable:
    """
    Return a formatter function for a range of timedeltas.
    These will all have the same format argument

    If box, then show the return in quotes
    """
    even_days = values._is_dates_only

    if even_days:
        # All values are whole days, so the sub-day component can be omitted.
        format = None
    else:
        format = "long"

    def _formatter(x):
        if x is None or (is_scalar(x) and isna(x)):
            return nat_rep

        if not isinstance(x, Timedelta):
            x = Timedelta(x)

        # Timedelta._repr_base uses string formatting (faster than strftime)
        result = x._repr_base(format=format)
        if box:
            result = f"'{result}'"
        return result

    return _formatter
|
1724 |
+
|
1725 |
+
|
1726 |
+
def _make_fixed_width(
    strings: list[str],
    justify: str = "right",
    minimum: int | None = None,
    adj: printing._TextAdjustment | None = None,
) -> list[str]:
    """
    Pad all strings to one common display width and justify them; entries
    longer than ``display.max_colwidth`` are truncated with a "..." suffix.
    """
    if len(strings) == 0 or justify == "all":
        return strings

    if adj is None:
        adjustment = printing.get_adjustment()
    else:
        adjustment = adj

    max_len = max(adjustment.len(x) for x in strings)

    if minimum is not None:
        max_len = max(minimum, max_len)

    conf_max = get_option("display.max_colwidth")
    if conf_max is not None and max_len > conf_max:
        max_len = conf_max

    def just(x: str) -> str:
        # Truncate over-wide entries; needs at least 4 columns for "x..."
        if conf_max is not None:
            if (conf_max > 3) & (adjustment.len(x) > max_len):
                x = x[: max_len - 3] + "..."
        return x

    strings = [just(x) for x in strings]
    result = adjustment.justify(strings, max_len, mode=justify)
    return result
|
1758 |
+
|
1759 |
+
|
1760 |
+
def _trim_zeros_complex(str_complexes: ArrayLike, decimal: str = ".") -> list[str]:
    """
    Separates the real and imaginary parts from the complex number, and
    executes the _trim_zeros_float method on each of those.
    """
    real_part, imag_part = [], []
    for x in str_complexes:
        # Complex numbers are represented as "(-)xxx(+/-)xxxj"
        # The split will give [{"", "-"}, "xxx", "+/-", "xxx", "j", ""]
        # Therefore, the imaginary part is the 4th and 3rd last elements,
        # and the real part is everything before the imaginary part
        trimmed = re.split(r"([j+-])", x)
        real_part.append("".join(trimmed[:-4]))
        imag_part.append("".join(trimmed[-4:-2]))

    # We want to align the lengths of the real and imaginary parts of each complex
    # number, as well as the lengths the real (resp. complex) parts of all numbers
    # in the array
    n = len(str_complexes)
    padded_parts = _trim_zeros_float(real_part + imag_part, decimal)
    if len(padded_parts) == 0:
        return []
    # -1 because the sign of the imaginary part is printed separately below.
    padded_length = max(len(part) for part in padded_parts) - 1
    padded = [
        real_pt  # real part, possibly NaN
        + imag_pt[0]  # +/-
        + f"{imag_pt[1:]:>{padded_length}}"  # complex part (no sign), possibly nan
        + "j"
        for real_pt, imag_pt in zip(padded_parts[:n], padded_parts[n:])
    ]
    return padded
|
1791 |
+
|
1792 |
+
|
1793 |
+
def _trim_zeros_single_float(str_float: str) -> str:
|
1794 |
+
"""
|
1795 |
+
Trims trailing zeros after a decimal point,
|
1796 |
+
leaving just one if necessary.
|
1797 |
+
"""
|
1798 |
+
str_float = str_float.rstrip("0")
|
1799 |
+
if str_float.endswith("."):
|
1800 |
+
str_float += "0"
|
1801 |
+
|
1802 |
+
return str_float
|
1803 |
+
|
1804 |
+
|
1805 |
+
def _trim_zeros_float(
|
1806 |
+
str_floats: ArrayLike | list[str], decimal: str = "."
|
1807 |
+
) -> list[str]:
|
1808 |
+
"""
|
1809 |
+
Trims the maximum number of trailing zeros equally from
|
1810 |
+
all numbers containing decimals, leaving just one if
|
1811 |
+
necessary.
|
1812 |
+
"""
|
1813 |
+
trimmed = str_floats
|
1814 |
+
number_regex = re.compile(rf"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$")
|
1815 |
+
|
1816 |
+
def is_number_with_decimal(x) -> bool:
|
1817 |
+
return re.match(number_regex, x) is not None
|
1818 |
+
|
1819 |
+
def should_trim(values: ArrayLike | list[str]) -> bool:
|
1820 |
+
"""
|
1821 |
+
Determine if an array of strings should be trimmed.
|
1822 |
+
|
1823 |
+
Returns True if all numbers containing decimals (defined by the
|
1824 |
+
above regular expression) within the array end in a zero, otherwise
|
1825 |
+
returns False.
|
1826 |
+
"""
|
1827 |
+
numbers = [x for x in values if is_number_with_decimal(x)]
|
1828 |
+
return len(numbers) > 0 and all(x.endswith("0") for x in numbers)
|
1829 |
+
|
1830 |
+
while should_trim(trimmed):
|
1831 |
+
trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed]
|
1832 |
+
|
1833 |
+
# leave one 0 after the decimal points if need be.
|
1834 |
+
result = [
|
1835 |
+
x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x
|
1836 |
+
for x in trimmed
|
1837 |
+
]
|
1838 |
+
return result
|
1839 |
+
|
1840 |
+
|
1841 |
+
def _has_names(index: Index) -> bool:
|
1842 |
+
if isinstance(index, MultiIndex):
|
1843 |
+
return com.any_not_none(*index.names)
|
1844 |
+
else:
|
1845 |
+
return index.name is not None
|
1846 |
+
|
1847 |
+
|
1848 |
+
class EngFormatter:
|
1849 |
+
"""
|
1850 |
+
Formats float values according to engineering format.
|
1851 |
+
|
1852 |
+
Based on matplotlib.ticker.EngFormatter
|
1853 |
+
"""
|
1854 |
+
|
1855 |
+
# The SI engineering prefixes
|
1856 |
+
ENG_PREFIXES = {
|
1857 |
+
-24: "y",
|
1858 |
+
-21: "z",
|
1859 |
+
-18: "a",
|
1860 |
+
-15: "f",
|
1861 |
+
-12: "p",
|
1862 |
+
-9: "n",
|
1863 |
+
-6: "u",
|
1864 |
+
-3: "m",
|
1865 |
+
0: "",
|
1866 |
+
3: "k",
|
1867 |
+
6: "M",
|
1868 |
+
9: "G",
|
1869 |
+
12: "T",
|
1870 |
+
15: "P",
|
1871 |
+
18: "E",
|
1872 |
+
21: "Z",
|
1873 |
+
24: "Y",
|
1874 |
+
}
|
1875 |
+
|
1876 |
+
def __init__(
|
1877 |
+
self, accuracy: int | None = None, use_eng_prefix: bool = False
|
1878 |
+
) -> None:
|
1879 |
+
self.accuracy = accuracy
|
1880 |
+
self.use_eng_prefix = use_eng_prefix
|
1881 |
+
|
1882 |
+
def __call__(self, num: float) -> str:
|
1883 |
+
"""
|
1884 |
+
Formats a number in engineering notation, appending a letter
|
1885 |
+
representing the power of 1000 of the original number. Some examples:
|
1886 |
+
>>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True)
|
1887 |
+
>>> format_eng(0)
|
1888 |
+
' 0'
|
1889 |
+
>>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True)
|
1890 |
+
>>> format_eng(1_000_000)
|
1891 |
+
' 1.0M'
|
1892 |
+
>>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False)
|
1893 |
+
>>> format_eng("-1e-6")
|
1894 |
+
'-1.00E-06'
|
1895 |
+
|
1896 |
+
@param num: the value to represent
|
1897 |
+
@type num: either a numeric value or a string that can be converted to
|
1898 |
+
a numeric value (as per decimal.Decimal constructor)
|
1899 |
+
|
1900 |
+
@return: engineering formatted string
|
1901 |
+
"""
|
1902 |
+
dnum = Decimal(str(num))
|
1903 |
+
|
1904 |
+
if Decimal.is_nan(dnum):
|
1905 |
+
return "NaN"
|
1906 |
+
|
1907 |
+
if Decimal.is_infinite(dnum):
|
1908 |
+
return "inf"
|
1909 |
+
|
1910 |
+
sign = 1
|
1911 |
+
|
1912 |
+
if dnum < 0: # pragma: no cover
|
1913 |
+
sign = -1
|
1914 |
+
dnum = -dnum
|
1915 |
+
|
1916 |
+
if dnum != 0:
|
1917 |
+
pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3))
|
1918 |
+
else:
|
1919 |
+
pow10 = Decimal(0)
|
1920 |
+
|
1921 |
+
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
|
1922 |
+
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
|
1923 |
+
int_pow10 = int(pow10)
|
1924 |
+
|
1925 |
+
if self.use_eng_prefix:
|
1926 |
+
prefix = self.ENG_PREFIXES[int_pow10]
|
1927 |
+
elif int_pow10 < 0:
|
1928 |
+
prefix = f"E-{-int_pow10:02d}"
|
1929 |
+
else:
|
1930 |
+
prefix = f"E+{int_pow10:02d}"
|
1931 |
+
|
1932 |
+
mant = sign * dnum / (10**pow10)
|
1933 |
+
|
1934 |
+
if self.accuracy is None: # pragma: no cover
|
1935 |
+
format_str = "{mant: g}{prefix}"
|
1936 |
+
else:
|
1937 |
+
format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"
|
1938 |
+
|
1939 |
+
formatted = format_str.format(mant=mant, prefix=prefix)
|
1940 |
+
|
1941 |
+
return formatted
|
1942 |
+
|
1943 |
+
|
1944 |
+
def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:
    """
    Format float representation in DataFrame with SI notation.

    Parameters
    ----------
    accuracy : int, default 3
        Number of decimal digits after the floating point.
    use_eng_prefix : bool, default False
        Whether to represent a value with SI prefixes.

    Returns
    -------
    None

    Examples
    --------
    >>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6])
    >>> df
                  0
    0  1.000000e-09
    1  1.000000e-03
    2  1.000000e+00
    3  1.000000e+03
    4  1.000000e+06

    >>> pd.set_eng_float_format(accuracy=1)
    >>> df
             0
    0  1.0E-09
    1  1.0E-03
    2  1.0E+00
    3  1.0E+03
    4  1.0E+06

    >>> pd.set_eng_float_format(use_eng_prefix=True)
    >>> df
            0
    0  1.000n
    1  1.000m
    2   1.000
    3  1.000k
    4  1.000M

    >>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)
    >>> df
          0
    0  1.0n
    1  1.0m
    2   1.0
    3  1.0k
    4  1.0M

    >>> pd.set_option("display.float_format", None)  # unset option
    """
    # Install an EngFormatter as the global float formatter; callers undo
    # this by resetting "display.float_format" to None.
    set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
|
2000 |
+
|
2001 |
+
|
2002 |
+
def get_level_lengths(
    levels: Any, sentinel: bool | object | str = ""
) -> list[dict[int, int]]:
    """
    For each index in each level the function returns lengths of indexes.

    Parameters
    ----------
    levels : list of lists
        List of values on for level.
    sentinel : string, optional
        Value which states that no new index starts on there.

    Returns
    -------
    Returns list of maps. For each level returns map of indexes (key is index
    in row and value is length of index).
    """
    if not len(levels):
        return []

    # active[pos] stays True only while position pos has seen nothing but
    # sentinels on every level processed so far.
    active = [True] * len(levels[0])

    out: list[dict[int, int]] = []
    for level in levels:
        spans: dict[int, int] = {}
        start = 0
        for pos, label in enumerate(level):
            if not (active[pos] and label == sentinel):
                # A new group begins here: close out the previous run.
                active[pos] = False
                spans[start] = pos - start
                start = pos
        # Close the final run, which extends to the end of the level.
        spans[start] = len(level) - start
        out.append(spans)

    return out
|
2043 |
+
|
2044 |
+
|
2045 |
+
def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None:
    """
    Write *lines* to *buf* joined by newlines.

    Parameters
    ----------
    buf
        The buffer to write to
    lines
        The lines to append.
    """
    if any(isinstance(entry, str) for entry in lines):
        # Coerce every element so the join below cannot fail on a mix of
        # strings and non-strings.
        lines = [str(entry) for entry in lines]
    buf.write("\n".join(lines))
|
env-llmeval/lib/python3.10/site-packages/pandas/io/formats/html.py
ADDED
@@ -0,0 +1,646 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Module for formatting output data in HTML.
|
3 |
+
"""
|
4 |
+
from __future__ import annotations
|
5 |
+
|
6 |
+
from textwrap import dedent
|
7 |
+
from typing import (
|
8 |
+
TYPE_CHECKING,
|
9 |
+
Any,
|
10 |
+
Final,
|
11 |
+
cast,
|
12 |
+
)
|
13 |
+
|
14 |
+
from pandas._config import get_option
|
15 |
+
|
16 |
+
from pandas._libs import lib
|
17 |
+
|
18 |
+
from pandas import (
|
19 |
+
MultiIndex,
|
20 |
+
option_context,
|
21 |
+
)
|
22 |
+
|
23 |
+
from pandas.io.common import is_url
|
24 |
+
from pandas.io.formats.format import (
|
25 |
+
DataFrameFormatter,
|
26 |
+
get_level_lengths,
|
27 |
+
)
|
28 |
+
from pandas.io.formats.printing import pprint_thing
|
29 |
+
|
30 |
+
if TYPE_CHECKING:
|
31 |
+
from collections.abc import (
|
32 |
+
Hashable,
|
33 |
+
Iterable,
|
34 |
+
Mapping,
|
35 |
+
)
|
36 |
+
|
37 |
+
|
38 |
+
class HTMLFormatter:
    """
    Internal class for formatting output data in html.
    This class is intended for shared functionality between
    DataFrame.to_html() and DataFrame._repr_html_().
    Any logic in common with other output formatting methods
    should ideally be inherited from classes in format.py
    and this class responsible for only producing html markup.
    """

    # Number of spaces each nesting level of emitted HTML is indented by.
    indent_delta: Final = 2

    def __init__(
        self,
        formatter: DataFrameFormatter,
        classes: str | list[str] | tuple[str, ...] | None = None,
        border: int | bool | None = None,
        table_id: str | None = None,
        render_links: bool = False,
    ) -> None:
        self.fmt = formatter
        self.classes = classes

        self.frame = self.fmt.frame
        self.columns = self.fmt.tr_frame.columns
        # Accumulates rendered HTML one line at a time; see write().
        self.elements: list[str] = []
        self.bold_rows = self.fmt.bold_rows
        self.escape = self.fmt.escape
        self.show_dimensions = self.fmt.show_dimensions
        if border is None or border is True:
            # Fall back to the configured default border width.
            border = cast(int, get_option("display.html.border"))
        elif not border:
            # border=0/False suppresses the attribute entirely.
            border = None

        self.border = border
        self.table_id = table_id
        self.render_links = render_links

        # Maps column label -> CSS min-width value (e.g. "75px").
        self.col_space = {}
        is_multi_index = isinstance(self.columns, MultiIndex)
        for column, value in self.fmt.col_space.items():
            col_space_value = f"{value}px" if isinstance(value, int) else value
            self.col_space[column] = col_space_value
            # GH 53885: Handling case where column is index
            # Flatten the data in the multi index and add in the map
            if is_multi_index and isinstance(column, tuple):
                for column_index in column:
                    self.col_space[str(column_index)] = col_space_value

    def to_string(self) -> str:
        """Render the frame and return the full HTML as a single string."""
        lines = self.render()
        if any(isinstance(x, str) for x in lines):
            lines = [str(x) for x in lines]
        return "\n".join(lines)

    def render(self) -> list[str]:
        """Render the frame, returning the accumulated list of HTML lines."""
        self._write_table()

        if self.should_show_dimensions:
            by = chr(215)  # × # noqa: RUF003
            self.write(
                f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
            )

        return self.elements

    @property
    def should_show_dimensions(self) -> bool:
        return self.fmt.should_show_dimensions

    @property
    def show_row_idx_names(self) -> bool:
        return self.fmt.show_row_idx_names

    @property
    def show_col_idx_names(self) -> bool:
        return self.fmt.show_col_idx_names

    @property
    def row_levels(self) -> int:
        # Number of leading header cells occupied by the row index.
        if self.fmt.index:
            # showing (row) index
            return self.frame.index.nlevels
        elif self.show_col_idx_names:
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # If the row index is not displayed a column of
            # blank cells need to be included before the DataFrame values.
            return 1
        # not showing (row) index
        return 0

    def _get_columns_formatted_values(self) -> Iterable:
        return self.columns

    @property
    def is_truncated(self) -> bool:
        return self.fmt.is_truncated

    @property
    def ncols(self) -> int:
        return len(self.fmt.tr_frame.columns)

    def write(self, s: Any, indent: int = 0) -> None:
        """Append one line of HTML text, indented by *indent* spaces."""
        rs = pprint_thing(s)
        self.elements.append(" " * indent + rs)

    def write_th(
        self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None
    ) -> None:
        """
        Method for writing a formatted <th> cell.

        If col_space is set on the formatter then that is used for
        the value of min-width.

        Parameters
        ----------
        s : object
            The data to be written inside the cell.
        header : bool, default False
            Set to True if the <th> is for use inside <thead>. This will
            cause min-width to be set if there is one.
        indent : int, default 0
            The indentation level of the cell.
        tags : str, default None
            Tags to include in the cell.

        Returns
        -------
        A written <th> cell.
        """
        col_space = self.col_space.get(s, None)

        if header and col_space is not None:
            tags = tags or ""
            tags += f'style="min-width: {col_space};"'

        self._write_cell(s, kind="th", indent=indent, tags=tags)

    def write_td(self, s: Any, indent: int = 0, tags: str | None = None) -> None:
        """Write a formatted <td> cell."""
        self._write_cell(s, kind="td", indent=indent, tags=tags)

    def _write_cell(
        self, s: Any, kind: str = "td", indent: int = 0, tags: str | None = None
    ) -> None:
        """Write one cell of the given *kind*, escaping/linkifying as configured."""
        if tags is not None:
            start_tag = f"<{kind} {tags}>"
        else:
            start_tag = f"<{kind}>"

        if self.escape:
            # escape & first to prevent double escaping of &
            # NOTE(review): these replacement values look like they lost
            # their HTML entities ("&amp;", "&lt;", "&gt;") in transit --
            # confirm against the upstream source.
            esc = {"&": r"&", "<": r"<", ">": r">"}
        else:
            esc = {}

        rs = pprint_thing(s, escape_chars=esc).strip()

        if self.render_links and is_url(rs):
            # Use the unescaped value for the href attribute.
            rs_unescaped = pprint_thing(s, escape_chars={}).strip()
            start_tag += f'<a href="{rs_unescaped}" target="_blank">'
            end_a = "</a>"
        else:
            end_a = ""

        self.write(f"{start_tag}{rs}{end_a}</{kind}>", indent)

    def write_tr(
        self,
        line: Iterable,
        indent: int = 0,
        indent_delta: int = 0,
        header: bool = False,
        align: str | None = None,
        tags: dict[int, str] | None = None,
        nindex_levels: int = 0,
    ) -> None:
        """
        Write one <tr> row. The first *nindex_levels* cells become <th>
        when bold_rows is enabled; *tags* supplies extra cell attributes
        keyed by cell position.
        """
        if tags is None:
            tags = {}

        if align is None:
            self.write("<tr>", indent)
        else:
            self.write(f'<tr style="text-align: {align};">', indent)
        indent += indent_delta

        for i, s in enumerate(line):
            val_tag = tags.get(i, None)
            if header or (self.bold_rows and i < nindex_levels):
                self.write_th(s, indent=indent, header=header, tags=val_tag)
            else:
                self.write_td(s, indent, tags=val_tag)

        indent -= indent_delta
        self.write("</tr>", indent)

    def _write_table(self, indent: int = 0) -> None:
        """Write the outer <table> element, including header and body."""
        _classes = ["dataframe"]  # Default class.
        use_mathjax = get_option("display.html.use_mathjax")
        if not use_mathjax:
            _classes.append("tex2jax_ignore")
        if self.classes is not None:
            if isinstance(self.classes, str):
                self.classes = self.classes.split()
            if not isinstance(self.classes, (list, tuple)):
                raise TypeError(
                    "classes must be a string, list, "
                    f"or tuple, not {type(self.classes)}"
                )
            _classes.extend(self.classes)

        if self.table_id is None:
            id_section = ""
        else:
            id_section = f' id="{self.table_id}"'

        if self.border is None:
            border_attr = ""
        else:
            border_attr = f' border="{self.border}"'

        self.write(
            f'<table{border_attr} class="{" ".join(_classes)}"{id_section}>',
            indent,
        )

        if self.fmt.header or self.show_row_idx_names:
            self._write_header(indent + self.indent_delta)

        self._write_body(indent + self.indent_delta)

        self.write("</table>", indent)

    def _write_col_header(self, indent: int) -> None:
        """
        Write the column-header rows, sparsifying MultiIndex headers and
        inserting the "..." column when horizontally truncated.
        """
        row: list[Hashable]
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        if isinstance(self.columns, MultiIndex):
            template = 'colspan="{span:d}" halign="left"'

            sentinel: lib.NoDefault | bool
            if self.fmt.sparsify:
                # GH3547
                sentinel = lib.no_default
            else:
                sentinel = False
            levels = self.columns._format_multi(sparsify=sentinel, include_names=False)
            level_lengths = get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            for lnum, (records, values) in enumerate(zip(level_lengths, levels)):
                if is_truncated_horizontally:
                    # modify the header lines
                    ins_col = self.fmt.tr_col_num
                    if self.fmt.sparsify:
                        recs_new = {}
                        # Increment tags after ... col.
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            elif tag + span > ins_col:
                                recs_new[tag] = span + 1
                                if lnum == inner_lvl:
                                    values = (
                                        values[:ins_col] + ("...",) + values[ins_col:]
                                    )
                                else:
                                    # sparse col headers do not receive a ...
                                    values = (
                                        values[:ins_col]
                                        + (values[ins_col - 1],)
                                        + values[ins_col:]
                                    )
                            else:
                                recs_new[tag] = span
                            # if ins_col lies between tags, all col headers
                            # get ...
                            if tag + span == ins_col:
                                recs_new[ins_col] = 1
                                values = values[:ins_col] + ("...",) + values[ins_col:]
                        records = recs_new
                        inner_lvl = len(level_lengths) - 1
                        if lnum == inner_lvl:
                            records[ins_col] = 1
                    else:
                        recs_new = {}
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            else:
                                recs_new[tag] = span
                        recs_new[ins_col] = 1
                        records = recs_new
                        values = values[:ins_col] + ["..."] + values[ins_col:]

                # see gh-22579
                # Column Offset Bug with to_html(index=False) with
                # MultiIndex Columns and Index.
                # Initially fill row with blank cells before column names.
                # TODO: Refactor to remove code duplication with code
                # block below for standard columns index.
                row = [""] * (self.row_levels - 1)
                if self.fmt.index or self.show_col_idx_names:
                    # see gh-22747
                    # If to_html(index_names=False) do not show columns
                    # index names.
                    # TODO: Refactor to use _get_column_name_list from
                    # DataFrameFormatter class and create a
                    # _get_formatted_column_labels function for code
                    # parity with DataFrameFormatter class.
                    if self.fmt.show_index_names:
                        name = self.columns.names[lnum]
                        row.append(pprint_thing(name or ""))
                    else:
                        row.append("")

                tags = {}
                j = len(row)
                for i, v in enumerate(values):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template.format(span=records[i])
                    else:
                        # Covered by a preceding colspan cell; skip.
                        continue
                    j += 1
                    row.append(v)
                self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)
        else:
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # Initially fill row with blank cells before column names.
            # TODO: Refactor to remove code duplication with code block
            # above for columns MultiIndex.
            row = [""] * (self.row_levels - 1)
            if self.fmt.index or self.show_col_idx_names:
                # see gh-22747
                # If to_html(index_names=False) do not show columns
                # index names.
                # TODO: Refactor to use _get_column_name_list from
                # DataFrameFormatter class.
                if self.fmt.show_index_names:
                    row.append(self.columns.name or "")
                else:
                    row.append("")
            row.extend(self._get_columns_formatted_values())
            align = self.fmt.justify

            if is_truncated_horizontally:
                ins_col = self.row_levels + self.fmt.tr_col_num
                row.insert(ins_col, "...")

            self.write_tr(row, indent, self.indent_delta, header=True, align=align)

    def _write_row_header(self, indent: int) -> None:
        """Write the header row containing the row-index names."""
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (
            self.ncols + (1 if is_truncated_horizontally else 0)
        )
        self.write_tr(row, indent, self.indent_delta, header=True)

    def _write_header(self, indent: int) -> None:
        """Write the <thead> section."""
        self.write("<thead>", indent)

        if self.fmt.header:
            self._write_col_header(indent + self.indent_delta)

        if self.show_row_idx_names:
            self._write_row_header(indent + self.indent_delta)

        self.write("</thead>", indent)

    def _get_formatted_values(self) -> dict[int, list[str]]:
        """Format every (truncated) column, disabling col-width clipping."""
        with option_context("display.max_colwidth", None):
            fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)}
        return fmt_values

    def _write_body(self, indent: int) -> None:
        """Write the <tbody> section."""
        self.write("<tbody>", indent)
        fmt_values = self._get_formatted_values()

        # write values
        if self.fmt.index and isinstance(self.frame.index, MultiIndex):
            self._write_hierarchical_rows(fmt_values, indent + self.indent_delta)
        else:
            self._write_regular_rows(fmt_values, indent + self.indent_delta)

        self.write("</tbody>", indent)

    def _write_regular_rows(
        self, fmt_values: Mapping[int, list[str]], indent: int
    ) -> None:
        """Write body rows for a flat (non-MultiIndex) row index."""
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        is_truncated_vertically = self.fmt.is_truncated_vertically

        nrows = len(self.fmt.tr_frame)

        if self.fmt.index:
            fmt = self.fmt._get_formatter("__index__")
            if fmt is not None:
                index_values = self.fmt.tr_frame.index.map(fmt)
            else:
                # only reached with non-Multi index
                index_values = self.fmt.tr_frame.index._format_flat(include_name=False)

        row: list[str] = []
        for i in range(nrows):
            if is_truncated_vertically and i == (self.fmt.tr_row_num):
                # Width matches the previous data row (empty on iteration 0).
                str_sep_row = ["..."] * len(row)
                self.write_tr(
                    str_sep_row,
                    indent,
                    self.indent_delta,
                    tags=None,
                    nindex_levels=self.row_levels,
                )

            row = []
            if self.fmt.index:
                row.append(index_values[i])
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # Add blank cell before data cells.
            elif self.show_col_idx_names:
                row.append("")
            row.extend(fmt_values[j][i] for j in range(self.ncols))

            if is_truncated_horizontally:
                dot_col_ix = self.fmt.tr_col_num + self.row_levels
                row.insert(dot_col_ix, "...")
            self.write_tr(
                row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels
            )

    def _write_hierarchical_rows(
        self, fmt_values: Mapping[int, list[str]], indent: int
    ) -> None:
        """Write body rows for a MultiIndex row index, with rowspan sparsify."""
        template = 'rowspan="{span}" valign="top"'

        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        is_truncated_vertically = self.fmt.is_truncated_vertically
        frame = self.fmt.tr_frame
        nrows = len(frame)

        assert isinstance(frame.index, MultiIndex)
        idx_values = frame.index._format_multi(sparsify=False, include_names=False)
        idx_values = list(zip(*idx_values))

        if self.fmt.sparsify:
            # GH3547
            sentinel = lib.no_default
            levels = frame.index._format_multi(sparsify=sentinel, include_names=False)

            level_lengths = get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            if is_truncated_vertically:
                # Insert ... row and adjust idx_values and
                # level_lengths to take this into account.
                ins_row = self.fmt.tr_row_num
                inserted = False
                for lnum, records in enumerate(level_lengths):
                    rec_new = {}
                    for tag, span in list(records.items()):
                        if tag >= ins_row:
                            rec_new[tag + 1] = span
                        elif tag + span > ins_row:
                            rec_new[tag] = span + 1

                            # GH 14882 - Make sure insertion done once
                            if not inserted:
                                dot_row = list(idx_values[ins_row - 1])
                                dot_row[-1] = "..."
                                idx_values.insert(ins_row, tuple(dot_row))
                                inserted = True
                            else:
                                dot_row = list(idx_values[ins_row])
                                dot_row[inner_lvl - lnum] = "..."
                                idx_values[ins_row] = tuple(dot_row)
                        else:
                            rec_new[tag] = span
                        # If ins_row lies between tags, all cols idx cols
                        # receive ...
                        if tag + span == ins_row:
                            rec_new[ins_row] = 1
                            if lnum == 0:
                                idx_values.insert(
                                    ins_row, tuple(["..."] * len(level_lengths))
                                )

                            # GH 14882 - Place ... in correct level
                            elif inserted:
                                dot_row = list(idx_values[ins_row])
                                dot_row[inner_lvl - lnum] = "..."
                                idx_values[ins_row] = tuple(dot_row)
                    level_lengths[lnum] = rec_new

                level_lengths[inner_lvl][ins_row] = 1
                for ix_col in fmt_values:
                    fmt_values[ix_col].insert(ins_row, "...")
                nrows += 1

            for i in range(nrows):
                row = []
                tags = {}

                sparse_offset = 0
                j = 0
                for records, v in zip(level_lengths, idx_values[i]):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template.format(span=records[i])
                    else:
                        # Covered by a rowspan from an earlier row.
                        sparse_offset += 1
                        continue

                    j += 1
                    row.append(v)

                row.extend(fmt_values[j][i] for j in range(self.ncols))
                if is_truncated_horizontally:
                    row.insert(
                        self.row_levels - sparse_offset + self.fmt.tr_col_num, "..."
                    )
                self.write_tr(
                    row,
                    indent,
                    self.indent_delta,
                    tags=tags,
                    nindex_levels=len(levels) - sparse_offset,
                )
        else:
            row = []
            for i in range(len(frame)):
                if is_truncated_vertically and i == (self.fmt.tr_row_num):
                    str_sep_row = ["..."] * len(row)
                    self.write_tr(
                        str_sep_row,
                        indent,
                        self.indent_delta,
                        tags=None,
                        nindex_levels=self.row_levels,
                    )

                idx_values = list(
                    zip(*frame.index._format_multi(sparsify=False, include_names=False))
                )
                row = []
                row.extend(idx_values[i])
                row.extend(fmt_values[j][i] for j in range(self.ncols))
                if is_truncated_horizontally:
                    row.insert(self.row_levels + self.fmt.tr_col_num, "...")
                self.write_tr(
                    row,
                    indent,
                    self.indent_delta,
                    tags=None,
                    nindex_levels=frame.index.nlevels,
                )
|
597 |
+
|
598 |
+
|
599 |
+
class NotebookFormatter(HTMLFormatter):
    """
    Internal class for formatting output data in html for display in Jupyter
    Notebooks. This class is intended for functionality specific to
    DataFrame._repr_html_() and DataFrame.to_html(notebook=True)
    """

    def _get_formatted_values(self) -> dict[int, list[str]]:
        # Unlike the base class, do not override display.max_colwidth here.
        return {i: self.fmt.format_col(i) for i in range(self.ncols)}

    def _get_columns_formatted_values(self) -> list[str]:
        # only reached with non-Multi Index
        return self.columns._format_flat(include_name=False)

    def write_style(self) -> None:
        """Emit a scoped <style> block tailoring table CSS for notebooks."""
        # We use the "scoped" attribute here so that the desired
        # style properties for the data frame are not then applied
        # throughout the entire notebook.
        template_first = """\
            <style scoped>"""
        template_last = """\
            </style>"""
        template_select = """\
                .dataframe %s {
                    %s: %s;
                }"""
        element_props = [
            ("tbody tr th:only-of-type", "vertical-align", "middle"),
            ("tbody tr th", "vertical-align", "top"),
        ]
        if isinstance(self.columns, MultiIndex):
            element_props.append(("thead tr th", "text-align", "left"))
            if self.show_row_idx_names:
                element_props.append(
                    ("thead tr:last-of-type th", "text-align", "right")
                )
        else:
            element_props.append(("thead th", "text-align", "right"))
        template_mid = "\n\n".join(template_select % t for t in element_props)
        template = dedent(f"{template_first}\n{template_mid}\n{template_last}")
        self.write(template)

    def render(self) -> list[str]:
        """Render as HTML wrapped in a <div> carrying the notebook style."""
        self.write("<div>")
        self.write_style()
        super().render()
        self.write("</div>")
        return self.elements
|