Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; the raw diff follows.
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_246_mp_rank_01_optim_states.pt +3 -0
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_86_mp_rank_03_optim_states.pt +3 -0
- venv/lib/python3.10/site-packages/pandas/_config/__init__.py +57 -0
- venv/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/_config/config.py +948 -0
- venv/lib/python3.10/site-packages/pandas/_config/dates.py +25 -0
- venv/lib/python3.10/site-packages/pandas/_config/display.py +62 -0
- venv/lib/python3.10/site-packages/pandas/_config/localization.py +172 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py +747 -0
- venv/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__init__.py +19 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_calamine.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_base.py +1659 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_calamine.py +121 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py +253 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py +357 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py +639 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py +127 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_util.py +334 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py +143 -0
- venv/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py +284 -0
- venv/lib/python3.10/site-packages/pandas/io/json/__init__.py +15 -0
- venv/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc +0 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_246_mp_rank_01_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93a03ca010fbfc32878d25adc31f0cec4181a3b8699988e1220cc92e8b9758c1
+size 41830212
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_86_mp_rank_03_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a01bbb61578142434caaa00771c149abb902d97e02fe82695fe2d0b545a370ca
+size 41830394
venv/lib/python3.10/site-packages/pandas/_config/__init__.py
ADDED
@@ -0,0 +1,57 @@
+"""
+pandas._config is considered explicitly upstream of everything else in pandas,
+should have no intra-pandas dependencies.
+
+importing `dates` and `display` ensures that keys needed by _libs
+are initialized.
+"""
+__all__ = [
+    "config",
+    "detect_console_encoding",
+    "get_option",
+    "set_option",
+    "reset_option",
+    "describe_option",
+    "option_context",
+    "options",
+    "using_copy_on_write",
+    "warn_copy_on_write",
+]
+from pandas._config import config
+from pandas._config import dates  # pyright: ignore[reportUnusedImport]  # noqa: F401
+from pandas._config.config import (
+    _global_config,
+    describe_option,
+    get_option,
+    option_context,
+    options,
+    reset_option,
+    set_option,
+)
+from pandas._config.display import detect_console_encoding
+
+
+def using_copy_on_write() -> bool:
+    _mode_options = _global_config["mode"]
+    return (
+        _mode_options["copy_on_write"] is True
+        and _mode_options["data_manager"] == "block"
+    )
+
+
+def warn_copy_on_write() -> bool:
+    _mode_options = _global_config["mode"]
+    return (
+        _mode_options["copy_on_write"] == "warn"
+        and _mode_options["data_manager"] == "block"
+    )
+
+
+def using_nullable_dtypes() -> bool:
+    _mode_options = _global_config["mode"]
+    return _mode_options["nullable_dtypes"]
+
+
+def using_pyarrow_string_dtype() -> bool:
+    _mode_options = _global_config["future"]
+    return _mode_options["infer_string"]
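For orientation, the `get_option` / `set_option` / `option_context` names re-exported by this file are the same ones pandas exposes at the top level. A minimal usage sketch, assuming a standard pandas install (the `display.max_rows` key is registered elsewhere, in `pandas.core.config_init`):

    import pandas as pd

    # Read the current value of a registered option.
    print(pd.get_option("display.max_rows"))

    # Temporarily override it; the previous value is restored on exit,
    # which is what option_context (defined in config.py below) implements.
    with pd.option_context("display.max_rows", 5):
        assert pd.get_option("display.max_rows") == 5

    # Persistent change, then reset back to the registered default.
    pd.set_option("display.max_rows", 100)
    pd.reset_option("display.max_rows")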
venv/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc
ADDED
Binary file (26.4 kB)
venv/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc
ADDED
Binary file (748 Bytes)
venv/lib/python3.10/site-packages/pandas/_config/config.py
ADDED
@@ -0,0 +1,948 @@
+"""
+The config module holds package-wide configurables and provides
+a uniform API for working with them.
+
+Overview
+========
+
+This module supports the following requirements:
+- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
+- keys are case-insensitive.
+- functions should accept partial/regex keys, when unambiguous.
+- options can be registered by modules at import time.
+- options can be registered at init-time (via core.config_init)
+- options have a default value, and (optionally) a description and
+  validation function associated with them.
+- options can be deprecated, in which case referencing them
+  should produce a warning.
+- deprecated options can optionally be rerouted to a replacement
+  so that accessing a deprecated option reroutes to a differently
+  named option.
+- options can be reset to their default value.
+- all option can be reset to their default value at once.
+- all options in a certain sub - namespace can be reset at once.
+- the user can set / get / reset or ask for the description of an option.
+- a developer can register and mark an option as deprecated.
+- you can register a callback to be invoked when the option value
+  is set or reset. Changing the stored value is considered misuse, but
+  is not verboten.
+
+Implementation
+==============
+
+- Data is stored using nested dictionaries, and should be accessed
+  through the provided API.
+
+- "Registered options" and "Deprecated options" have metadata associated
+  with them, which are stored in auxiliary dictionaries keyed on the
+  fully-qualified key, e.g. "x.y.z.option".
+
+- the config_init module is imported by the package's __init__.py file.
+  placing any register_option() calls there will ensure those options
+  are available as soon as pandas is loaded. If you use register_option
+  in a module, it will only be available after that module is imported,
+  which you should be aware of.
+
+- `config_prefix` is a context_manager (for use with the `with` keyword)
+  which can save developers some typing, see the docstring.
+
+"""
+
+from __future__ import annotations
+
+from contextlib import (
+    ContextDecorator,
+    contextmanager,
+)
+import re
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Generic,
+    NamedTuple,
+    cast,
+)
+import warnings
+
+from pandas._typing import (
+    F,
+    T,
+)
+from pandas.util._exceptions import find_stack_level
+
+if TYPE_CHECKING:
+    from collections.abc import (
+        Generator,
+        Iterable,
+    )
+
+
+class DeprecatedOption(NamedTuple):
+    key: str
+    msg: str | None
+    rkey: str | None
+    removal_ver: str | None
+
+
+class RegisteredOption(NamedTuple):
+    key: str
+    defval: object
+    doc: str
+    validator: Callable[[object], Any] | None
+    cb: Callable[[str], Any] | None
+
+
+# holds deprecated option metadata
+_deprecated_options: dict[str, DeprecatedOption] = {}
+
+# holds registered option metadata
+_registered_options: dict[str, RegisteredOption] = {}
+
+# holds the current values for registered options
+_global_config: dict[str, Any] = {}
+
+# keys which have a special meaning
+_reserved_keys: list[str] = ["all"]
+
+
+class OptionError(AttributeError, KeyError):
+    """
+    Exception raised for pandas.options.
+
+    Backwards compatible with KeyError checks.
+
+    Examples
+    --------
+    >>> pd.options.context
+    Traceback (most recent call last):
+    OptionError: No such option
+    """
+
+
+#
+# User API
+
+
+def _get_single_key(pat: str, silent: bool) -> str:
+    keys = _select_options(pat)
+    if len(keys) == 0:
+        if not silent:
+            _warn_if_deprecated(pat)
+        raise OptionError(f"No such keys(s): {repr(pat)}")
+    if len(keys) > 1:
+        raise OptionError("Pattern matched multiple keys")
+    key = keys[0]
+
+    if not silent:
+        _warn_if_deprecated(key)
+
+    key = _translate_key(key)
+
+    return key
+
+
+def _get_option(pat: str, silent: bool = False) -> Any:
+    key = _get_single_key(pat, silent)
+
+    # walk the nested dict
+    root, k = _get_root(key)
+    return root[k]
+
+
+def _set_option(*args, **kwargs) -> None:
+    # must at least 1 arg deal with constraints later
+    nargs = len(args)
+    if not nargs or nargs % 2 != 0:
+        raise ValueError("Must provide an even number of non-keyword arguments")
+
+    # default to false
+    silent = kwargs.pop("silent", False)
+
+    if kwargs:
+        kwarg = next(iter(kwargs.keys()))
+        raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"')
+
+    for k, v in zip(args[::2], args[1::2]):
+        key = _get_single_key(k, silent)
+
+        o = _get_registered_option(key)
+        if o and o.validator:
+            o.validator(v)
+
+        # walk the nested dict
+        root, k_root = _get_root(key)
+        root[k_root] = v
+
+        if o.cb:
+            if silent:
+                with warnings.catch_warnings(record=True):
+                    o.cb(key)
+            else:
+                o.cb(key)
+
+
+def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
+    keys = _select_options(pat)
+    if len(keys) == 0:
+        raise OptionError("No such keys(s)")
+
+    s = "\n".join([_build_option_description(k) for k in keys])
+
+    if _print_desc:
+        print(s)
+        return None
+    return s
+
+
+def _reset_option(pat: str, silent: bool = False) -> None:
+    keys = _select_options(pat)
+
+    if len(keys) == 0:
+        raise OptionError("No such keys(s)")
+
+    if len(keys) > 1 and len(pat) < 4 and pat != "all":
+        raise ValueError(
+            "You must specify at least 4 characters when "
+            "resetting multiple keys, use the special keyword "
+            '"all" to reset all the options to their default value'
+        )
+
+    for k in keys:
+        _set_option(k, _registered_options[k].defval, silent=silent)
+
+
+def get_default_val(pat: str):
+    key = _get_single_key(pat, silent=True)
+    return _get_registered_option(key).defval
+
+
+class DictWrapper:
+    """provide attribute-style access to a nested dict"""
+
+    d: dict[str, Any]
+
+    def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
+        object.__setattr__(self, "d", d)
+        object.__setattr__(self, "prefix", prefix)
+
+    def __setattr__(self, key: str, val: Any) -> None:
+        prefix = object.__getattribute__(self, "prefix")
+        if prefix:
+            prefix += "."
+        prefix += key
+        # you can't set new keys
+        # can you can't overwrite subtrees
+        if key in self.d and not isinstance(self.d[key], dict):
+            _set_option(prefix, val)
+        else:
+            raise OptionError("You can only set the value of existing options")
+
+    def __getattr__(self, key: str):
+        prefix = object.__getattribute__(self, "prefix")
+        if prefix:
+            prefix += "."
+        prefix += key
+        try:
+            v = object.__getattribute__(self, "d")[key]
+        except KeyError as err:
+            raise OptionError("No such option") from err
+        if isinstance(v, dict):
+            return DictWrapper(v, prefix)
+        else:
+            return _get_option(prefix)
+
+    def __dir__(self) -> list[str]:
+        return list(self.d.keys())
+
+
+# For user convenience, we'd like to have the available options described
+# in the docstring. For dev convenience we'd like to generate the docstrings
+# dynamically instead of maintaining them by hand. To this, we use the
+# class below which wraps functions inside a callable, and converts
+# __doc__ into a property function. The doctsrings below are templates
+# using the py2.6+ advanced formatting syntax to plug in a concise list
+# of options, and option descriptions.
+
+
+class CallableDynamicDoc(Generic[T]):
+    def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
+        self.__doc_tmpl__ = doc_tmpl
+        self.__func__ = func
+
+    def __call__(self, *args, **kwds) -> T:
+        return self.__func__(*args, **kwds)
+
+    # error: Signature of "__doc__" incompatible with supertype "object"
+    @property
+    def __doc__(self) -> str:  # type: ignore[override]
+        opts_desc = _describe_option("all", _print_desc=False)
+        opts_list = pp_options_list(list(_registered_options.keys()))
+        return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)
+
+
+_get_option_tmpl = """
+get_option(pat)
+
+Retrieves the value of the specified option.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp which should match a single option.
+    Note: partial matches are supported for convenience, but unless you use the
+    full option name (e.g. x.y.z.option_name), your code may break in future
+    versions if new options with similar names are introduced.
+
+Returns
+-------
+result : the value of the option
+
+Raises
+------
+OptionError : if no such option exists
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with its descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.get_option('display.max_columns')  # doctest: +SKIP
+4
+"""
+
+_set_option_tmpl = """
+set_option(pat, value)
+
+Sets the value of the specified option.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp which should match a single option.
+    Note: partial matches are supported for convenience, but unless you use the
+    full option name (e.g. x.y.z.option_name), your code may break in future
+    versions if new options with similar names are introduced.
+value : object
+    New value of option.
+
+Returns
+-------
+None
+
+Raises
+------
+OptionError if no such option exists
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with its descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.set_option('display.max_columns', 4)
+>>> df = pd.DataFrame([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
+>>> df
+   0  1  ...  3   4
+0  1  2  ...  4   5
+1  6  7  ...  9  10
+[2 rows x 5 columns]
+>>> pd.reset_option('display.max_columns')
+"""
+
+_describe_option_tmpl = """
+describe_option(pat, _print_desc=False)
+
+Prints the description for one or more registered options.
+
+Call with no arguments to get a listing for all registered options.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str
+    Regexp pattern. All matching keys will have their description displayed.
+_print_desc : bool, default True
+    If True (default) the description(s) will be printed to stdout.
+    Otherwise, the description(s) will be returned as a unicode string
+    (for testing).
+
+Returns
+-------
+None by default, the description(s) as a unicode string if _print_desc
+is False
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with its descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.describe_option('display.max_columns')  # doctest: +SKIP
+display.max_columns : int
+    If max_cols is exceeded, switch to truncate view...
+"""
+
+_reset_option_tmpl = """
+reset_option(pat)
+
+Reset one or more options to their default value.
+
+Pass "all" as argument to reset all options.
+
+Available options:
+
+{opts_list}
+
+Parameters
+----------
+pat : str/regex
+    If specified only options matching `prefix*` will be reset.
+    Note: partial matches are supported for convenience, but unless you
+    use the full option name (e.g. x.y.z.option_name), your code may break
+    in future versions if new options with similar names are introduced.
+
+Returns
+-------
+None
+
+Notes
+-----
+Please reference the :ref:`User Guide <options>` for more information.
+
+The available options with its descriptions:
+
+{opts_desc}
+
+Examples
+--------
+>>> pd.reset_option('display.max_columns')  # doctest: +SKIP
+"""
+
+# bind the functions with their docstrings into a Callable
+# and use that as the functions exposed in pd.api
+get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
+set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
+reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
+describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
+options = DictWrapper(_global_config)
+
+#
+# Functions for use by pandas developers, in addition to User - api
+
+
+class option_context(ContextDecorator):
+    """
+    Context manager to temporarily set options in the `with` statement context.
+
+    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
+
+    Examples
+    --------
+    >>> from pandas import option_context
+    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
+    ...     pass
+    """
+
+    def __init__(self, *args) -> None:
+        if len(args) % 2 != 0 or len(args) < 2:
+            raise ValueError(
+                "Need to invoke as option_context(pat, val, [(pat, val), ...])."
+            )
+
+        self.ops = list(zip(args[::2], args[1::2]))
+
+    def __enter__(self) -> None:
+        self.undo = [(pat, _get_option(pat)) for pat, val in self.ops]
+
+        for pat, val in self.ops:
+            _set_option(pat, val, silent=True)
+
+    def __exit__(self, *args) -> None:
+        if self.undo:
+            for pat, val in self.undo:
+                _set_option(pat, val, silent=True)
+
+
+def register_option(
+    key: str,
+    defval: object,
+    doc: str = "",
+    validator: Callable[[object], Any] | None = None,
+    cb: Callable[[str], Any] | None = None,
+) -> None:
+    """
+    Register an option in the package-wide pandas config object
+
+    Parameters
+    ----------
+    key : str
+        Fully-qualified key, e.g. "x.y.option - z".
+    defval : object
+        Default value of the option.
+    doc : str
+        Description of the option.
+    validator : Callable, optional
+        Function of a single argument, should raise `ValueError` if
+        called with a value which is not a legal value for the option.
+    cb
+        a function of a single argument "key", which is called
+        immediately after an option value is set/reset. key is
+        the full name of the option.
+
+    Raises
+    ------
+    ValueError if `validator` is specified and `defval` is not a valid value.
+
+    """
+    import keyword
+    import tokenize
+
+    key = key.lower()
+
+    if key in _registered_options:
+        raise OptionError(f"Option '{key}' has already been registered")
+    if key in _reserved_keys:
+        raise OptionError(f"Option '{key}' is a reserved key")
+
+    # the default value should be legal
+    if validator:
+        validator(defval)
+
+    # walk the nested dict, creating dicts as needed along the path
+    path = key.split(".")
+
+    for k in path:
+        if not re.match("^" + tokenize.Name + "$", k):
+            raise ValueError(f"{k} is not a valid identifier")
+        if keyword.iskeyword(k):
+            raise ValueError(f"{k} is a python keyword")
+
+    cursor = _global_config
+    msg = "Path prefix to option '{option}' is already an option"
+
+    for i, p in enumerate(path[:-1]):
+        if not isinstance(cursor, dict):
+            raise OptionError(msg.format(option=".".join(path[:i])))
+        if p not in cursor:
+            cursor[p] = {}
+        cursor = cursor[p]
+
+    if not isinstance(cursor, dict):
+        raise OptionError(msg.format(option=".".join(path[:-1])))
+
+    cursor[path[-1]] = defval  # initialize
+
+    # save the option metadata
+    _registered_options[key] = RegisteredOption(
+        key=key, defval=defval, doc=doc, validator=validator, cb=cb
+    )
+
+
+def deprecate_option(
+    key: str,
+    msg: str | None = None,
+    rkey: str | None = None,
+    removal_ver: str | None = None,
+) -> None:
+    """
+    Mark option `key` as deprecated, if code attempts to access this option,
+    a warning will be produced, using `msg` if given, or a default message
+    if not.
+    if `rkey` is given, any access to the key will be re-routed to `rkey`.
+
+    Neither the existence of `key` nor that if `rkey` is checked. If they
+    do not exist, any subsequence access will fail as usual, after the
+    deprecation warning is given.
+
+    Parameters
+    ----------
+    key : str
+        Name of the option to be deprecated.
+        must be a fully-qualified option name (e.g "x.y.z.rkey").
+    msg : str, optional
+        Warning message to output when the key is referenced.
+        if no message is given a default message will be emitted.
+    rkey : str, optional
+        Name of an option to reroute access to.
+        If specified, any referenced `key` will be
+        re-routed to `rkey` including set/get/reset.
+        rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
+        used by the default message if no `msg` is specified.
+    removal_ver : str, optional
+        Specifies the version in which this option will
+        be removed. used by the default message if no `msg` is specified.
+
+    Raises
+    ------
+    OptionError
+        If the specified key has already been deprecated.
+    """
+    key = key.lower()
+
+    if key in _deprecated_options:
+        raise OptionError(f"Option '{key}' has already been defined as deprecated.")
+
+    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
+
+
+#
+# functions internal to the module
+
+
+def _select_options(pat: str) -> list[str]:
+    """
+    returns a list of keys matching `pat`
+
+    if pat=="all", returns all registered options
+    """
+    # short-circuit for exact key
+    if pat in _registered_options:
+        return [pat]
+
+    # else look through all of them
+    keys = sorted(_registered_options.keys())
+    if pat == "all":  # reserved key
+        return keys
+
+    return [k for k in keys if re.search(pat, k, re.I)]
+
+
+def _get_root(key: str) -> tuple[dict[str, Any], str]:
+    path = key.split(".")
+    cursor = _global_config
+    for p in path[:-1]:
+        cursor = cursor[p]
+    return cursor, path[-1]
+
+
+def _is_deprecated(key: str) -> bool:
+    """Returns True if the given option has been deprecated"""
+    key = key.lower()
+    return key in _deprecated_options
+
+
+def _get_deprecated_option(key: str):
+    """
+    Retrieves the metadata for a deprecated option, if `key` is deprecated.
+
+    Returns
+    -------
+    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
+    """
+    try:
+        d = _deprecated_options[key]
+    except KeyError:
+        return None
+    else:
+        return d
+
+
+def _get_registered_option(key: str):
+    """
+    Retrieves the option metadata if `key` is a registered option.
+
+    Returns
+    -------
+    RegisteredOption (namedtuple) if key is deprecated, None otherwise
+    """
+    return _registered_options.get(key)
+
+
+def _translate_key(key: str) -> str:
+    """
+    if key id deprecated and a replacement key defined, will return the
+    replacement key, otherwise returns `key` as - is
+    """
+    d = _get_deprecated_option(key)
+    if d:
+        return d.rkey or key
+    else:
+        return key
+
+
+def _warn_if_deprecated(key: str) -> bool:
+    """
+    Checks if `key` is a deprecated option and if so, prints a warning.
+
+    Returns
+    -------
+    bool - True if `key` is deprecated, False otherwise.
+    """
+    d = _get_deprecated_option(key)
+    if d:
+        if d.msg:
+            warnings.warn(
+                d.msg,
+                FutureWarning,
+                stacklevel=find_stack_level(),
+            )
+        else:
+            msg = f"'{key}' is deprecated"
+            if d.removal_ver:
+                msg += f" and will be removed in {d.removal_ver}"
+            if d.rkey:
+                msg += f", please use '{d.rkey}' instead."
+            else:
+                msg += ", please refrain from using it."
+
+            warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
+        return True
+    return False
+
+
+def _build_option_description(k: str) -> str:
+    """Builds a formatted description of a registered option and prints it"""
+    o = _get_registered_option(k)
+    d = _get_deprecated_option(k)
+
+    s = f"{k} "
+
+    if o.doc:
+        s += "\n".join(o.doc.strip().split("\n"))
+    else:
+        s += "No description available."
+
+    if o:
+        s += f"\n [default: {o.defval}] [currently: {_get_option(k, True)}]"
+
+    if d:
+        rkey = d.rkey or ""
+        s += "\n (Deprecated"
+        s += f", use `{rkey}` instead."
+        s += ")"
+
+    return s
+
+
+def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False):
+    """Builds a concise listing of available options, grouped by prefix"""
+    from itertools import groupby
+    from textwrap import wrap
+
+    def pp(name: str, ks: Iterable[str]) -> list[str]:
+        pfx = "- " + name + ".[" if name else ""
+        ls = wrap(
+            ", ".join(ks),
+            width,
+            initial_indent=pfx,
+            subsequent_indent=" ",
+            break_long_words=False,
+        )
+        if ls and ls[-1] and name:
+            ls[-1] = ls[-1] + "]"
+        return ls
+
+    ls: list[str] = []
+    singles = [x for x in sorted(keys) if x.find(".") < 0]
+    if singles:
+        ls += pp("", singles)
+    keys = [x for x in keys if x.find(".") >= 0]
+
+    for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]):
+        ks = [x[len(k) + 1 :] for x in list(g)]
+        ls += pp(k, ks)
+    s = "\n".join(ls)
+    if _print:
+        print(s)
+    else:
+        return s
+
+
+#
+# helpers
+
+
+@contextmanager
+def config_prefix(prefix: str) -> Generator[None, None, None]:
+    """
+    contextmanager for multiple invocations of API with a common prefix
+
+    supported API functions: (register / get / set )__option
+
+    Warning: This is not thread - safe, and won't work properly if you import
+    the API functions into your module using the "from x import y" construct.
+
+    Example
+    -------
+    import pandas._config.config as cf
+    with cf.config_prefix("display.font"):
+        cf.register_option("color", "red")
+        cf.register_option("size", " 5 pt")
+        cf.set_option(size, " 6 pt")
+        cf.get_option(size)
+        ...
+
+    etc'
+
+    will register options "display.font.color", "display.font.size", set the
+    value of "display.font.size"... and so on.
+    """
+    # Note: reset_option relies on set_option, and on key directly
+    # it does not fit in to this monkey-patching scheme
+
+    global register_option, get_option, set_option
+
+    def wrap(func: F) -> F:
+        def inner(key: str, *args, **kwds):
+            pkey = f"{prefix}.{key}"
+            return func(pkey, *args, **kwds)
+
+        return cast(F, inner)
+
+    _register_option = register_option
+    _get_option = get_option
+    _set_option = set_option
+    set_option = wrap(set_option)
+    get_option = wrap(get_option)
+    register_option = wrap(register_option)
+    try:
+        yield
+    finally:
+        set_option = _set_option
+        get_option = _get_option
+        register_option = _register_option
+
+
+# These factories and methods are handy for use as the validator
+# arg in register_option
+
+
+def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:
+    """
+
+    Parameters
+    ----------
+    `_type` - a type to be compared against (e.g. type(x) == `_type`)
+
+    Returns
+    -------
+    validator - a function of a single argument x , which raises
+        ValueError if type(x) is not equal to `_type`
+
+    """
+
+    def inner(x) -> None:
+        if type(x) != _type:
+            raise ValueError(f"Value must have type '{_type}'")
+
+    return inner
+
+
+def is_instance_factory(_type) -> Callable[[Any], None]:
+    """
+
+    Parameters
+    ----------
+    `_type` - the type to be checked against
+
+    Returns
+    -------
+    validator - a function of a single argument x , which raises
+        ValueError if x is not an instance of `_type`
+
+    """
+    if isinstance(_type, (tuple, list)):
+        _type = tuple(_type)
+        type_repr = "|".join(map(str, _type))
+    else:
+        type_repr = f"'{_type}'"
+
+    def inner(x) -> None:
+        if not isinstance(x, _type):
+            raise ValueError(f"Value must be an instance of {type_repr}")
+
+    return inner
+
+
+def is_one_of_factory(legal_values) -> Callable[[Any], None]:
+    callables = [c for c in legal_values if callable(c)]
+    legal_values = [c for c in legal_values if not callable(c)]
+
+    def inner(x) -> None:
+        if x not in legal_values:
+            if not any(c(x) for c in callables):
+                uvals = [str(lval) for lval in legal_values]
+                pp_values = "|".join(uvals)
+                msg = f"Value must be one of {pp_values}"
+                if len(callables):
+                    msg += " or a callable"
+                raise ValueError(msg)
+
+    return inner
+
+
+def is_nonnegative_int(value: object) -> None:
+    """
+    Verify that value is None or a positive int.
+
+    Parameters
+    ----------
+    value : None or int
+        The `value` to be checked.
+
+    Raises
+    ------
+    ValueError
+        When the value is not None or is a negative integer
+    """
+    if value is None:
+        return
+
+    elif isinstance(value, int):
+        if value >= 0:
+            return
+
+    msg = "Value must be a nonnegative integer or None"
+    raise ValueError(msg)
+
+
+# common type validators, for convenience
+# usage: register_option(... , validator = is_int)
+is_int = is_type_factory(int)
+is_bool = is_type_factory(bool)
+is_float = is_type_factory(float)
+is_str = is_type_factory(str)
+is_text = is_instance_factory((str, bytes))
+
+
+def is_callable(obj) -> bool:
+    """
+
+    Parameters
+    ----------
+    `obj` - the object to be checked
+
+    Returns
+    -------
+    validator - returns True if object is callable
+        raises ValueError otherwise.
+
+    """
+    if not callable(obj):
+        raise ValueError("Value must be a callable")
+    return True
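The module above can be exercised directly; a minimal sketch following the `config_prefix` docstring (the `display.font.*` keys are hypothetical and only registered by this snippet):

    import pandas._config.config as cf

    # Register hypothetical options under a common prefix, with a validator.
    with cf.config_prefix("display.font"):
        cf.register_option("color", "red", doc=": str, font color", validator=cf.is_str)
        cf.register_option("size", "5pt", doc=": str, font size", validator=cf.is_str)
        cf.set_option("color", "blue")

    # Outside the prefix block the fully-qualified key is used;
    # reset_option is never wrapped by config_prefix (see the note in its body).
    assert cf.get_option("display.font.color") == "blue"
    cf.reset_option("display.font.color")  # back to the registered default, "red"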
venv/lib/python3.10/site-packages/pandas/_config/dates.py
ADDED
@@ -0,0 +1,25 @@
+"""
+config for datetime formatting
+"""
+from __future__ import annotations
+
+from pandas._config import config as cf
+
+pc_date_dayfirst_doc = """
+: boolean
+    When True, prints and parses dates with the day first, eg 20/01/2005
+"""
+
+pc_date_yearfirst_doc = """
+: boolean
+    When True, prints and parses dates with the year first, eg 2005/01/20
+"""
+
+with cf.config_prefix("display"):
+    # Needed upstream of `_libs` because these are used in tslibs.parsing
+    cf.register_option(
+        "date_dayfirst", False, pc_date_dayfirst_doc, validator=cf.is_bool
+    )
+    cf.register_option(
+        "date_yearfirst", False, pc_date_yearfirst_doc, validator=cf.is_bool
+    )
venv/lib/python3.10/site-packages/pandas/_config/display.py
ADDED
@@ -0,0 +1,62 @@
+"""
+Unopinionated display configuration.
+"""
+
+from __future__ import annotations
+
+import locale
+import sys
+
+from pandas._config import config as cf
+
+# -----------------------------------------------------------------------------
+# Global formatting options
+_initial_defencoding: str | None = None
+
+
+def detect_console_encoding() -> str:
+    """
+    Try to find the most capable encoding supported by the console.
+    slightly modified from the way IPython handles the same issue.
+    """
+    global _initial_defencoding
+
+    encoding = None
+    try:
+        encoding = sys.stdout.encoding or sys.stdin.encoding
+    except (AttributeError, OSError):
+        pass
+
+    # try again for something better
+    if not encoding or "ascii" in encoding.lower():
+        try:
+            encoding = locale.getpreferredencoding()
+        except locale.Error:
+            # can be raised by locale.setlocale(), which is
+            # called by getpreferredencoding
+            # (on some systems, see stdlib locale docs)
+            pass
+
+    # when all else fails. this will usually be "ascii"
+    if not encoding or "ascii" in encoding.lower():
+        encoding = sys.getdefaultencoding()
+
+    # GH#3360, save the reported defencoding at import time
+    # MPL backends may change it. Make available for debugging.
+    if not _initial_defencoding:
+        _initial_defencoding = sys.getdefaultencoding()
+
+    return encoding
+
+
+pc_encoding_doc = """
+: str/unicode
+    Defaults to the detected encoding of the console.
+    Specifies the encoding to be used for strings returned by to_string,
+    these are generally strings meant to be displayed on the console.
+"""
+
+with cf.config_prefix("display"):
+    cf.register_option(
+        "encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text
+    )
venv/lib/python3.10/site-packages/pandas/_config/localization.py
ADDED
@@ -0,0 +1,172 @@
+"""
+Helpers for configuring locale settings.
+
+Name `localization` is chosen to avoid overlap with builtin `locale` module.
+"""
+from __future__ import annotations
+
+from contextlib import contextmanager
+import locale
+import platform
+import re
+import subprocess
+from typing import TYPE_CHECKING
+
+from pandas._config.config import options
+
+if TYPE_CHECKING:
+    from collections.abc import Generator
+
+
+@contextmanager
+def set_locale(
+    new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
+) -> Generator[str | tuple[str, str], None, None]:
+    """
+    Context manager for temporarily setting a locale.
+
+    Parameters
+    ----------
+    new_locale : str or tuple
+        A string of the form <language_country>.<encoding>. For example to set
+        the current locale to US English with a UTF8 encoding, you would pass
+        "en_US.UTF-8".
+    lc_var : int, default `locale.LC_ALL`
+        The category of the locale being set.
+
+    Notes
+    -----
+    This is useful when you want to run a particular block of code under a
+    particular locale, without globally setting the locale. This probably isn't
+    thread-safe.
+    """
+    # getlocale is not always compliant with setlocale, use setlocale. GH#46595
+    current_locale = locale.setlocale(lc_var)
+
+    try:
+        locale.setlocale(lc_var, new_locale)
+        normalized_code, normalized_encoding = locale.getlocale()
+        if normalized_code is not None and normalized_encoding is not None:
+            yield f"{normalized_code}.{normalized_encoding}"
+        else:
+            yield new_locale
+    finally:
+        locale.setlocale(lc_var, current_locale)
+
+
+def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
+    """
+    Check to see if we can set a locale, and subsequently get the locale,
+    without raising an Exception.
+
+    Parameters
+    ----------
+    lc : str
+        The locale to attempt to set.
+    lc_var : int, default `locale.LC_ALL`
+        The category of the locale being set.
+
+    Returns
+    -------
+    bool
+        Whether the passed locale can be set
+    """
+    try:
+        with set_locale(lc, lc_var=lc_var):
+            pass
+    except (ValueError, locale.Error):
+        # horrible name for a Exception subclass
+        return False
+    else:
+        return True
+
+
+def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]:
+    """
+    Return a list of normalized locales that do not throw an ``Exception``
+    when set.
+
+    Parameters
+    ----------
+    locales : str
+        A string where each locale is separated by a newline.
+    normalize : bool
+        Whether to call ``locale.normalize`` on each locale.
+
+    Returns
+    -------
+    valid_locales : list
+        A list of valid locales.
+    """
+    return [
+        loc
+        for loc in (
+            locale.normalize(loc.strip()) if normalize else loc.strip()
+            for loc in locales
+        )
+        if can_set_locale(loc)
+    ]
+
+
+def get_locales(
+    prefix: str | None = None,
+    normalize: bool = True,
+) -> list[str]:
+    """
+    Get all the locales that are available on the system.
+
+    Parameters
+    ----------
+    prefix : str
+        If not ``None`` then return only those locales with the prefix
+        provided. For example to get all English language locales (those that
+        start with ``"en"``), pass ``prefix="en"``.
+    normalize : bool
+        Call ``locale.normalize`` on the resulting list of available locales.
+        If ``True``, only locales that can be set without throwing an
+        ``Exception`` are returned.
+
+    Returns
+    -------
+    locales : list of strings
+        A list of locale strings that can be set with ``locale.setlocale()``.
+        For example::
+
+            locale.setlocale(locale.LC_ALL, locale_string)
+
+    On error will return an empty list (no locale available, e.g. Windows)
+
+    """
+    if platform.system() in ("Linux", "Darwin"):
+        raw_locales = subprocess.check_output(["locale", "-a"])
+    else:
+        # Other platforms e.g. windows platforms don't define "locale -a"
+        # Note: is_platform_windows causes circular import here
+        return []
+
+    try:
+        # raw_locales is "\n" separated list of locales
+        # it may contain non-decodable parts, so split
+        # extract what we can and then rejoin.
+        split_raw_locales = raw_locales.split(b"\n")
+        out_locales = []
+        for x in split_raw_locales:
+            try:
+                out_locales.append(str(x, encoding=options.display.encoding))
+            except UnicodeError:
+                # 'locale -a' is used to populated 'raw_locales' and on
+                # Redhat 7 Linux (and maybe others) prints locale names
+                # using windows-1252 encoding. Bug only triggered by
+                # a few special characters and when there is an
+                # extensive list of installed locales.
+                out_locales.append(str(x, encoding="windows-1252"))
+
+    except TypeError:
+        pass
+
+    if prefix is None:
+        return _valid_locales(out_locales, normalize)
+
+    pattern = re.compile(f"{prefix}.*")
+    found = pattern.findall("\n".join(out_locales))
+    return _valid_locales(found, normalize)
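A short sketch of how these locale helpers compose, assuming a POSIX system where the de_DE.UTF-8 locale is installed (on Windows, `get_locales` returns an empty list, as noted above):

    import locale

    from pandas._config.localization import can_set_locale, get_locales, set_locale

    # Enumerate installed locales, optionally filtered by prefix.
    print(get_locales(prefix="en"))

    # Run a block under a specific locale; the previous one is restored on exit.
    if can_set_locale("de_DE.UTF-8"):
        with set_locale("de_DE.UTF-8"):
            print(locale.localeconv()["decimal_point"])  # "," under de_DE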
venv/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (324 Bytes)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc
ADDED
Binary file (1.2 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc
ADDED
Binary file (1.32 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc
ADDED
Binary file (5.25 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc
ADDED
Binary file (27.8 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc
ADDED
Binary file (4.24 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc
ADDED
Binary file (8.79 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc
ADDED
Binary file (36.5 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc
ADDED
Binary file (7.66 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc
ADDED
Binary file (18.7 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc
ADDED
Binary file (5.83 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc
ADDED
Binary file (138 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc
ADDED
Binary file (2.31 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc
ADDED
Binary file (79.2 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc
ADDED
Binary file (103 kB)
venv/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc
ADDED
Binary file (34 kB)
venv/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py
ADDED
@@ -0,0 +1,747 @@
"""
Pyperclip

A cross-platform clipboard module for Python,
with copy & paste functions for plain text.
By Al Sweigart [email protected]
Licence at LICENSES/PYPERCLIP_LICENSE

Usage:
  import pyperclip
  pyperclip.copy('The text to be copied to the clipboard.')
  spam = pyperclip.paste()

  if not pyperclip.is_available():
    print("Copy functionality unavailable!")

On Windows, no additional modules are needed.
On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli
    commands. (These commands should come with OS X.).
On Linux, install xclip, xsel, or wl-clipboard (for "wayland" sessions) via
package manager.
For example, in Debian:
    sudo apt-get install xclip
    sudo apt-get install xsel
    sudo apt-get install wl-clipboard

Otherwise on Linux, you will need the PyQt5 modules installed.

This module does not work with PyGObject yet.

Cygwin is currently not supported.

Security Note: This module runs programs with these names:
    - pbcopy
    - pbpaste
    - xclip
    - xsel
    - wl-copy/wl-paste
    - klipper
    - qdbus
A malicious user could rename or add programs with these names, tricking
Pyperclip into running them with whatever permissions the Python process has.

"""

__version__ = "1.8.2"


import contextlib
import ctypes
from ctypes import (
    c_size_t,
    c_wchar,
    c_wchar_p,
    get_errno,
    sizeof,
)
import os
import platform
from shutil import which as _executable_exists
import subprocess
import time
import warnings

from pandas.errors import (
    PyperclipException,
    PyperclipWindowsException,
)
from pandas.util._exceptions import find_stack_level

# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
# Thus, we need to detect the presence of $DISPLAY manually
# and not load PyQt4 if it is absent.
HAS_DISPLAY = os.getenv("DISPLAY")

EXCEPT_MSG = """
    Pyperclip could not find a copy/paste mechanism for your system.
    For more information, please visit
    https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error
    """

ENCODING = "utf-8"


class PyperclipTimeoutException(PyperclipException):
    pass


def _stringifyText(text) -> str:
    acceptedTypes = (str, int, float, bool)
    if not isinstance(text, acceptedTypes):
        raise PyperclipException(
            f"only str, int, float, and bool values "
            f"can be copied to the clipboard, not {type(text).__name__}"
        )
    return str(text)


def init_osx_pbcopy_clipboard():
    def copy_osx_pbcopy(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        with subprocess.Popen(
            ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True
        ) as p:
            p.communicate(input=text.encode(ENCODING))

    def paste_osx_pbcopy():
        with subprocess.Popen(
            ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True
        ) as p:
            stdout = p.communicate()[0]
        return stdout.decode(ENCODING)

    return copy_osx_pbcopy, paste_osx_pbcopy


def init_osx_pyobjc_clipboard():
    def copy_osx_pyobjc(text):
        """Copy string argument to clipboard"""
        text = _stringifyText(text)  # Converts non-str values to str.
        newStr = Foundation.NSString.stringWithString_(text).nsstring()
        newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)
        board = AppKit.NSPasteboard.generalPasteboard()
        board.declareTypes_owner_([AppKit.NSStringPboardType], None)
        board.setData_forType_(newData, AppKit.NSStringPboardType)

    def paste_osx_pyobjc():
        """Returns contents of clipboard"""
        board = AppKit.NSPasteboard.generalPasteboard()
        content = board.stringForType_(AppKit.NSStringPboardType)
        return content

    return copy_osx_pyobjc, paste_osx_pyobjc


def init_qt_clipboard():
    global QApplication
    # $DISPLAY should exist

    # Try to import from qtpy, but if that fails try PyQt5 then PyQt4
    try:
        from qtpy.QtWidgets import QApplication
    except ImportError:
        try:
            from PyQt5.QtWidgets import QApplication
        except ImportError:
            from PyQt4.QtGui import QApplication

    app = QApplication.instance()
    if app is None:
        app = QApplication([])

    def copy_qt(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        cb = app.clipboard()
        cb.setText(text)

    def paste_qt() -> str:
        cb = app.clipboard()
        return str(cb.text())

    return copy_qt, paste_qt


def init_xclip_clipboard():
    DEFAULT_SELECTION = "c"
    PRIMARY_SELECTION = "p"

    def copy_xclip(text, primary=False):
        text = _stringifyText(text)  # Converts non-str values to str.
        selection = DEFAULT_SELECTION
        if primary:
            selection = PRIMARY_SELECTION
        with subprocess.Popen(
            ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True
        ) as p:
            p.communicate(input=text.encode(ENCODING))

    def paste_xclip(primary=False):
        selection = DEFAULT_SELECTION
        if primary:
            selection = PRIMARY_SELECTION
        with subprocess.Popen(
            ["xclip", "-selection", selection, "-o"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        ) as p:
            stdout = p.communicate()[0]
        # Intentionally ignore extraneous output on stderr when clipboard is empty
        return stdout.decode(ENCODING)

    return copy_xclip, paste_xclip


def init_xsel_clipboard():
    DEFAULT_SELECTION = "-b"
    PRIMARY_SELECTION = "-p"

    def copy_xsel(text, primary=False):
        text = _stringifyText(text)  # Converts non-str values to str.
        selection_flag = DEFAULT_SELECTION
        if primary:
            selection_flag = PRIMARY_SELECTION
        with subprocess.Popen(
            ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True
        ) as p:
            p.communicate(input=text.encode(ENCODING))

    def paste_xsel(primary=False):
        selection_flag = DEFAULT_SELECTION
        if primary:
            selection_flag = PRIMARY_SELECTION
        with subprocess.Popen(
            ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True
        ) as p:
            stdout = p.communicate()[0]
        return stdout.decode(ENCODING)

    return copy_xsel, paste_xsel


def init_wl_clipboard():
    PRIMARY_SELECTION = "-p"

    def copy_wl(text, primary=False):
        text = _stringifyText(text)  # Converts non-str values to str.
        args = ["wl-copy"]
        if primary:
            args.append(PRIMARY_SELECTION)
        if not text:
            args.append("--clear")
            subprocess.check_call(args, close_fds=True)
        else:
            p = subprocess.Popen(args, stdin=subprocess.PIPE, close_fds=True)
            p.communicate(input=text.encode(ENCODING))

    def paste_wl(primary=False):
        args = ["wl-paste", "-n"]
        if primary:
            args.append(PRIMARY_SELECTION)
        p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True)
        stdout, _stderr = p.communicate()
        return stdout.decode(ENCODING)

    return copy_wl, paste_wl


def init_klipper_clipboard():
    def copy_klipper(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        with subprocess.Popen(
            [
                "qdbus",
                "org.kde.klipper",
                "/klipper",
                "setClipboardContents",
                text.encode(ENCODING),
            ],
            stdin=subprocess.PIPE,
            close_fds=True,
        ) as p:
            p.communicate(input=None)

    def paste_klipper():
        with subprocess.Popen(
            ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],
            stdout=subprocess.PIPE,
            close_fds=True,
        ) as p:
            stdout = p.communicate()[0]

        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
        # TODO: https://github.com/asweigart/pyperclip/issues/43
        clipboardContents = stdout.decode(ENCODING)
        # even if blank, Klipper will append a newline at the end
        assert len(clipboardContents) > 0
        # make sure that newline is there
        assert clipboardContents.endswith("\n")
        if clipboardContents.endswith("\n"):
            clipboardContents = clipboardContents[:-1]
        return clipboardContents

    return copy_klipper, paste_klipper


def init_dev_clipboard_clipboard():
    def copy_dev_clipboard(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        if text == "":
            warnings.warn(
                "Pyperclip cannot copy a blank string to the clipboard on Cygwin. "
                "This is effectively a no-op.",
                stacklevel=find_stack_level(),
            )
        if "\r" in text:
            warnings.warn(
                "Pyperclip cannot handle \\r characters on Cygwin.",
                stacklevel=find_stack_level(),
            )

        with open("/dev/clipboard", "w", encoding="utf-8") as fd:
            fd.write(text)

    def paste_dev_clipboard() -> str:
        with open("/dev/clipboard", encoding="utf-8") as fd:
            content = fd.read()
        return content

    return copy_dev_clipboard, paste_dev_clipboard


def init_no_clipboard():
    class ClipboardUnavailable:
        def __call__(self, *args, **kwargs):
            raise PyperclipException(EXCEPT_MSG)

        def __bool__(self) -> bool:
            return False

    return ClipboardUnavailable(), ClipboardUnavailable()


# Windows-related clipboard functions:
class CheckedCall:
    def __init__(self, f) -> None:
        super().__setattr__("f", f)

    def __call__(self, *args):
        ret = self.f(*args)
        if not ret and get_errno():
            raise PyperclipWindowsException("Error calling " + self.f.__name__)
        return ret

    def __setattr__(self, key, value):
        setattr(self.f, key, value)


def init_windows_clipboard():
    global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
    global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
    from ctypes.wintypes import (
        BOOL,
        DWORD,
        HANDLE,
        HGLOBAL,
        HINSTANCE,
        HMENU,
        HWND,
        INT,
        LPCSTR,
        LPVOID,
        UINT,
    )

    windll = ctypes.windll
    msvcrt = ctypes.CDLL("msvcrt")

    safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
    safeCreateWindowExA.argtypes = [
        DWORD,
        LPCSTR,
        LPCSTR,
        DWORD,
        INT,
        INT,
        INT,
        INT,
        HWND,
        HMENU,
        HINSTANCE,
        LPVOID,
    ]
    safeCreateWindowExA.restype = HWND

    safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
    safeDestroyWindow.argtypes = [HWND]
    safeDestroyWindow.restype = BOOL

    OpenClipboard = windll.user32.OpenClipboard
    OpenClipboard.argtypes = [HWND]
    OpenClipboard.restype = BOOL

    safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
    safeCloseClipboard.argtypes = []
    safeCloseClipboard.restype = BOOL

    safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
    safeEmptyClipboard.argtypes = []
    safeEmptyClipboard.restype = BOOL

    safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
    safeGetClipboardData.argtypes = [UINT]
    safeGetClipboardData.restype = HANDLE

    safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
    safeSetClipboardData.argtypes = [UINT, HANDLE]
    safeSetClipboardData.restype = HANDLE

    safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
    safeGlobalAlloc.argtypes = [UINT, c_size_t]
    safeGlobalAlloc.restype = HGLOBAL

    safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
    safeGlobalLock.argtypes = [HGLOBAL]
    safeGlobalLock.restype = LPVOID

    safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
    safeGlobalUnlock.argtypes = [HGLOBAL]
    safeGlobalUnlock.restype = BOOL

    wcslen = CheckedCall(msvcrt.wcslen)
    wcslen.argtypes = [c_wchar_p]
    wcslen.restype = UINT

    GMEM_MOVEABLE = 0x0002
    CF_UNICODETEXT = 13

    @contextlib.contextmanager
    def window():
        """
        Context that provides a valid Windows hwnd.
        """
        # we really just need the hwnd, so setting "STATIC"
        # as predefined lpClass is just fine.
        hwnd = safeCreateWindowExA(
            0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None
        )
        try:
            yield hwnd
        finally:
            safeDestroyWindow(hwnd)

    @contextlib.contextmanager
    def clipboard(hwnd):
        """
        Context manager that opens the clipboard and prevents
        other applications from modifying the clipboard content.
        """
        # We may not get the clipboard handle immediately because
        # some other application is accessing it (?)
        # We try for at least 500ms to get the clipboard.
        t = time.time() + 0.5
        success = False
        while time.time() < t:
            success = OpenClipboard(hwnd)
            if success:
                break
            time.sleep(0.01)
        if not success:
            raise PyperclipWindowsException("Error calling OpenClipboard")

        try:
            yield
        finally:
            safeCloseClipboard()

    def copy_windows(text):
        # This function is heavily based on
        # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard

        text = _stringifyText(text)  # Converts non-str values to str.

        with window() as hwnd:
            # http://msdn.com/ms649048
            # If an application calls OpenClipboard with hwnd set to NULL,
            # EmptyClipboard sets the clipboard owner to NULL;
            # this causes SetClipboardData to fail.
            # => We need a valid hwnd to copy something.
            with clipboard(hwnd):
                safeEmptyClipboard()

                if text:
                    # http://msdn.com/ms649051
                    # If the hMem parameter identifies a memory object,
                    # the object must have been allocated using the
                    # function with the GMEM_MOVEABLE flag.
                    count = wcslen(text) + 1
                    handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar))
                    locked_handle = safeGlobalLock(handle)

                    ctypes.memmove(
                        c_wchar_p(locked_handle),
                        c_wchar_p(text),
                        count * sizeof(c_wchar),
                    )

                    safeGlobalUnlock(handle)
                    safeSetClipboardData(CF_UNICODETEXT, handle)

    def paste_windows():
        with clipboard(None):
            handle = safeGetClipboardData(CF_UNICODETEXT)
            if not handle:
                # GetClipboardData may return NULL with errno == NO_ERROR
                # if the clipboard is empty.
                # (Also, it may return a handle to an empty buffer,
                # but technically that's not empty)
                return ""
            return c_wchar_p(handle).value

    return copy_windows, paste_windows


def init_wsl_clipboard():
    def copy_wsl(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        with subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) as p:
            p.communicate(input=text.encode(ENCODING))

    def paste_wsl():
        with subprocess.Popen(
            ["powershell.exe", "-command", "Get-Clipboard"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        ) as p:
            stdout = p.communicate()[0]
        # WSL appends "\r\n" to the contents.
        return stdout[:-2].decode(ENCODING)

    return copy_wsl, paste_wsl


# Automatic detection of clipboard mechanisms
# and importing is done in determine_clipboard():
def determine_clipboard():
    """
    Determine the OS/platform and set the copy() and paste() functions
    accordingly.
    """
    global Foundation, AppKit, qtpy, PyQt4, PyQt5

    # Setup for the CYGWIN platform:
    if (
        "cygwin" in platform.system().lower()
    ):  # Cygwin has a variety of values returned by platform.system(),
        # such as 'CYGWIN_NT-6.1'
        # FIXME(pyperclip#55): pyperclip currently does not support Cygwin,
        # see https://github.com/asweigart/pyperclip/issues/55
        if os.path.exists("/dev/clipboard"):
            warnings.warn(
                "Pyperclip's support for Cygwin is not perfect, "
                "see https://github.com/asweigart/pyperclip/issues/55",
                stacklevel=find_stack_level(),
            )
            return init_dev_clipboard_clipboard()

    # Setup for the WINDOWS platform:
    elif os.name == "nt" or platform.system() == "Windows":
        return init_windows_clipboard()

    if platform.system() == "Linux":
        if _executable_exists("wslconfig.exe"):
            return init_wsl_clipboard()

    # Setup for the macOS platform:
    if os.name == "mac" or platform.system() == "Darwin":
        try:
            import AppKit
            import Foundation  # check if pyobjc is installed
        except ImportError:
            return init_osx_pbcopy_clipboard()
        else:
            return init_osx_pyobjc_clipboard()

    # Setup for the LINUX platform:
    if HAS_DISPLAY:
        if os.environ.get("WAYLAND_DISPLAY") and _executable_exists("wl-copy"):
            return init_wl_clipboard()
        if _executable_exists("xsel"):
            return init_xsel_clipboard()
        if _executable_exists("xclip"):
            return init_xclip_clipboard()
        if _executable_exists("klipper") and _executable_exists("qdbus"):
            return init_klipper_clipboard()

        try:
            # qtpy is a small abstraction layer that lets you write applications
            # using a single api call to either PyQt or PySide.
            # https://pypi.python.org/project/QtPy
            import qtpy  # check if qtpy is installed
        except ImportError:
            # If qtpy isn't installed, fall back on importing PyQt4.
            try:
                import PyQt5  # check if PyQt5 is installed
            except ImportError:
                try:
                    import PyQt4  # check if PyQt4 is installed
                except ImportError:
                    pass  # We want to fail fast for all non-ImportError exceptions.
                else:
                    return init_qt_clipboard()
            else:
                return init_qt_clipboard()
        else:
            return init_qt_clipboard()

    return init_no_clipboard()


def set_clipboard(clipboard):
    """
    Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how
    the copy() and paste() functions interact with the operating system to
    implement the copy/paste feature. The clipboard parameter must be one of:
        - pbcopy
        - pyobjc (default on macOS)
        - qt
        - xclip
        - xsel
        - klipper
        - windows (default on Windows)
        - no (this is what is set when no clipboard mechanism can be found)
    """
    global copy, paste

    clipboard_types = {
        "pbcopy": init_osx_pbcopy_clipboard,
        "pyobjc": init_osx_pyobjc_clipboard,
        "qt": init_qt_clipboard,  # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
        "xclip": init_xclip_clipboard,
        "xsel": init_xsel_clipboard,
        "wl-clipboard": init_wl_clipboard,
        "klipper": init_klipper_clipboard,
        "windows": init_windows_clipboard,
        "no": init_no_clipboard,
    }

    if clipboard not in clipboard_types:
        allowed_clipboard_types = [repr(_) for _ in clipboard_types]
        raise ValueError(
            f"Argument must be one of {', '.join(allowed_clipboard_types)}"
        )

    # Sets pyperclip's copy() and paste() functions:
    copy, paste = clipboard_types[clipboard]()


def lazy_load_stub_copy(text):
    """
    A stub function for copy(), which will load the real copy() function when
    called so that the real copy() function is used for later calls.

    This allows users to import pyperclip without having determine_clipboard()
    automatically run, which will automatically select a clipboard mechanism.
    This could be a problem if it selects, say, the memory-heavy PyQt4 module
    but the user was just going to immediately call set_clipboard() to use a
    different clipboard mechanism.

    The lazy loading this stub function implements gives the user a chance to
    call set_clipboard() to pick another clipboard mechanism. Or, if the user
    simply calls copy() or paste() without calling set_clipboard() first,
    will fall back on whatever clipboard mechanism that determine_clipboard()
    automatically chooses.
    """
    global copy, paste
    copy, paste = determine_clipboard()
    return copy(text)


def lazy_load_stub_paste():
    """
    A stub function for paste(), which will load the real paste() function when
    called so that the real paste() function is used for later calls.

    This allows users to import pyperclip without having determine_clipboard()
    automatically run, which will automatically select a clipboard mechanism.
    This could be a problem if it selects, say, the memory-heavy PyQt4 module
    but the user was just going to immediately call set_clipboard() to use a
    different clipboard mechanism.

    The lazy loading this stub function implements gives the user a chance to
    call set_clipboard() to pick another clipboard mechanism. Or, if the user
    simply calls copy() or paste() without calling set_clipboard() first,
    will fall back on whatever clipboard mechanism that determine_clipboard()
    automatically chooses.
    """
    global copy, paste
    copy, paste = determine_clipboard()
    return paste()


def is_available() -> bool:
    return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste


# Initially, copy() and paste() are set to lazy loading wrappers which will
# set `copy` and `paste` to real functions the first time they're used, unless
# set_clipboard() or determine_clipboard() is called first.
copy, paste = lazy_load_stub_copy, lazy_load_stub_paste


def waitForPaste(timeout=None):
    """This function call blocks until a non-empty text string exists on the
    clipboard. It returns this text.

    This function raises PyperclipTimeoutException if timeout was set to
    a number of seconds that has elapsed without non-empty text being put on
    the clipboard."""
    startTime = time.time()
    while True:
        clipboardText = paste()
        if clipboardText != "":
            return clipboardText
        time.sleep(0.01)

        if timeout is not None and time.time() > startTime + timeout:
            raise PyperclipTimeoutException(
                "waitForPaste() timed out after " + str(timeout) + " seconds."
            )


def waitForNewPaste(timeout=None):
    """This function call blocks until a new text string exists on the
    clipboard that is different from the text that was there when the function
    was first called. It returns this text.

    This function raises PyperclipTimeoutException if timeout was set to
    a number of seconds that has elapsed without non-empty text being put on
    the clipboard."""
    startTime = time.time()
    originalText = paste()
    while True:
        currentText = paste()
        if currentText != originalText:
            return currentText
        time.sleep(0.01)

        if timeout is not None and time.time() > startTime + timeout:
            raise PyperclipTimeoutException(
                "waitForNewPaste() timed out after " + str(timeout) + " seconds."
            )


__all__ = [
    "copy",
    "paste",
    "waitForPaste",
    "waitForNewPaste",
    "set_clipboard",
    "determine_clipboard",
]

# pandas aliases
clipboard_get = paste
clipboard_set = copy
venv/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (19.6 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__init__.py
ADDED
@@ -0,0 +1,19 @@
from pandas.io.excel._base import (
    ExcelFile,
    ExcelWriter,
    read_excel,
)
from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
from pandas.io.excel._util import register_writer
from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter

__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]


register_writer(_OpenpyxlWriter)

register_writer(_XlsxWriter)


register_writer(_ODSWriter)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (636 Bytes)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (48.1 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_calamine.cpython-310.pyc
ADDED
Binary file (4.35 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc
ADDED
Binary file (7.19 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc
ADDED
Binary file (8.78 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc
ADDED
Binary file (17.8 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc
ADDED
Binary file (4.17 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc
ADDED
Binary file (8.4 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc
ADDED
Binary file (4.24 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc
ADDED
Binary file (6.14 kB)
venv/lib/python3.10/site-packages/pandas/io/excel/_base.py
ADDED
@@ -0,0 +1,1659 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from collections.abc import (
|
4 |
+
Hashable,
|
5 |
+
Iterable,
|
6 |
+
Mapping,
|
7 |
+
Sequence,
|
8 |
+
)
|
9 |
+
import datetime
|
10 |
+
from functools import partial
|
11 |
+
from io import BytesIO
|
12 |
+
import os
|
13 |
+
from textwrap import fill
|
14 |
+
from typing import (
|
15 |
+
IO,
|
16 |
+
TYPE_CHECKING,
|
17 |
+
Any,
|
18 |
+
Callable,
|
19 |
+
Generic,
|
20 |
+
Literal,
|
21 |
+
TypeVar,
|
22 |
+
Union,
|
23 |
+
cast,
|
24 |
+
overload,
|
25 |
+
)
|
26 |
+
import warnings
|
27 |
+
import zipfile
|
28 |
+
|
29 |
+
from pandas._config import config
|
30 |
+
|
31 |
+
from pandas._libs import lib
|
32 |
+
from pandas._libs.parsers import STR_NA_VALUES
|
33 |
+
from pandas.compat._optional import (
|
34 |
+
get_version,
|
35 |
+
import_optional_dependency,
|
36 |
+
)
|
37 |
+
from pandas.errors import EmptyDataError
|
38 |
+
from pandas.util._decorators import (
|
39 |
+
Appender,
|
40 |
+
doc,
|
41 |
+
)
|
42 |
+
from pandas.util._exceptions import find_stack_level
|
43 |
+
from pandas.util._validators import check_dtype_backend
|
44 |
+
|
45 |
+
from pandas.core.dtypes.common import (
|
46 |
+
is_bool,
|
47 |
+
is_float,
|
48 |
+
is_integer,
|
49 |
+
is_list_like,
|
50 |
+
)
|
51 |
+
|
52 |
+
from pandas.core.frame import DataFrame
|
53 |
+
from pandas.core.shared_docs import _shared_docs
|
54 |
+
from pandas.util.version import Version
|
55 |
+
|
56 |
+
from pandas.io.common import (
|
57 |
+
IOHandles,
|
58 |
+
get_handle,
|
59 |
+
stringify_path,
|
60 |
+
validate_header_arg,
|
61 |
+
)
|
62 |
+
from pandas.io.excel._util import (
|
63 |
+
fill_mi_header,
|
64 |
+
get_default_engine,
|
65 |
+
get_writer,
|
66 |
+
maybe_convert_usecols,
|
67 |
+
pop_header_name,
|
68 |
+
)
|
69 |
+
from pandas.io.parsers import TextParser
|
70 |
+
from pandas.io.parsers.readers import validate_integer
|
71 |
+
|
72 |
+
if TYPE_CHECKING:
|
73 |
+
from types import TracebackType
|
74 |
+
|
75 |
+
from pandas._typing import (
|
76 |
+
DtypeArg,
|
77 |
+
DtypeBackend,
|
78 |
+
ExcelWriterIfSheetExists,
|
79 |
+
FilePath,
|
80 |
+
IntStrT,
|
81 |
+
ReadBuffer,
|
82 |
+
Self,
|
83 |
+
SequenceNotStr,
|
84 |
+
StorageOptions,
|
85 |
+
WriteExcelBuffer,
|
86 |
+
)
|
87 |
+
_read_excel_doc = (
|
88 |
+
"""
|
89 |
+
Read an Excel file into a ``pandas`` ``DataFrame``.
|
90 |
+
|
91 |
+
Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
|
92 |
+
read from a local filesystem or URL. Supports an option to read
|
93 |
+
a single sheet or a list of sheets.
|
94 |
+
|
95 |
+
Parameters
|
96 |
+
----------
|
97 |
+
io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
|
98 |
+
Any valid string path is acceptable. The string could be a URL. Valid
|
99 |
+
URL schemes include http, ftp, s3, and file. For file URLs, a host is
|
100 |
+
expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
|
101 |
+
|
102 |
+
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
|
103 |
+
|
104 |
+
By file-like object, we refer to objects with a ``read()`` method,
|
105 |
+
such as a file handle (e.g. via builtin ``open`` function)
|
106 |
+
or ``StringIO``.
|
107 |
+
|
108 |
+
.. deprecated:: 2.1.0
|
109 |
+
Passing byte strings is deprecated. To read from a
|
110 |
+
byte string, wrap it in a ``BytesIO`` object.
|
111 |
+
sheet_name : str, int, list, or None, default 0
|
112 |
+
Strings are used for sheet names. Integers are used in zero-indexed
|
113 |
+
sheet positions (chart sheets do not count as a sheet position).
|
114 |
+
Lists of strings/integers are used to request multiple sheets.
|
115 |
+
Specify ``None`` to get all worksheets.
|
116 |
+
|
117 |
+
Available cases:
|
118 |
+
|
119 |
+
* Defaults to ``0``: 1st sheet as a `DataFrame`
|
120 |
+
* ``1``: 2nd sheet as a `DataFrame`
|
121 |
+
* ``"Sheet1"``: Load sheet with name "Sheet1"
|
122 |
+
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
|
123 |
+
as a dict of `DataFrame`
|
124 |
+
* ``None``: All worksheets.
|
125 |
+
|
126 |
+
header : int, list of int, default 0
|
127 |
+
Row (0-indexed) to use for the column labels of the parsed
|
128 |
+
DataFrame. If a list of integers is passed those row positions will
|
129 |
+
be combined into a ``MultiIndex``. Use None if there is no header.
|
130 |
+
names : array-like, default None
|
131 |
+
List of column names to use. If file contains no header row,
|
132 |
+
then you should explicitly pass header=None.
|
133 |
+
index_col : int, str, list of int, default None
|
134 |
+
Column (0-indexed) to use as the row labels of the DataFrame.
|
135 |
+
Pass None if there is no such column. If a list is passed,
|
136 |
+
those columns will be combined into a ``MultiIndex``. If a
|
137 |
+
subset of data is selected with ``usecols``, index_col
|
138 |
+
is based on the subset.
|
139 |
+
|
140 |
+
Missing values will be forward filled to allow roundtripping with
|
141 |
+
``to_excel`` for ``merged_cells=True``. To avoid forward filling the
|
142 |
+
missing values use ``set_index`` after reading the data instead of
|
143 |
+
``index_col``.
|
144 |
+
usecols : str, list-like, or callable, default None
|
145 |
+
* If None, then parse all columns.
|
146 |
+
* If str, then indicates comma separated list of Excel column letters
|
147 |
+
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
|
148 |
+
both sides.
|
149 |
+
* If list of int, then indicates list of column numbers to be parsed
|
150 |
+
(0-indexed).
|
151 |
+
* If list of string, then indicates list of column names to be parsed.
|
152 |
+
* If callable, then evaluate each column name against it and parse the
|
153 |
+
column if the callable returns ``True``.
|
154 |
+
|
155 |
+
Returns a subset of the columns according to behavior above.
|
156 |
+
dtype : Type name or dict of column -> type, default None
|
157 |
+
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}}
|
158 |
+
Use ``object`` to preserve data as stored in Excel and not interpret dtype,
|
159 |
+
which will necessarily result in ``object`` dtype.
|
160 |
+
If converters are specified, they will be applied INSTEAD
|
161 |
+
of dtype conversion.
|
162 |
+
If you use ``None``, it will infer the dtype of each column based on the data.
|
163 |
+
engine : {{'openpyxl', 'calamine', 'odf', 'pyxlsb', 'xlrd'}}, default None
|
164 |
+
If io is not a buffer or path, this must be set to identify io.
|
165 |
+
Engine compatibility :
|
166 |
+
|
167 |
+
- ``openpyxl`` supports newer Excel file formats.
|
168 |
+
- ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
|
169 |
+
and OpenDocument (.ods) file formats.
|
170 |
+
- ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
|
171 |
+
- ``pyxlsb`` supports Binary Excel files.
|
172 |
+
- ``xlrd`` supports old-style Excel files (.xls).
|
173 |
+
|
174 |
+
When ``engine=None``, the following logic will be used to determine the engine:
|
175 |
+
|
176 |
+
- If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
|
177 |
+
then `odf <https://pypi.org/project/odfpy/>`_ will be used.
|
178 |
+
- Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used.
|
179 |
+
- Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used.
|
180 |
+
- Otherwise ``openpyxl`` will be used.
|
181 |
+
converters : dict, default None
|
182 |
+
Dict of functions for converting values in certain columns. Keys can
|
183 |
+
either be integers or column labels, values are functions that take one
|
184 |
+
input argument, the Excel cell content, and return the transformed
|
185 |
+
content.
|
186 |
+
true_values : list, default None
|
187 |
+
Values to consider as True.
|
188 |
+
false_values : list, default None
|
189 |
+
Values to consider as False.
|
190 |
+
skiprows : list-like, int, or callable, optional
|
191 |
+
Line numbers to skip (0-indexed) or number of lines to skip (int) at the
|
192 |
+
start of the file. If callable, the callable function will be evaluated
|
193 |
+
against the row indices, returning True if the row should be skipped and
|
194 |
+
False otherwise. An example of a valid callable argument would be ``lambda
|
195 |
+
x: x in [0, 2]``.
|
196 |
+
nrows : int, default None
|
197 |
+
Number of rows to parse.
|
198 |
+
na_values : scalar, str, list-like, or dict, default None
|
199 |
+
Additional strings to recognize as NA/NaN. If dict passed, specific
|
200 |
+
per-column NA values. By default the following values are interpreted
|
201 |
+
as NaN: '"""
|
202 |
+
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
|
203 |
+
+ """'.
|
204 |
+
keep_default_na : bool, default True
|
205 |
+
Whether or not to include the default NaN values when parsing the data.
|
206 |
+
Depending on whether ``na_values`` is passed in, the behavior is as follows:
|
207 |
+
|
208 |
+
* If ``keep_default_na`` is True, and ``na_values`` are specified,
|
209 |
+
``na_values`` is appended to the default NaN values used for parsing.
|
210 |
+
* If ``keep_default_na`` is True, and ``na_values`` are not specified, only
|
211 |
+
the default NaN values are used for parsing.
|
212 |
+
* If ``keep_default_na`` is False, and ``na_values`` are specified, only
|
213 |
+
the NaN values specified ``na_values`` are used for parsing.
|
214 |
+
* If ``keep_default_na`` is False, and ``na_values`` are not specified, no
|
215 |
+
strings will be parsed as NaN.
|
216 |
+
|
217 |
+
Note that if `na_filter` is passed in as False, the ``keep_default_na`` and
|
218 |
+
``na_values`` parameters will be ignored.
|
219 |
+
na_filter : bool, default True
|
220 |
+
Detect missing value markers (empty strings and the value of na_values). In
|
221 |
+
data without any NAs, passing ``na_filter=False`` can improve the
|
222 |
+
performance of reading a large file.
|
223 |
+
verbose : bool, default False
|
224 |
+
Indicate number of NA values placed in non-numeric columns.
|
225 |
+
parse_dates : bool, list-like, or dict, default False
|
226 |
+
The behavior is as follows:
|
227 |
+
|
228 |
+
* ``bool``. If True -> try parsing the index.
|
229 |
+
* ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
|
230 |
+
each as a separate date column.
|
231 |
+
* ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
|
232 |
+
a single date column.
|
233 |
+
* ``dict``, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
|
234 |
+
result 'foo'
|
235 |
+
|
236 |
+
If a column or index contains an unparsable date, the entire column or
|
237 |
+
index will be returned unaltered as an object data type. If you don`t want to
|
238 |
+
parse some cells as date just change their type in Excel to "Text".
|
239 |
+
For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
|
240 |
+
|
241 |
+
Note: A fast-path exists for iso8601-formatted dates.
|
242 |
+
date_parser : function, optional
|
243 |
+
Function to use for converting a sequence of string columns to an array of
|
244 |
+
datetime instances. The default uses ``dateutil.parser.parser`` to do the
|
245 |
+
conversion. Pandas will try to call `date_parser` in three different ways,
|
246 |
+
advancing to the next if an exception occurs: 1) Pass one or more arrays
|
247 |
+
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
|
248 |
+
string values from the columns defined by `parse_dates` into a single array
|
249 |
+
and pass that; and 3) call `date_parser` once for each row using one or
|
250 |
+
more strings (corresponding to the columns defined by `parse_dates`) as
|
251 |
+
arguments.
|
252 |
+
|
253 |
+
.. deprecated:: 2.0.0
|
254 |
+
Use ``date_format`` instead, or read in as ``object`` and then apply
|
255 |
+
:func:`to_datetime` as-needed.
|
256 |
+
date_format : str or dict of column -> format, default ``None``
|
257 |
+
If used in conjunction with ``parse_dates``, will parse dates according to this
|
258 |
+
format. For anything more complex,
|
259 |
+
please read in as ``object`` and then apply :func:`to_datetime` as-needed.
|
260 |
+
|
261 |
+
.. versionadded:: 2.0.0
|
262 |
+
thousands : str, default None
|
263 |
+
Thousands separator for parsing string columns to numeric. Note that
|
264 |
+
this parameter is only necessary for columns stored as TEXT in Excel,
|
265 |
+
any numeric columns will automatically be parsed, regardless of display
|
266 |
+
format.
|
267 |
+
decimal : str, default '.'
|
268 |
+
Character to recognize as decimal point for parsing string columns to numeric.
|
269 |
+
Note that this parameter is only necessary for columns stored as TEXT in Excel,
|
270 |
+
any numeric columns will automatically be parsed, regardless of display
|
271 |
+
format.(e.g. use ',' for European data).
|
272 |
+
|
273 |
+
.. versionadded:: 1.4.0
|
274 |
+
|
275 |
+
comment : str, default None
|
276 |
+
Comments out remainder of line. Pass a character or characters to this
|
277 |
+
argument to indicate comments in the input file. Any data between the
|
278 |
+
comment string and the end of the current line is ignored.
|
279 |
+
skipfooter : int, default 0
|
280 |
+
Rows at the end to skip (0-indexed).
|
281 |
+
{storage_options}
|
282 |
+
|
283 |
+
dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
|
284 |
+
Back-end data type applied to the resultant :class:`DataFrame`
|
285 |
+
(still experimental). Behaviour is as follows:
|
286 |
+
|
287 |
+
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
|
288 |
+
(default).
|
289 |
+
* ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
|
290 |
+
DataFrame.
|
291 |
+
|
292 |
+
.. versionadded:: 2.0
|
293 |
+
|
294 |
+
engine_kwargs : dict, optional
|
295 |
+
Arbitrary keyword arguments passed to excel engine.
|
296 |
+
|
297 |
+
Returns
|
298 |
+
-------
|
299 |
+
DataFrame or dict of DataFrames
|
300 |
+
DataFrame from the passed in Excel file. See notes in sheet_name
|
301 |
+
argument for more information on when a dict of DataFrames is returned.
|
302 |
+
|
303 |
+
See Also
|
304 |
+
--------
|
305 |
+
DataFrame.to_excel : Write DataFrame to an Excel file.
|
306 |
+
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
|
307 |
+
read_csv : Read a comma-separated values (csv) file into DataFrame.
|
308 |
+
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
|
309 |
+
|
310 |
+
Notes
|
311 |
+
-----
|
312 |
+
For specific information on the methods used for each Excel engine, refer to the pandas
|
313 |
+
:ref:`user guide <io.excel_reader>`
|
314 |
+
|
315 |
+
Examples
|
316 |
+
--------
|
317 |
+
The file can be read using the file name as string or an open file object:
|
318 |
+
|
319 |
+
>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
|
320 |
+
Name Value
|
321 |
+
0 string1 1
|
322 |
+
1 string2 2
|
323 |
+
2 #Comment 3
|
324 |
+
|
325 |
+
>>> pd.read_excel(open('tmp.xlsx', 'rb'),
|
326 |
+
... sheet_name='Sheet3') # doctest: +SKIP
|
327 |
+
Unnamed: 0 Name Value
|
328 |
+
0 0 string1 1
|
329 |
+
1 1 string2 2
|
330 |
+
2 2 #Comment 3
|
331 |
+
|
332 |
+
Index and header can be specified via the `index_col` and `header` arguments
|
333 |
+
|
334 |
+
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
|
335 |
+
0 1 2
|
336 |
+
0 NaN Name Value
|
337 |
+
1 0.0 string1 1
|
338 |
+
2 1.0 string2 2
|
339 |
+
3 2.0 #Comment 3
|
340 |
+
|
341 |
+
Column types are inferred but can be explicitly specified
|
342 |
+
|
343 |
+
>>> pd.read_excel('tmp.xlsx', index_col=0,
|
344 |
+
... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP
|
345 |
+
Name Value
|
346 |
+
0 string1 1.0
|
347 |
+
1 string2 2.0
|
348 |
+
2 #Comment 3.0
|
349 |
+
|
350 |
+
True, False, and NA values, and thousands separators have defaults,
|
351 |
+
but can be explicitly specified, too. Supply the values you would like
|
352 |
+
as strings or lists of strings!
|
353 |
+
|
354 |
+
>>> pd.read_excel('tmp.xlsx', index_col=0,
|
355 |
+
... na_values=['string1', 'string2']) # doctest: +SKIP
|
356 |
+
Name Value
|
357 |
+
0 NaN 1
|
358 |
+
1 NaN 2
|
359 |
+
2 #Comment 3
|
360 |
+
|
361 |
+
Comment lines in the excel input file can be skipped using the
|
362 |
+
``comment`` kwarg.
|
363 |
+
|
364 |
+
>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
|
365 |
+
Name Value
|
366 |
+
0 string1 1.0
|
367 |
+
1 string2 2.0
|
368 |
+
2 None NaN
|
369 |
+
"""
|
370 |
+
)
|
371 |
+
|
372 |
+
|
373 |
+
@overload
|
374 |
+
def read_excel(
|
375 |
+
io,
|
376 |
+
# sheet name is str or int -> DataFrame
|
377 |
+
sheet_name: str | int = ...,
|
378 |
+
*,
|
379 |
+
header: int | Sequence[int] | None = ...,
|
380 |
+
names: SequenceNotStr[Hashable] | range | None = ...,
|
381 |
+
index_col: int | str | Sequence[int] | None = ...,
|
382 |
+
usecols: int
|
383 |
+
| str
|
384 |
+
| Sequence[int]
|
385 |
+
| Sequence[str]
|
386 |
+
| Callable[[str], bool]
|
387 |
+
| None = ...,
|
388 |
+
dtype: DtypeArg | None = ...,
|
389 |
+
engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
|
390 |
+
converters: dict[str, Callable] | dict[int, Callable] | None = ...,
|
391 |
+
true_values: Iterable[Hashable] | None = ...,
|
392 |
+
false_values: Iterable[Hashable] | None = ...,
|
393 |
+
skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
|
394 |
+
nrows: int | None = ...,
|
395 |
+
na_values=...,
|
396 |
+
keep_default_na: bool = ...,
|
397 |
+
na_filter: bool = ...,
|
398 |
+
verbose: bool = ...,
|
399 |
+
parse_dates: list | dict | bool = ...,
|
400 |
+
date_parser: Callable | lib.NoDefault = ...,
|
401 |
+
date_format: dict[Hashable, str] | str | None = ...,
|
402 |
+
thousands: str | None = ...,
|
403 |
+
decimal: str = ...,
|
404 |
+
comment: str | None = ...,
|
405 |
+
skipfooter: int = ...,
|
406 |
+
storage_options: StorageOptions = ...,
|
407 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
408 |
+
) -> DataFrame:
|
409 |
+
...
|
410 |
+
|
411 |
+
|
412 |
+
@overload
|
413 |
+
def read_excel(
|
414 |
+
io,
|
415 |
+
# sheet name is list or None -> dict[IntStrT, DataFrame]
|
416 |
+
sheet_name: list[IntStrT] | None,
|
417 |
+
*,
|
418 |
+
header: int | Sequence[int] | None = ...,
|
419 |
+
names: SequenceNotStr[Hashable] | range | None = ...,
|
420 |
+
index_col: int | str | Sequence[int] | None = ...,
|
421 |
+
usecols: int
|
422 |
+
| str
|
423 |
+
| Sequence[int]
|
424 |
+
| Sequence[str]
|
425 |
+
| Callable[[str], bool]
|
426 |
+
| None = ...,
|
427 |
+
dtype: DtypeArg | None = ...,
|
428 |
+
engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
|
429 |
+
converters: dict[str, Callable] | dict[int, Callable] | None = ...,
|
430 |
+
true_values: Iterable[Hashable] | None = ...,
|
431 |
+
false_values: Iterable[Hashable] | None = ...,
|
432 |
+
skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
|
433 |
+
nrows: int | None = ...,
|
434 |
+
na_values=...,
|
435 |
+
keep_default_na: bool = ...,
|
436 |
+
na_filter: bool = ...,
|
437 |
+
verbose: bool = ...,
|
438 |
+
parse_dates: list | dict | bool = ...,
|
439 |
+
date_parser: Callable | lib.NoDefault = ...,
|
440 |
+
date_format: dict[Hashable, str] | str | None = ...,
|
441 |
+
thousands: str | None = ...,
|
442 |
+
decimal: str = ...,
|
443 |
+
comment: str | None = ...,
|
444 |
+
skipfooter: int = ...,
|
445 |
+
storage_options: StorageOptions = ...,
|
446 |
+
dtype_backend: DtypeBackend | lib.NoDefault = ...,
|
447 |
+
) -> dict[IntStrT, DataFrame]:
|
448 |
+
...
|
449 |
+
|
450 |
+
|
451 |
+
@doc(storage_options=_shared_docs["storage_options"])
@Appender(_read_excel_doc)
def read_excel(
    io,
    sheet_name: str | int | list[IntStrT] | None = 0,
    *,
    header: int | Sequence[int] | None = 0,
    names: SequenceNotStr[Hashable] | range | None = None,
    index_col: int | str | Sequence[int] | None = None,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = None,
    dtype: DtypeArg | None = None,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None,
    converters: dict[str, Callable] | dict[int, Callable] | None = None,
    true_values: Iterable[Hashable] | None = None,
    false_values: Iterable[Hashable] | None = None,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
    nrows: int | None = None,
    na_values=None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool = False,
    parse_dates: list | dict | bool = False,
    date_parser: Callable | lib.NoDefault = lib.no_default,
    date_format: dict[Hashable, str] | str | None = None,
    thousands: str | None = None,
    decimal: str = ".",
    comment: str | None = None,
    skipfooter: int = 0,
    storage_options: StorageOptions | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    engine_kwargs: dict | None = None,
) -> DataFrame | dict[IntStrT, DataFrame]:
    check_dtype_backend(dtype_backend)
    should_close = False
    if engine_kwargs is None:
        engine_kwargs = {}

    if not isinstance(io, ExcelFile):
        should_close = True
        io = ExcelFile(
            io,
            storage_options=storage_options,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )
    elif engine and engine != io.engine:
        raise ValueError(
            "Engine should not be specified when passing "
            "an ExcelFile - ExcelFile already has the engine set"
        )

    try:
        data = io.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            dtype=dtype,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            keep_default_na=keep_default_na,
            na_filter=na_filter,
            verbose=verbose,
            parse_dates=parse_dates,
            date_parser=date_parser,
            date_format=date_format,
            thousands=thousands,
            decimal=decimal,
            comment=comment,
            skipfooter=skipfooter,
            dtype_backend=dtype_backend,
        )
    finally:
        # make sure to close opened file handles
        if should_close:
            io.close()
    return data
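A minimal usage sketch of the `read_excel` entry point defined above; the workbook name and sheet names are hypothetical, and an engine such as openpyxl is assumed to be installed.

# Illustrative sketch only -- not part of the original pandas file.
import pandas as pd

# A single sheet (str or int) returns a DataFrame, per the first overload.
df = pd.read_excel("report.xlsx", sheet_name=0)

# A list of sheets (or sheet_name=None for every sheet) returns a dict of
# sheet name -> DataFrame, per the second overload.
frames = pd.read_excel("report.xlsx", sheet_name=["Sales", "Costs"])
total_rows = sum(len(frame) for frame in frames.values())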
|
538 |
+
|
539 |
+
|
540 |
+
_WorkbookT = TypeVar("_WorkbookT")
|
541 |
+
|
542 |
+
|
543 |
+
class BaseExcelReader(Generic[_WorkbookT]):
|
544 |
+
book: _WorkbookT
|
545 |
+
|
546 |
+
def __init__(
|
547 |
+
self,
|
548 |
+
filepath_or_buffer,
|
549 |
+
storage_options: StorageOptions | None = None,
|
550 |
+
engine_kwargs: dict | None = None,
|
551 |
+
) -> None:
|
552 |
+
if engine_kwargs is None:
|
553 |
+
engine_kwargs = {}
|
554 |
+
|
555 |
+
# First argument can also be bytes, so create a buffer
|
556 |
+
if isinstance(filepath_or_buffer, bytes):
|
557 |
+
filepath_or_buffer = BytesIO(filepath_or_buffer)
|
558 |
+
|
559 |
+
self.handles = IOHandles(
|
560 |
+
handle=filepath_or_buffer, compression={"method": None}
|
561 |
+
)
|
562 |
+
if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
|
563 |
+
self.handles = get_handle(
|
564 |
+
filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
|
565 |
+
)
|
566 |
+
|
567 |
+
if isinstance(self.handles.handle, self._workbook_class):
|
568 |
+
self.book = self.handles.handle
|
569 |
+
elif hasattr(self.handles.handle, "read"):
|
570 |
+
# N.B. xlrd.Book has a read attribute too
|
571 |
+
self.handles.handle.seek(0)
|
572 |
+
try:
|
573 |
+
self.book = self.load_workbook(self.handles.handle, engine_kwargs)
|
574 |
+
except Exception:
|
575 |
+
self.close()
|
576 |
+
raise
|
577 |
+
else:
|
578 |
+
raise ValueError(
|
579 |
+
"Must explicitly set engine if not passing in buffer or path for io."
|
580 |
+
)
|
581 |
+
|
582 |
+
@property
|
583 |
+
def _workbook_class(self) -> type[_WorkbookT]:
|
584 |
+
raise NotImplementedError
|
585 |
+
|
586 |
+
def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT:
|
587 |
+
raise NotImplementedError
|
588 |
+
|
589 |
+
def close(self) -> None:
|
590 |
+
if hasattr(self, "book"):
|
591 |
+
if hasattr(self.book, "close"):
|
592 |
+
# pyxlsb: opens a TemporaryFile
|
593 |
+
# openpyxl: https://stackoverflow.com/questions/31416842/
|
594 |
+
# openpyxl-does-not-close-excel-workbook-in-read-only-mode
|
595 |
+
self.book.close()
|
596 |
+
elif hasattr(self.book, "release_resources"):
|
597 |
+
# xlrd
|
598 |
+
# https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548
|
599 |
+
self.book.release_resources()
|
600 |
+
self.handles.close()
|
601 |
+
|
602 |
+
@property
|
603 |
+
def sheet_names(self) -> list[str]:
|
604 |
+
raise NotImplementedError
|
605 |
+
|
606 |
+
def get_sheet_by_name(self, name: str):
|
607 |
+
raise NotImplementedError
|
608 |
+
|
609 |
+
def get_sheet_by_index(self, index: int):
|
610 |
+
raise NotImplementedError
|
611 |
+
|
612 |
+
def get_sheet_data(self, sheet, rows: int | None = None):
|
613 |
+
raise NotImplementedError
|
614 |
+
|
615 |
+
def raise_if_bad_sheet_by_index(self, index: int) -> None:
|
616 |
+
n_sheets = len(self.sheet_names)
|
617 |
+
if index >= n_sheets:
|
618 |
+
raise ValueError(
|
619 |
+
f"Worksheet index {index} is invalid, {n_sheets} worksheets found"
|
620 |
+
)
|
621 |
+
|
622 |
+
def raise_if_bad_sheet_by_name(self, name: str) -> None:
|
623 |
+
if name not in self.sheet_names:
|
624 |
+
raise ValueError(f"Worksheet named '{name}' not found")
|
625 |
+
|
626 |
+
def _check_skiprows_func(
|
627 |
+
self,
|
628 |
+
skiprows: Callable,
|
629 |
+
rows_to_use: int,
|
630 |
+
) -> int:
|
631 |
+
"""
|
632 |
+
Determine how many file rows are required to obtain `nrows` data
|
633 |
+
rows when `skiprows` is a function.
|
634 |
+
|
635 |
+
Parameters
|
636 |
+
----------
|
637 |
+
skiprows : function
|
638 |
+
The function passed to read_excel by the user.
|
639 |
+
rows_to_use : int
|
640 |
+
The number of rows that will be needed for the header and
|
641 |
+
the data.
|
642 |
+
|
643 |
+
Returns
|
644 |
+
-------
|
645 |
+
int
|
646 |
+
"""
|
647 |
+
i = 0
|
648 |
+
rows_used_so_far = 0
|
649 |
+
while rows_used_so_far < rows_to_use:
|
650 |
+
if not skiprows(i):
|
651 |
+
rows_used_so_far += 1
|
652 |
+
i += 1
|
653 |
+
return i
|
654 |
+
|
655 |
+
def _calc_rows(
|
656 |
+
self,
|
657 |
+
header: int | Sequence[int] | None,
|
658 |
+
index_col: int | Sequence[int] | None,
|
659 |
+
skiprows: Sequence[int] | int | Callable[[int], object] | None,
|
660 |
+
nrows: int | None,
|
661 |
+
) -> int | None:
|
662 |
+
"""
|
663 |
+
If nrows specified, find the number of rows needed from the
|
664 |
+
file, otherwise return None.
|
665 |
+
|
666 |
+
|
667 |
+
Parameters
|
668 |
+
----------
|
669 |
+
header : int, list of int, or None
|
670 |
+
See read_excel docstring.
|
671 |
+
index_col : int, str, list of int, or None
|
672 |
+
See read_excel docstring.
|
673 |
+
skiprows : list-like, int, callable, or None
|
674 |
+
See read_excel docstring.
|
675 |
+
nrows : int or None
|
676 |
+
See read_excel docstring.
|
677 |
+
|
678 |
+
Returns
|
679 |
+
-------
|
680 |
+
int or None
|
681 |
+
"""
|
682 |
+
if nrows is None:
|
683 |
+
return None
|
684 |
+
if header is None:
|
685 |
+
header_rows = 1
|
686 |
+
elif is_integer(header):
|
687 |
+
header = cast(int, header)
|
688 |
+
header_rows = 1 + header
|
689 |
+
else:
|
690 |
+
header = cast(Sequence, header)
|
691 |
+
header_rows = 1 + header[-1]
|
692 |
+
# If there is a MultiIndex header and an index then there is also
|
693 |
+
# a row containing just the index name(s)
|
694 |
+
if is_list_like(header) and index_col is not None:
|
695 |
+
header = cast(Sequence, header)
|
696 |
+
if len(header) > 1:
|
697 |
+
header_rows += 1
|
698 |
+
if skiprows is None:
|
699 |
+
return header_rows + nrows
|
700 |
+
if is_integer(skiprows):
|
701 |
+
skiprows = cast(int, skiprows)
|
702 |
+
return header_rows + nrows + skiprows
|
703 |
+
if is_list_like(skiprows):
|
704 |
+
|
705 |
+
def f(skiprows: Sequence, x: int) -> bool:
|
706 |
+
return x in skiprows
|
707 |
+
|
708 |
+
skiprows = cast(Sequence, skiprows)
|
709 |
+
return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)
|
710 |
+
if callable(skiprows):
|
711 |
+
return self._check_skiprows_func(
|
712 |
+
skiprows,
|
713 |
+
header_rows + nrows,
|
714 |
+
)
|
715 |
+
# else unexpected skiprows type: read_excel will not optimize
|
716 |
+
# the number of rows read from file
|
717 |
+
return None
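A hedged sketch of the integer-only arithmetic that `_calc_rows` performs above (the real method also adds one extra row for a MultiIndex header combined with an index column, and handles list-like or callable `skiprows` via `_check_skiprows_func`).

# Sketch under the assumption that header and skiprows are plain integers.
def rows_needed(header, nrows, skiprows=0):
    # header=1 means the header sits on the second physical row of the sheet
    header_rows = 1 if header is None else 1 + header
    return header_rows + nrows + skiprows

# header on row index 1, 10 data rows, 2 skipped rows -> 14 file rows to read
assert rows_needed(header=1, nrows=10, skiprows=2) == 14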
|
718 |
+
|
719 |
+
def parse(
|
720 |
+
self,
|
721 |
+
sheet_name: str | int | list[int] | list[str] | None = 0,
|
722 |
+
header: int | Sequence[int] | None = 0,
|
723 |
+
names: SequenceNotStr[Hashable] | range | None = None,
|
724 |
+
index_col: int | Sequence[int] | None = None,
|
725 |
+
usecols=None,
|
726 |
+
dtype: DtypeArg | None = None,
|
727 |
+
true_values: Iterable[Hashable] | None = None,
|
728 |
+
false_values: Iterable[Hashable] | None = None,
|
729 |
+
skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
|
730 |
+
nrows: int | None = None,
|
731 |
+
na_values=None,
|
732 |
+
verbose: bool = False,
|
733 |
+
parse_dates: list | dict | bool = False,
|
734 |
+
date_parser: Callable | lib.NoDefault = lib.no_default,
|
735 |
+
date_format: dict[Hashable, str] | str | None = None,
|
736 |
+
thousands: str | None = None,
|
737 |
+
decimal: str = ".",
|
738 |
+
comment: str | None = None,
|
739 |
+
skipfooter: int = 0,
|
740 |
+
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
|
741 |
+
**kwds,
|
742 |
+
):
|
743 |
+
validate_header_arg(header)
|
744 |
+
validate_integer("nrows", nrows)
|
745 |
+
|
746 |
+
ret_dict = False
|
747 |
+
|
748 |
+
# Keep sheetname to maintain backwards compatibility.
|
749 |
+
sheets: list[int] | list[str]
|
750 |
+
if isinstance(sheet_name, list):
|
751 |
+
sheets = sheet_name
|
752 |
+
ret_dict = True
|
753 |
+
elif sheet_name is None:
|
754 |
+
sheets = self.sheet_names
|
755 |
+
ret_dict = True
|
756 |
+
elif isinstance(sheet_name, str):
|
757 |
+
sheets = [sheet_name]
|
758 |
+
else:
|
759 |
+
sheets = [sheet_name]
|
760 |
+
|
761 |
+
# handle same-type duplicates.
|
762 |
+
sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys()))
|
763 |
+
|
764 |
+
output = {}
|
765 |
+
|
766 |
+
last_sheetname = None
|
767 |
+
for asheetname in sheets:
|
768 |
+
last_sheetname = asheetname
|
769 |
+
if verbose:
|
770 |
+
print(f"Reading sheet {asheetname}")
|
771 |
+
|
772 |
+
if isinstance(asheetname, str):
|
773 |
+
sheet = self.get_sheet_by_name(asheetname)
|
774 |
+
else: # assume an integer if not a string
|
775 |
+
sheet = self.get_sheet_by_index(asheetname)
|
776 |
+
|
777 |
+
file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
|
778 |
+
data = self.get_sheet_data(sheet, file_rows_needed)
|
779 |
+
if hasattr(sheet, "close"):
|
780 |
+
# pyxlsb opens two TemporaryFiles
|
781 |
+
sheet.close()
|
782 |
+
usecols = maybe_convert_usecols(usecols)
|
783 |
+
|
784 |
+
if not data:
|
785 |
+
output[asheetname] = DataFrame()
|
786 |
+
continue
|
787 |
+
|
788 |
+
is_list_header = False
|
789 |
+
is_len_one_list_header = False
|
790 |
+
if is_list_like(header):
|
791 |
+
assert isinstance(header, Sequence)
|
792 |
+
is_list_header = True
|
793 |
+
if len(header) == 1:
|
794 |
+
is_len_one_list_header = True
|
795 |
+
|
796 |
+
if is_len_one_list_header:
|
797 |
+
header = cast(Sequence[int], header)[0]
|
798 |
+
|
799 |
+
# forward fill and pull out names for MultiIndex column
|
800 |
+
header_names = None
|
801 |
+
if header is not None and is_list_like(header):
|
802 |
+
assert isinstance(header, Sequence)
|
803 |
+
|
804 |
+
header_names = []
|
805 |
+
control_row = [True] * len(data[0])
|
806 |
+
|
807 |
+
for row in header:
|
808 |
+
if is_integer(skiprows):
|
809 |
+
assert isinstance(skiprows, int)
|
810 |
+
row += skiprows
|
811 |
+
|
812 |
+
if row > len(data) - 1:
|
813 |
+
raise ValueError(
|
814 |
+
f"header index {row} exceeds maximum index "
|
815 |
+
f"{len(data) - 1} of data.",
|
816 |
+
)
|
817 |
+
|
818 |
+
data[row], control_row = fill_mi_header(data[row], control_row)
|
819 |
+
|
820 |
+
if index_col is not None:
|
821 |
+
header_name, _ = pop_header_name(data[row], index_col)
|
822 |
+
header_names.append(header_name)
|
823 |
+
|
824 |
+
# If there is a MultiIndex header and an index then there is also
|
825 |
+
# a row containing just the index name(s)
|
826 |
+
has_index_names = False
|
827 |
+
if is_list_header and not is_len_one_list_header and index_col is not None:
|
828 |
+
index_col_list: Sequence[int]
|
829 |
+
if isinstance(index_col, int):
|
830 |
+
index_col_list = [index_col]
|
831 |
+
else:
|
832 |
+
assert isinstance(index_col, Sequence)
|
833 |
+
index_col_list = index_col
|
834 |
+
|
835 |
+
# We have to handle mi without names. If any of the entries in the data
|
836 |
+
# columns are not empty, this is a regular row
|
837 |
+
assert isinstance(header, Sequence)
|
838 |
+
if len(header) < len(data):
|
839 |
+
potential_index_names = data[len(header)]
|
840 |
+
potential_data = [
|
841 |
+
x
|
842 |
+
for i, x in enumerate(potential_index_names)
|
843 |
+
if not control_row[i] and i not in index_col_list
|
844 |
+
]
|
845 |
+
has_index_names = all(x == "" or x is None for x in potential_data)
|
846 |
+
|
847 |
+
if is_list_like(index_col):
|
848 |
+
# Forward fill values for MultiIndex index.
|
849 |
+
if header is None:
|
850 |
+
offset = 0
|
851 |
+
elif isinstance(header, int):
|
852 |
+
offset = 1 + header
|
853 |
+
else:
|
854 |
+
offset = 1 + max(header)
|
855 |
+
|
856 |
+
# GH34673: if MultiIndex names present and not defined in the header,
|
857 |
+
# offset needs to be incremented so that forward filling starts
|
858 |
+
# from the first MI value instead of the name
|
859 |
+
if has_index_names:
|
860 |
+
offset += 1
|
861 |
+
|
862 |
+
# Check if we have an empty dataset
|
863 |
+
# before trying to collect data.
|
864 |
+
if offset < len(data):
|
865 |
+
assert isinstance(index_col, Sequence)
|
866 |
+
|
867 |
+
for col in index_col:
|
868 |
+
last = data[offset][col]
|
869 |
+
|
870 |
+
for row in range(offset + 1, len(data)):
|
871 |
+
if data[row][col] == "" or data[row][col] is None:
|
872 |
+
data[row][col] = last
|
873 |
+
else:
|
874 |
+
last = data[row][col]
|
875 |
+
|
876 |
+
# GH 12292 : error when read one empty column from excel file
|
877 |
+
try:
|
878 |
+
parser = TextParser(
|
879 |
+
data,
|
880 |
+
names=names,
|
881 |
+
header=header,
|
882 |
+
index_col=index_col,
|
883 |
+
has_index_names=has_index_names,
|
884 |
+
dtype=dtype,
|
885 |
+
true_values=true_values,
|
886 |
+
false_values=false_values,
|
887 |
+
skiprows=skiprows,
|
888 |
+
nrows=nrows,
|
889 |
+
na_values=na_values,
|
890 |
+
skip_blank_lines=False, # GH 39808
|
891 |
+
parse_dates=parse_dates,
|
892 |
+
date_parser=date_parser,
|
893 |
+
date_format=date_format,
|
894 |
+
thousands=thousands,
|
895 |
+
decimal=decimal,
|
896 |
+
comment=comment,
|
897 |
+
skipfooter=skipfooter,
|
898 |
+
usecols=usecols,
|
899 |
+
dtype_backend=dtype_backend,
|
900 |
+
**kwds,
|
901 |
+
)
|
902 |
+
|
903 |
+
output[asheetname] = parser.read(nrows=nrows)
|
904 |
+
|
905 |
+
if header_names:
|
906 |
+
output[asheetname].columns = output[asheetname].columns.set_names(
|
907 |
+
header_names
|
908 |
+
)
|
909 |
+
|
910 |
+
except EmptyDataError:
|
911 |
+
# No Data, return an empty DataFrame
|
912 |
+
output[asheetname] = DataFrame()
|
913 |
+
|
914 |
+
except Exception as err:
|
915 |
+
err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:])
|
916 |
+
raise err
|
917 |
+
|
918 |
+
if last_sheetname is None:
|
919 |
+
raise ValueError("Sheet name is an empty list")
|
920 |
+
|
921 |
+
if ret_dict:
|
922 |
+
return output
|
923 |
+
else:
|
924 |
+
return output[last_sheetname]
|
925 |
+
|
926 |
+
|
927 |
+
@doc(storage_options=_shared_docs["storage_options"])
|
928 |
+
class ExcelWriter(Generic[_WorkbookT]):
|
929 |
+
"""
|
930 |
+
Class for writing DataFrame objects into excel sheets.
|
931 |
+
|
932 |
+
Default is to use:
|
933 |
+
|
934 |
+
* `xlsxwriter <https://pypi.org/project/XlsxWriter/>`__ for xlsx files if xlsxwriter
|
935 |
+
is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__
|
936 |
+
* `odswriter <https://pypi.org/project/odswriter/>`__ for ods files
|
937 |
+
|
938 |
+
See ``DataFrame.to_excel`` for typical usage.
|
939 |
+
|
940 |
+
The writer should be used as a context manager. Otherwise, call `close()` to save
|
941 |
+
and close any opened file handles.
|
942 |
+
|
943 |
+
Parameters
|
944 |
+
----------
|
945 |
+
path : str or typing.BinaryIO
|
946 |
+
Path to xls or xlsx or ods file.
|
947 |
+
engine : str (optional)
|
948 |
+
Engine to use for writing. If None, defaults to
|
949 |
+
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
|
950 |
+
argument.
|
951 |
+
date_format : str, default None
|
952 |
+
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
|
953 |
+
datetime_format : str, default None
|
954 |
+
Format string for datetime objects written into Excel files.
|
955 |
+
(e.g. 'YYYY-MM-DD HH:MM:SS').
|
956 |
+
mode : {{'w', 'a'}}, default 'w'
|
957 |
+
File mode to use (write or append). Append does not work with fsspec URLs.
|
958 |
+
{storage_options}
|
959 |
+
|
960 |
+
if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'
|
961 |
+
How to behave when trying to write to a sheet that already
|
962 |
+
exists (append mode only).
|
963 |
+
|
964 |
+
* error: raise a ValueError.
|
965 |
+
* new: Create a new sheet, with a name determined by the engine.
|
966 |
+
* replace: Delete the contents of the sheet before writing to it.
|
967 |
+
* overlay: Write contents to the existing sheet without first removing,
|
968 |
+
but possibly over top of, the existing contents.
|
969 |
+
|
970 |
+
.. versionadded:: 1.3.0
|
971 |
+
|
972 |
+
.. versionchanged:: 1.4.0
|
973 |
+
|
974 |
+
Added ``overlay`` option
|
975 |
+
|
976 |
+
engine_kwargs : dict, optional
|
977 |
+
Keyword arguments to be passed into the engine. These will be passed to
|
978 |
+
the following functions of the respective engines:
|
979 |
+
|
980 |
+
* xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
|
981 |
+
* openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
|
982 |
+
* openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
|
983 |
+
* odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
|
984 |
+
|
985 |
+
.. versionadded:: 1.3.0
|
986 |
+
|
987 |
+
Notes
|
988 |
+
-----
|
989 |
+
For compatibility with CSV writers, ExcelWriter serializes lists
|
990 |
+
and dicts to strings before writing.
|
991 |
+
|
992 |
+
Examples
|
993 |
+
--------
|
994 |
+
Default usage:
|
995 |
+
|
996 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
997 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
|
998 |
+
... df.to_excel(writer) # doctest: +SKIP
|
999 |
+
|
1000 |
+
To write to separate sheets in a single file:
|
1001 |
+
|
1002 |
+
>>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP
|
1003 |
+
>>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
1004 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
|
1005 |
+
... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
|
1006 |
+
... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
|
1007 |
+
|
1008 |
+
You can set the date format or datetime format:
|
1009 |
+
|
1010 |
+
>>> from datetime import date, datetime # doctest: +SKIP
|
1011 |
+
>>> df = pd.DataFrame(
|
1012 |
+
... [
|
1013 |
+
... [date(2014, 1, 31), date(1999, 9, 24)],
|
1014 |
+
... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
|
1015 |
+
... ],
|
1016 |
+
... index=["Date", "Datetime"],
|
1017 |
+
... columns=["X", "Y"],
|
1018 |
+
... ) # doctest: +SKIP
|
1019 |
+
>>> with pd.ExcelWriter(
|
1020 |
+
... "path_to_file.xlsx",
|
1021 |
+
... date_format="YYYY-MM-DD",
|
1022 |
+
... datetime_format="YYYY-MM-DD HH:MM:SS"
|
1023 |
+
... ) as writer:
|
1024 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1025 |
+
|
1026 |
+
You can also append to an existing Excel file:
|
1027 |
+
|
1028 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
|
1029 |
+
... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP
|
1030 |
+
|
1031 |
+
Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
|
1032 |
+
already exists:
|
1033 |
+
|
1034 |
+
>>> with ExcelWriter(
|
1035 |
+
... "path_to_file.xlsx",
|
1036 |
+
... mode="a",
|
1037 |
+
... engine="openpyxl",
|
1038 |
+
... if_sheet_exists="replace",
|
1039 |
+
... ) as writer:
|
1040 |
+
... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
|
1041 |
+
|
1042 |
+
You can also write multiple DataFrames to a single sheet. Note that the
|
1043 |
+
``if_sheet_exists`` parameter needs to be set to ``overlay``:
|
1044 |
+
|
1045 |
+
>>> with ExcelWriter("path_to_file.xlsx",
|
1046 |
+
... mode="a",
|
1047 |
+
... engine="openpyxl",
|
1048 |
+
... if_sheet_exists="overlay",
|
1049 |
+
... ) as writer:
|
1050 |
+
... df1.to_excel(writer, sheet_name="Sheet1")
|
1051 |
+
... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP
|
1052 |
+
|
1053 |
+
You can store Excel file in RAM:
|
1054 |
+
|
1055 |
+
>>> import io
|
1056 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
|
1057 |
+
>>> buffer = io.BytesIO()
|
1058 |
+
>>> with pd.ExcelWriter(buffer) as writer:
|
1059 |
+
... df.to_excel(writer)
|
1060 |
+
|
1061 |
+
You can pack Excel file into zip archive:
|
1062 |
+
|
1063 |
+
>>> import zipfile # doctest: +SKIP
|
1064 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
1065 |
+
>>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
|
1066 |
+
... with zf.open("filename.xlsx", "w") as buffer:
|
1067 |
+
... with pd.ExcelWriter(buffer) as writer:
|
1068 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1069 |
+
|
1070 |
+
You can specify additional arguments to the underlying engine:
|
1071 |
+
|
1072 |
+
>>> with pd.ExcelWriter(
|
1073 |
+
... "path_to_file.xlsx",
|
1074 |
+
... engine="xlsxwriter",
|
1075 |
+
... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
|
1076 |
+
... ) as writer:
|
1077 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1078 |
+
|
1079 |
+
In append mode, ``engine_kwargs`` are passed through to
|
1080 |
+
openpyxl's ``load_workbook``:
|
1081 |
+
|
1082 |
+
>>> with pd.ExcelWriter(
|
1083 |
+
... "path_to_file.xlsx",
|
1084 |
+
... engine="openpyxl",
|
1085 |
+
... mode="a",
|
1086 |
+
... engine_kwargs={{"keep_vba": True}}
|
1087 |
+
... ) as writer:
|
1088 |
+
... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
|
1089 |
+
"""
|
1090 |
+
|
1091 |
+
# Defining an ExcelWriter implementation (see abstract methods for more...)
|
1092 |
+
|
1093 |
+
# - Mandatory
|
1094 |
+
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
|
1095 |
+
# --> called to write additional DataFrames to disk
|
1096 |
+
# - ``_supported_extensions`` (tuple of supported extensions), used to
|
1097 |
+
# check that engine supports the given extension.
|
1098 |
+
# - ``_engine`` - string that gives the engine name. Necessary to
|
1099 |
+
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
|
1100 |
+
# lookup.
|
1101 |
+
# - ``save(self)`` --> called to save file to disk
|
1102 |
+
# - Mostly mandatory (i.e. should at least exist)
|
1103 |
+
# - book, cur_sheet, path
|
1104 |
+
|
1105 |
+
# - Optional:
|
1106 |
+
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
|
1107 |
+
# with path as first argument.
|
1108 |
+
|
1109 |
+
# You also need to register the class with ``register_writer()``.
|
1110 |
+
# Technically, ExcelWriter implementations don't need to subclass
|
1111 |
+
# ExcelWriter.
|
1112 |
+
|
1113 |
+
_engine: str
|
1114 |
+
_supported_extensions: tuple[str, ...]
|
1115 |
+
|
1116 |
+
def __new__(
|
1117 |
+
cls,
|
1118 |
+
path: FilePath | WriteExcelBuffer | ExcelWriter,
|
1119 |
+
engine: str | None = None,
|
1120 |
+
date_format: str | None = None,
|
1121 |
+
datetime_format: str | None = None,
|
1122 |
+
mode: str = "w",
|
1123 |
+
storage_options: StorageOptions | None = None,
|
1124 |
+
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
|
1125 |
+
engine_kwargs: dict | None = None,
|
1126 |
+
) -> Self:
|
1127 |
+
# only switch class if generic(ExcelWriter)
|
1128 |
+
if cls is ExcelWriter:
|
1129 |
+
if engine is None or (isinstance(engine, str) and engine == "auto"):
|
1130 |
+
if isinstance(path, str):
|
1131 |
+
ext = os.path.splitext(path)[-1][1:]
|
1132 |
+
else:
|
1133 |
+
ext = "xlsx"
|
1134 |
+
|
1135 |
+
try:
|
1136 |
+
engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
|
1137 |
+
if engine == "auto":
|
1138 |
+
engine = get_default_engine(ext, mode="writer")
|
1139 |
+
except KeyError as err:
|
1140 |
+
raise ValueError(f"No engine for filetype: '{ext}'") from err
|
1141 |
+
|
1142 |
+
# for mypy
|
1143 |
+
assert engine is not None
|
1144 |
+
# error: Incompatible types in assignment (expression has type
|
1145 |
+
# "type[ExcelWriter[Any]]", variable has type "type[Self]")
|
1146 |
+
cls = get_writer(engine) # type: ignore[assignment]
|
1147 |
+
|
1148 |
+
return object.__new__(cls)
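A hedged sketch of how the extension-based engine lookup in `__new__` above can be steered from user code; the file name is hypothetical and the xlsxwriter package is assumed to be installed.

# Illustrative sketch only -- not part of the original pandas file.
import pandas as pd

# The io.excel.<ext>.writer option feeds the lookup above; "auto" defers
# to get_default_engine for the extension.
pd.set_option("io.excel.xlsx.writer", "xlsxwriter")
with pd.ExcelWriter("out.xlsx") as writer:  # resolves to the xlsxwriter class
    pd.DataFrame({"a": [1, 2]}).to_excel(writer)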
|
1149 |
+
|
1150 |
+
# declare external properties you can count on
|
1151 |
+
_path = None
|
1152 |
+
|
1153 |
+
@property
|
1154 |
+
def supported_extensions(self) -> tuple[str, ...]:
|
1155 |
+
"""Extensions that writer engine supports."""
|
1156 |
+
return self._supported_extensions
|
1157 |
+
|
1158 |
+
@property
|
1159 |
+
def engine(self) -> str:
|
1160 |
+
"""Name of engine."""
|
1161 |
+
return self._engine
|
1162 |
+
|
1163 |
+
@property
|
1164 |
+
def sheets(self) -> dict[str, Any]:
|
1165 |
+
"""Mapping of sheet names to sheet objects."""
|
1166 |
+
raise NotImplementedError
|
1167 |
+
|
1168 |
+
@property
|
1169 |
+
def book(self) -> _WorkbookT:
|
1170 |
+
"""
|
1171 |
+
Book instance. Class type will depend on the engine used.
|
1172 |
+
|
1173 |
+
This attribute can be used to access engine-specific features.
|
1174 |
+
"""
|
1175 |
+
raise NotImplementedError
|
1176 |
+
|
1177 |
+
def _write_cells(
|
1178 |
+
self,
|
1179 |
+
cells,
|
1180 |
+
sheet_name: str | None = None,
|
1181 |
+
startrow: int = 0,
|
1182 |
+
startcol: int = 0,
|
1183 |
+
freeze_panes: tuple[int, int] | None = None,
|
1184 |
+
) -> None:
|
1185 |
+
"""
|
1186 |
+
Write given formatted cells into an Excel sheet
|
1187 |
+
|
1188 |
+
Parameters
|
1189 |
+
----------
|
1190 |
+
cells : generator
|
1191 |
+
cell of formatted data to save to Excel sheet
|
1192 |
+
sheet_name : str, default None
|
1193 |
+
Name of Excel sheet, if None, then use self.cur_sheet
|
1194 |
+
startrow : upper left cell row to dump data frame
|
1195 |
+
startcol : upper left cell column to dump data frame
|
1196 |
+
freeze_panes: int tuple of length 2
|
1197 |
+
contains the bottom-most row and right-most column to freeze
|
1198 |
+
"""
|
1199 |
+
raise NotImplementedError
|
1200 |
+
|
1201 |
+
def _save(self) -> None:
|
1202 |
+
"""
|
1203 |
+
Save workbook to disk.
|
1204 |
+
"""
|
1205 |
+
raise NotImplementedError
|
1206 |
+
|
1207 |
+
def __init__(
|
1208 |
+
self,
|
1209 |
+
path: FilePath | WriteExcelBuffer | ExcelWriter,
|
1210 |
+
engine: str | None = None,
|
1211 |
+
date_format: str | None = None,
|
1212 |
+
datetime_format: str | None = None,
|
1213 |
+
mode: str = "w",
|
1214 |
+
storage_options: StorageOptions | None = None,
|
1215 |
+
if_sheet_exists: ExcelWriterIfSheetExists | None = None,
|
1216 |
+
engine_kwargs: dict[str, Any] | None = None,
|
1217 |
+
) -> None:
|
1218 |
+
# validate that this engine can handle the extension
|
1219 |
+
if isinstance(path, str):
|
1220 |
+
ext = os.path.splitext(path)[-1]
|
1221 |
+
self.check_extension(ext)
|
1222 |
+
|
1223 |
+
# use mode to open the file
|
1224 |
+
if "b" not in mode:
|
1225 |
+
mode += "b"
|
1226 |
+
# use "a" for the user to append data to excel but internally use "r+" to let
|
1227 |
+
# the excel backend first read the existing file and then write any data to it
|
1228 |
+
mode = mode.replace("a", "r+")
|
1229 |
+
|
1230 |
+
if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
|
1231 |
+
raise ValueError(
|
1232 |
+
f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
|
1233 |
+
"Valid options are 'error', 'new', 'replace' and 'overlay'."
|
1234 |
+
)
|
1235 |
+
if if_sheet_exists and "r+" not in mode:
|
1236 |
+
raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
|
1237 |
+
if if_sheet_exists is None:
|
1238 |
+
if_sheet_exists = "error"
|
1239 |
+
self._if_sheet_exists = if_sheet_exists
|
1240 |
+
|
1241 |
+
# cast ExcelWriter to avoid adding 'if self._handles is not None'
|
1242 |
+
self._handles = IOHandles(
|
1243 |
+
cast(IO[bytes], path), compression={"compression": None}
|
1244 |
+
)
|
1245 |
+
if not isinstance(path, ExcelWriter):
|
1246 |
+
self._handles = get_handle(
|
1247 |
+
path, mode, storage_options=storage_options, is_text=False
|
1248 |
+
)
|
1249 |
+
self._cur_sheet = None
|
1250 |
+
|
1251 |
+
if date_format is None:
|
1252 |
+
self._date_format = "YYYY-MM-DD"
|
1253 |
+
else:
|
1254 |
+
self._date_format = date_format
|
1255 |
+
if datetime_format is None:
|
1256 |
+
self._datetime_format = "YYYY-MM-DD HH:MM:SS"
|
1257 |
+
else:
|
1258 |
+
self._datetime_format = datetime_format
|
1259 |
+
|
1260 |
+
self._mode = mode
|
1261 |
+
|
1262 |
+
@property
|
1263 |
+
def date_format(self) -> str:
|
1264 |
+
"""
|
1265 |
+
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
|
1266 |
+
"""
|
1267 |
+
return self._date_format
|
1268 |
+
|
1269 |
+
@property
|
1270 |
+
def datetime_format(self) -> str:
|
1271 |
+
"""
|
1272 |
+
Format string for datetime objects written into Excel files (e.g. 'YYYY-MM-DD HH:MM:SS').
|
1273 |
+
"""
|
1274 |
+
return self._datetime_format
|
1275 |
+
|
1276 |
+
@property
|
1277 |
+
def if_sheet_exists(self) -> str:
|
1278 |
+
"""
|
1279 |
+
How to behave when writing to a sheet that already exists in append mode.
|
1280 |
+
"""
|
1281 |
+
return self._if_sheet_exists
|
1282 |
+
|
1283 |
+
def __fspath__(self) -> str:
|
1284 |
+
return getattr(self._handles.handle, "name", "")
|
1285 |
+
|
1286 |
+
def _get_sheet_name(self, sheet_name: str | None) -> str:
|
1287 |
+
if sheet_name is None:
|
1288 |
+
sheet_name = self._cur_sheet
|
1289 |
+
if sheet_name is None: # pragma: no cover
|
1290 |
+
raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
|
1291 |
+
return sheet_name
|
1292 |
+
|
1293 |
+
def _value_with_fmt(
|
1294 |
+
self, val
|
1295 |
+
) -> tuple[
|
1296 |
+
int | float | bool | str | datetime.datetime | datetime.date, str | None
|
1297 |
+
]:
|
1298 |
+
"""
|
1299 |
+
Convert numpy types to Python types for the Excel writers.
|
1300 |
+
|
1301 |
+
Parameters
|
1302 |
+
----------
|
1303 |
+
val : object
|
1304 |
+
Value to be written into cells
|
1305 |
+
|
1306 |
+
Returns
|
1307 |
+
-------
|
1308 |
+
Tuple with the first element being the converted value and the second
|
1309 |
+
being an optional format
|
1310 |
+
"""
|
1311 |
+
fmt = None
|
1312 |
+
|
1313 |
+
if is_integer(val):
|
1314 |
+
val = int(val)
|
1315 |
+
elif is_float(val):
|
1316 |
+
val = float(val)
|
1317 |
+
elif is_bool(val):
|
1318 |
+
val = bool(val)
|
1319 |
+
elif isinstance(val, datetime.datetime):
|
1320 |
+
fmt = self._datetime_format
|
1321 |
+
elif isinstance(val, datetime.date):
|
1322 |
+
fmt = self._date_format
|
1323 |
+
elif isinstance(val, datetime.timedelta):
|
1324 |
+
val = val.total_seconds() / 86400
|
1325 |
+
fmt = "0"
|
1326 |
+
else:
|
1327 |
+
val = str(val)
|
1328 |
+
|
1329 |
+
return val, fmt
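A small sketch of the timedelta branch in `_value_with_fmt` above: Excel stores durations as fractional days, so the value is divided by the number of seconds in a day and written with format "0".

# Illustrative sketch only -- not part of the original pandas file.
import datetime

val = datetime.timedelta(hours=36)
excel_value = val.total_seconds() / 86400  # 86400 seconds per day
assert excel_value == 1.5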
|
1330 |
+
|
1331 |
+
@classmethod
|
1332 |
+
def check_extension(cls, ext: str) -> Literal[True]:
|
1333 |
+
"""
|
1334 |
+
Checks the path's extension against the Writer's supported
extensions. If it isn't supported, raises ValueError.
|
1336 |
+
"""
|
1337 |
+
if ext.startswith("."):
|
1338 |
+
ext = ext[1:]
|
1339 |
+
if not any(ext in extension for extension in cls._supported_extensions):
|
1340 |
+
raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
|
1341 |
+
return True
|
1342 |
+
|
1343 |
+
# Allow use as a contextmanager
|
1344 |
+
def __enter__(self) -> Self:
|
1345 |
+
return self
|
1346 |
+
|
1347 |
+
def __exit__(
|
1348 |
+
self,
|
1349 |
+
exc_type: type[BaseException] | None,
|
1350 |
+
exc_value: BaseException | None,
|
1351 |
+
traceback: TracebackType | None,
|
1352 |
+
) -> None:
|
1353 |
+
self.close()
|
1354 |
+
|
1355 |
+
def close(self) -> None:
|
1356 |
+
"""synonym for save, to make it more file-like"""
|
1357 |
+
self._save()
|
1358 |
+
self._handles.close()
|
1359 |
+
|
1360 |
+
|
1361 |
+
XLS_SIGNATURES = (
    b"\x09\x00\x04\x00\x07\x00\x10\x00",  # BIFF2
    b"\x09\x02\x06\x00\x00\x00\x10\x00",  # BIFF3
    b"\x09\x04\x06\x00\x00\x00\x10\x00",  # BIFF4
    b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1",  # Compound File Binary
)
ZIP_SIGNATURE = b"PK\x03\x04"
PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))
|
1369 |
+
|
1370 |
+
|
1371 |
+
@doc(storage_options=_shared_docs["storage_options"])
|
1372 |
+
def inspect_excel_format(
|
1373 |
+
content_or_path: FilePath | ReadBuffer[bytes],
|
1374 |
+
storage_options: StorageOptions | None = None,
|
1375 |
+
) -> str | None:
|
1376 |
+
"""
|
1377 |
+
Inspect the path or content of an excel file and get its format.
|
1378 |
+
|
1379 |
+
Adopted from xlrd: https://github.com/python-excel/xlrd.
|
1380 |
+
|
1381 |
+
Parameters
|
1382 |
+
----------
|
1383 |
+
content_or_path : str or file-like object
|
1384 |
+
Path to file or content of file to inspect. May be a URL.
|
1385 |
+
{storage_options}
|
1386 |
+
|
1387 |
+
Returns
|
1388 |
+
-------
|
1389 |
+
str or None
|
1390 |
+
Format of file if it can be determined.
|
1391 |
+
|
1392 |
+
Raises
|
1393 |
+
------
|
1394 |
+
ValueError
|
1395 |
+
If resulting stream is empty.
|
1396 |
+
BadZipFile
|
1397 |
+
If resulting stream does not have an XLS signature and is not a valid zipfile.
|
1398 |
+
"""
|
1399 |
+
if isinstance(content_or_path, bytes):
|
1400 |
+
content_or_path = BytesIO(content_or_path)
|
1401 |
+
|
1402 |
+
with get_handle(
|
1403 |
+
content_or_path, "rb", storage_options=storage_options, is_text=False
|
1404 |
+
) as handle:
|
1405 |
+
stream = handle.handle
|
1406 |
+
stream.seek(0)
|
1407 |
+
buf = stream.read(PEEK_SIZE)
|
1408 |
+
if buf is None:
|
1409 |
+
raise ValueError("stream is empty")
|
1410 |
+
assert isinstance(buf, bytes)
|
1411 |
+
peek = buf
|
1412 |
+
stream.seek(0)
|
1413 |
+
|
1414 |
+
if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
|
1415 |
+
return "xls"
|
1416 |
+
elif not peek.startswith(ZIP_SIGNATURE):
|
1417 |
+
return None
|
1418 |
+
|
1419 |
+
with zipfile.ZipFile(stream) as zf:
|
1420 |
+
# Workaround for some third party files that use forward slashes and
|
1421 |
+
# lower case names.
|
1422 |
+
component_names = [
|
1423 |
+
name.replace("\\", "/").lower() for name in zf.namelist()
|
1424 |
+
]
|
1425 |
+
|
1426 |
+
if "xl/workbook.xml" in component_names:
|
1427 |
+
return "xlsx"
|
1428 |
+
if "xl/workbook.bin" in component_names:
|
1429 |
+
return "xlsb"
|
1430 |
+
if "content.xml" in component_names:
|
1431 |
+
return "ods"
|
1432 |
+
return "zip"
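 
A hedged sketch of sniffing an in-memory workbook with the helper defined above; `inspect_excel_format` is a private helper of this module, and writing the buffer assumes an xlsx engine such as openpyxl is installed.

# Illustrative sketch only -- not part of the original pandas file.
from io import BytesIO
import pandas as pd
from pandas.io.excel._base import inspect_excel_format

buf = BytesIO()
pd.DataFrame({"a": [1]}).to_excel(buf)  # writes a zip container with xl/workbook.xml
buf.seek(0)
assert inspect_excel_format(buf.read()) == "xlsx"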
|
1433 |
+
|
1434 |
+
|
1435 |
+
class ExcelFile:
|
1436 |
+
"""
|
1437 |
+
Class for parsing tabular Excel sheets into DataFrame objects.
|
1438 |
+
|
1439 |
+
See read_excel for more documentation.
|
1440 |
+
|
1441 |
+
Parameters
|
1442 |
+
----------
|
1443 |
+
path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
|
1444 |
+
A file-like object, xlrd workbook or openpyxl workbook.
|
1445 |
+
If a string or path object, expected to be a path to a
|
1446 |
+
.xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
|
1447 |
+
engine : str, default None
|
1448 |
+
If io is not a buffer or path, this must be set to identify io.
|
1449 |
+
Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``
|
1450 |
+
Engine compatibility :
|
1451 |
+
|
1452 |
+
- ``xlrd`` supports old-style Excel files (.xls).
|
1453 |
+
- ``openpyxl`` supports newer Excel file formats.
|
1454 |
+
- ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
|
1455 |
+
- ``pyxlsb`` supports Binary Excel files.
|
1456 |
+
- ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
|
1457 |
+
and OpenDocument (.ods) file formats.
|
1458 |
+
|
1459 |
+
.. versionchanged:: 1.2.0
|
1460 |
+
|
1461 |
+
The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
|
1462 |
+
now only supports old-style ``.xls`` files.
|
1463 |
+
When ``engine=None``, the following logic will be
|
1464 |
+
used to determine the engine:
|
1465 |
+
|
1466 |
+
- If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
|
1467 |
+
then `odf <https://pypi.org/project/odfpy/>`_ will be used.
|
1468 |
+
- Otherwise if ``path_or_buffer`` is an xls format,
|
1469 |
+
``xlrd`` will be used.
|
1470 |
+
- Otherwise if ``path_or_buffer`` is in xlsb format,
|
1471 |
+
`pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.
|
1472 |
+
|
1473 |
+
.. versionadded:: 1.3.0
|
1474 |
+
|
1475 |
+
- Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
|
1476 |
+
then ``openpyxl`` will be used.
|
1477 |
+
- Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
|
1478 |
+
|
1479 |
+
.. warning::
|
1480 |
+
|
1481 |
+
Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
|
1482 |
+
This is not supported, switch to using ``openpyxl`` instead.
|
1483 |
+
engine_kwargs : dict, optional
|
1484 |
+
Arbitrary keyword arguments passed to excel engine.
|
1485 |
+
|
1486 |
+
Examples
|
1487 |
+
--------
|
1488 |
+
>>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
|
1489 |
+
>>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP
|
1490 |
+
... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP
|
1491 |
+
"""
|
1492 |
+
|
1493 |
+
from pandas.io.excel._calamine import CalamineReader
|
1494 |
+
from pandas.io.excel._odfreader import ODFReader
|
1495 |
+
from pandas.io.excel._openpyxl import OpenpyxlReader
|
1496 |
+
from pandas.io.excel._pyxlsb import PyxlsbReader
|
1497 |
+
from pandas.io.excel._xlrd import XlrdReader
|
1498 |
+
|
1499 |
+
_engines: Mapping[str, Any] = {
|
1500 |
+
"xlrd": XlrdReader,
|
1501 |
+
"openpyxl": OpenpyxlReader,
|
1502 |
+
"odf": ODFReader,
|
1503 |
+
"pyxlsb": PyxlsbReader,
|
1504 |
+
"calamine": CalamineReader,
|
1505 |
+
}
|
1506 |
+
|
1507 |
+
def __init__(
|
1508 |
+
self,
|
1509 |
+
path_or_buffer,
|
1510 |
+
engine: str | None = None,
|
1511 |
+
storage_options: StorageOptions | None = None,
|
1512 |
+
engine_kwargs: dict | None = None,
|
1513 |
+
) -> None:
|
1514 |
+
if engine_kwargs is None:
|
1515 |
+
engine_kwargs = {}
|
1516 |
+
|
1517 |
+
if engine is not None and engine not in self._engines:
|
1518 |
+
raise ValueError(f"Unknown engine: {engine}")
|
1519 |
+
|
1520 |
+
# First argument can also be bytes, so create a buffer
|
1521 |
+
if isinstance(path_or_buffer, bytes):
|
1522 |
+
path_or_buffer = BytesIO(path_or_buffer)
|
1523 |
+
warnings.warn(
|
1524 |
+
"Passing bytes to 'read_excel' is deprecated and "
|
1525 |
+
"will be removed in a future version. To read from a "
|
1526 |
+
"byte string, wrap it in a `BytesIO` object.",
|
1527 |
+
FutureWarning,
|
1528 |
+
stacklevel=find_stack_level(),
|
1529 |
+
)
|
1530 |
+
|
1531 |
+
# Could be a str, ExcelFile, Book, etc.
|
1532 |
+
self.io = path_or_buffer
|
1533 |
+
# Always a string
|
1534 |
+
self._io = stringify_path(path_or_buffer)
|
1535 |
+
|
1536 |
+
# Determine xlrd version if installed
|
1537 |
+
if import_optional_dependency("xlrd", errors="ignore") is None:
|
1538 |
+
xlrd_version = None
|
1539 |
+
else:
|
1540 |
+
import xlrd
|
1541 |
+
|
1542 |
+
xlrd_version = Version(get_version(xlrd))
|
1543 |
+
|
1544 |
+
if engine is None:
|
1545 |
+
# Only determine ext if it is needed
|
1546 |
+
ext: str | None
|
1547 |
+
if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
|
1548 |
+
ext = "xls"
|
1549 |
+
else:
|
1550 |
+
ext = inspect_excel_format(
|
1551 |
+
content_or_path=path_or_buffer, storage_options=storage_options
|
1552 |
+
)
|
1553 |
+
if ext is None:
|
1554 |
+
raise ValueError(
|
1555 |
+
"Excel file format cannot be determined, you must specify "
|
1556 |
+
"an engine manually."
|
1557 |
+
)
|
1558 |
+
|
1559 |
+
engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
|
1560 |
+
if engine == "auto":
|
1561 |
+
engine = get_default_engine(ext, mode="reader")
|
1562 |
+
|
1563 |
+
assert engine is not None
|
1564 |
+
self.engine = engine
|
1565 |
+
self.storage_options = storage_options
|
1566 |
+
|
1567 |
+
self._reader = self._engines[engine](
|
1568 |
+
self._io,
|
1569 |
+
storage_options=storage_options,
|
1570 |
+
engine_kwargs=engine_kwargs,
|
1571 |
+
)
|
1572 |
+
|
1573 |
+
def __fspath__(self):
|
1574 |
+
return self._io
|
1575 |
+
|
1576 |
+
def parse(
|
1577 |
+
self,
|
1578 |
+
sheet_name: str | int | list[int] | list[str] | None = 0,
|
1579 |
+
header: int | Sequence[int] | None = 0,
|
1580 |
+
names: SequenceNotStr[Hashable] | range | None = None,
|
1581 |
+
index_col: int | Sequence[int] | None = None,
|
1582 |
+
usecols=None,
|
1583 |
+
converters=None,
|
1584 |
+
true_values: Iterable[Hashable] | None = None,
|
1585 |
+
false_values: Iterable[Hashable] | None = None,
|
1586 |
+
skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
|
1587 |
+
nrows: int | None = None,
|
1588 |
+
na_values=None,
|
1589 |
+
parse_dates: list | dict | bool = False,
|
1590 |
+
date_parser: Callable | lib.NoDefault = lib.no_default,
|
1591 |
+
date_format: str | dict[Hashable, str] | None = None,
|
1592 |
+
thousands: str | None = None,
|
1593 |
+
comment: str | None = None,
|
1594 |
+
skipfooter: int = 0,
|
1595 |
+
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
|
1596 |
+
**kwds,
|
1597 |
+
) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
|
1598 |
+
"""
|
1599 |
+
Parse specified sheet(s) into a DataFrame.
|
1600 |
+
|
1601 |
+
Equivalent to read_excel(ExcelFile, ...). See the read_excel
|
1602 |
+
docstring for more info on accepted parameters.
|
1603 |
+
|
1604 |
+
Returns
|
1605 |
+
-------
|
1606 |
+
DataFrame or dict of DataFrames
|
1607 |
+
DataFrame from the passed in Excel file.
|
1608 |
+
|
1609 |
+
Examples
|
1610 |
+
--------
|
1611 |
+
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
|
1612 |
+
>>> df.to_excel('myfile.xlsx') # doctest: +SKIP
|
1613 |
+
>>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
|
1614 |
+
>>> file.parse() # doctest: +SKIP
|
1615 |
+
"""
|
1616 |
+
return self._reader.parse(
|
1617 |
+
sheet_name=sheet_name,
|
1618 |
+
header=header,
|
1619 |
+
names=names,
|
1620 |
+
index_col=index_col,
|
1621 |
+
usecols=usecols,
|
1622 |
+
converters=converters,
|
1623 |
+
true_values=true_values,
|
1624 |
+
false_values=false_values,
|
1625 |
+
skiprows=skiprows,
|
1626 |
+
nrows=nrows,
|
1627 |
+
na_values=na_values,
|
1628 |
+
parse_dates=parse_dates,
|
1629 |
+
date_parser=date_parser,
|
1630 |
+
date_format=date_format,
|
1631 |
+
thousands=thousands,
|
1632 |
+
comment=comment,
|
1633 |
+
skipfooter=skipfooter,
|
1634 |
+
dtype_backend=dtype_backend,
|
1635 |
+
**kwds,
|
1636 |
+
)
|
1637 |
+
|
1638 |
+
@property
|
1639 |
+
def book(self):
|
1640 |
+
return self._reader.book
|
1641 |
+
|
1642 |
+
@property
|
1643 |
+
def sheet_names(self):
|
1644 |
+
return self._reader.sheet_names
|
1645 |
+
|
1646 |
+
def close(self) -> None:
|
1647 |
+
"""close io if necessary"""
|
1648 |
+
self._reader.close()
|
1649 |
+
|
1650 |
+
def __enter__(self) -> Self:
|
1651 |
+
return self
|
1652 |
+
|
1653 |
+
def __exit__(
|
1654 |
+
self,
|
1655 |
+
exc_type: type[BaseException] | None,
|
1656 |
+
exc_value: BaseException | None,
|
1657 |
+
traceback: TracebackType | None,
|
1658 |
+
) -> None:
|
1659 |
+
self.close()
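A hedged usage sketch of the ExcelFile class above, which parses the workbook once and serves several sheets from the same reader; the file and sheet names are hypothetical.

# Illustrative sketch only -- not part of the original pandas file.
import pandas as pd

with pd.ExcelFile("book.xlsx") as xls:
    print(xls.sheet_names)                      # sheets discovered by the reader
    summary = xls.parse("Summary", nrows=100)   # parse() takes read_excel keywords
    detail = xls.parse(sheet_name=1)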
|
venv/lib/python3.10/site-packages/pandas/io/excel/_calamine.py
ADDED
@@ -0,0 +1,121 @@
from __future__ import annotations

from datetime import (
    date,
    datetime,
    time,
    timedelta,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Union,
)

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

import pandas as pd
from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from python_calamine import (
        CalamineSheet,
        CalamineWorkbook,
    )

    from pandas._typing import (
        FilePath,
        NaTType,
        ReadBuffer,
        Scalar,
        StorageOptions,
    )

_CellValue = Union[int, float, str, bool, time, date, datetime, timedelta]


class CalamineReader(BaseExcelReader["CalamineWorkbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using calamine engine (xlsx/xls/xlsb/ods).

        Parameters
        ----------
        filepath_or_buffer : str, path to be parsed or
            an open readable stream.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("python_calamine")
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[CalamineWorkbook]:
        from python_calamine import CalamineWorkbook

        return CalamineWorkbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any
    ) -> CalamineWorkbook:
        from python_calamine import load_workbook

        return load_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self) -> list[str]:
        from python_calamine import SheetTypeEnum

        return [
            sheet.name
            for sheet in self.book.sheets_metadata
            if sheet.typ == SheetTypeEnum.WorkSheet
        ]

    def get_sheet_by_name(self, name: str) -> CalamineSheet:
        self.raise_if_bad_sheet_by_name(name)
        return self.book.get_sheet_by_name(name)

    def get_sheet_by_index(self, index: int) -> CalamineSheet:
        self.raise_if_bad_sheet_by_index(index)
        return self.book.get_sheet_by_index(index)

    def get_sheet_data(
        self, sheet: CalamineSheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar | NaTType | time]]:
        def _convert_cell(value: _CellValue) -> Scalar | NaTType | time:
            if isinstance(value, float):
                val = int(value)
                if val == value:
                    return val
                else:
                    return value
            elif isinstance(value, date):
                return pd.Timestamp(value)
            elif isinstance(value, timedelta):
                return pd.Timedelta(value)
            elif isinstance(value, time):
                return value

            return value

        rows: list[list[_CellValue]] = sheet.to_python(
            skip_empty_area=False, nrows=file_rows_needed
        )
        data = [[_convert_cell(cell) for cell in row] for row in rows]

        return data
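A small sketch of the cell-conversion rule used by `CalamineReader.get_sheet_data` above (this reader is selected with `engine="calamine"`): floats with no fractional part are collapsed to Python ints.

# Illustrative sketch only -- not part of the original pandas file.
def convert(value):
    if isinstance(value, float):
        as_int = int(value)
        return as_int if as_int == value else value
    return value

assert convert(3.0) == 3 and isinstance(convert(3.0), int)
assert convert(3.5) == 3.5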
venv/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py
ADDED
@@ -0,0 +1,253 @@
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import (
|
4 |
+
TYPE_CHECKING,
|
5 |
+
cast,
|
6 |
+
)
|
7 |
+
|
8 |
+
import numpy as np
|
9 |
+
|
10 |
+
from pandas._typing import (
|
11 |
+
FilePath,
|
12 |
+
ReadBuffer,
|
13 |
+
Scalar,
|
14 |
+
StorageOptions,
|
15 |
+
)
|
16 |
+
from pandas.compat._optional import import_optional_dependency
|
17 |
+
from pandas.util._decorators import doc
|
18 |
+
|
19 |
+
import pandas as pd
|
20 |
+
from pandas.core.shared_docs import _shared_docs
|
21 |
+
|
22 |
+
from pandas.io.excel._base import BaseExcelReader
|
23 |
+
|
24 |
+
if TYPE_CHECKING:
|
25 |
+
from odf.opendocument import OpenDocument
|
26 |
+
|
27 |
+
from pandas._libs.tslibs.nattype import NaTType
|
28 |
+
|
29 |
+
|
30 |
+
@doc(storage_options=_shared_docs["storage_options"])
|
31 |
+
class ODFReader(BaseExcelReader["OpenDocument"]):
|
32 |
+
def __init__(
|
33 |
+
self,
|
34 |
+
filepath_or_buffer: FilePath | ReadBuffer[bytes],
|
35 |
+
storage_options: StorageOptions | None = None,
|
36 |
+
engine_kwargs: dict | None = None,
|
37 |
+
) -> None:
|
38 |
+
"""
|
39 |
+
Read tables out of OpenDocument formatted files.
|
40 |
+
|
41 |
+
Parameters
|
42 |
+
----------
|
43 |
+
filepath_or_buffer : str, path to be parsed or
|
44 |
+
an open readable stream.
|
45 |
+
{storage_options}
|
46 |
+
engine_kwargs : dict, optional
|
47 |
+
Arbitrary keyword arguments passed to excel engine.
|
48 |
+
"""
|
49 |
+
import_optional_dependency("odf")
|
50 |
+
super().__init__(
|
51 |
+
filepath_or_buffer,
|
52 |
+
storage_options=storage_options,
|
53 |
+
engine_kwargs=engine_kwargs,
|
54 |
+
)
|
55 |
+
|
56 |
+
@property
|
57 |
+
def _workbook_class(self) -> type[OpenDocument]:
|
58 |
+
from odf.opendocument import OpenDocument
|
59 |
+
|
60 |
+
return OpenDocument
|
61 |
+
|
62 |
+
def load_workbook(
|
63 |
+
self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
|
64 |
+
) -> OpenDocument:
|
65 |
+
from odf.opendocument import load
|
66 |
+
|
67 |
+
return load(filepath_or_buffer, **engine_kwargs)
|
68 |
+
|
69 |
+
@property
|
70 |
+
def empty_value(self) -> str:
|
71 |
+
"""Property for compat with other readers."""
|
72 |
+
return ""
|
73 |
+
|
74 |
+
@property
|
75 |
+
def sheet_names(self) -> list[str]:
|
76 |
+
"""Return a list of sheet names present in the document"""
|
77 |
+
from odf.table import Table
|
78 |
+
|
79 |
+
tables = self.book.getElementsByType(Table)
|
80 |
+
return [t.getAttribute("name") for t in tables]
|
81 |
+
|
82 |
+
def get_sheet_by_index(self, index: int):
|
83 |
+
from odf.table import Table
|
84 |
+
|
85 |
+
self.raise_if_bad_sheet_by_index(index)
|
86 |
+
tables = self.book.getElementsByType(Table)
|
87 |
+
return tables[index]
|
88 |
+
|
89 |
+
    def get_sheet_by_name(self, name: str):
        from odf.table import Table

        self.raise_if_bad_sheet_by_name(name)
        tables = self.book.getElementsByType(Table)

        for table in tables:
            if table.getAttribute("name") == name:
                return table

        self.close()
        raise ValueError(f"sheet {name} not found")

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar | NaTType]]:
        """
        Parse an ODF Table into a list of lists
        """
        from odf.table import (
            CoveredTableCell,
            TableCell,
            TableRow,
        )

        covered_cell_name = CoveredTableCell().qname
        table_cell_name = TableCell().qname
        cell_names = {covered_cell_name, table_cell_name}

        sheet_rows = sheet.getElementsByType(TableRow)
        empty_rows = 0
        max_row_len = 0

        table: list[list[Scalar | NaTType]] = []

        for sheet_row in sheet_rows:
            sheet_cells = [
                x
                for x in sheet_row.childNodes
                if hasattr(x, "qname") and x.qname in cell_names
            ]
            empty_cells = 0
            table_row: list[Scalar | NaTType] = []

            for sheet_cell in sheet_cells:
                if sheet_cell.qname == table_cell_name:
                    value = self._get_cell_value(sheet_cell)
                else:
                    value = self.empty_value

                column_repeat = self._get_column_repeat(sheet_cell)

                # Queue up empty values, writing only if content succeeds them
                if value == self.empty_value:
                    empty_cells += column_repeat
                else:
                    table_row.extend([self.empty_value] * empty_cells)
                    empty_cells = 0
                    table_row.extend([value] * column_repeat)

            if max_row_len < len(table_row):
                max_row_len = len(table_row)

            row_repeat = self._get_row_repeat(sheet_row)
            if len(table_row) == 0:
                empty_rows += row_repeat
            else:
                # add blank rows to our table
                table.extend([[self.empty_value]] * empty_rows)
                empty_rows = 0
                table.extend(table_row for _ in range(row_repeat))
            if file_rows_needed is not None and len(table) >= file_rows_needed:
                break

        # Make our table square
        for row in table:
            if len(row) < max_row_len:
                row.extend([self.empty_value] * (max_row_len - len(row)))

        return table

    def _get_row_repeat(self, row) -> int:
        """
        Return number of times this row was repeated
        Repeating an empty row appeared to be a common way
        of representing sparse rows in the table.
        """
        from odf.namespaces import TABLENS

        return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))

    def _get_column_repeat(self, cell) -> int:
        from odf.namespaces import TABLENS

        return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))

    def _get_cell_value(self, cell) -> Scalar | NaTType:
        from odf.namespaces import OFFICENS

        if str(cell) == "#N/A":
            return np.nan

        cell_type = cell.attributes.get((OFFICENS, "value-type"))
        if cell_type == "boolean":
            if str(cell) == "TRUE":
                return True
            return False
        if cell_type is None:
            return self.empty_value
        elif cell_type == "float":
            # GH5394
            cell_value = float(cell.attributes.get((OFFICENS, "value")))
            val = int(cell_value)
            if val == cell_value:
                return val
            return cell_value
        elif cell_type == "percentage":
            cell_value = cell.attributes.get((OFFICENS, "value"))
            return float(cell_value)
        elif cell_type == "string":
            return self._get_cell_string_value(cell)
        elif cell_type == "currency":
            cell_value = cell.attributes.get((OFFICENS, "value"))
            return float(cell_value)
        elif cell_type == "date":
            cell_value = cell.attributes.get((OFFICENS, "date-value"))
            return pd.Timestamp(cell_value)
        elif cell_type == "time":
            stamp = pd.Timestamp(str(cell))
            # cast needed here because Scalar doesn't include datetime.time
            return cast(Scalar, stamp.time())
        else:
            self.close()
            raise ValueError(f"Unrecognized type {cell_type}")

    def _get_cell_string_value(self, cell) -> str:
        """
        Find and decode OpenDocument text:s tags that represent
        a run length encoded sequence of space characters.
        """
        from odf.element import Element
        from odf.namespaces import TEXTNS
        from odf.office import Annotation
        from odf.text import S

        office_annotation = Annotation().qname
        text_s = S().qname

        value = []

        for fragment in cell.childNodes:
            if isinstance(fragment, Element):
                if fragment.qname == text_s:
                    spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
                    value.append(" " * spaces)
                elif fragment.qname == office_annotation:
                    continue
                else:
                    # recursive impl needed in case of nested fragments
                    # with multiple spaces
                    # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704
                    value.append(self._get_cell_string_value(fragment))
            else:
                value.append(str(fragment).strip("\n"))
        return "".join(value)
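
The ODF reader above is reached through pandas' public read_excel API via the "odf" engine. A minimal usage sketch, assuming an .ods file exists at the hypothetical path below and the odfpy package is installed:

import pandas as pd

# Parsing is delegated to ODFReader.get_sheet_data shown above.
df = pd.read_excel("example.ods", engine="odf", sheet_name=0)
print(df.head())
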
venv/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py
ADDED
@@ -0,0 +1,357 @@
from __future__ import annotations

from collections import defaultdict
import datetime
import json
from typing import (
    TYPE_CHECKING,
    Any,
    DefaultDict,
    cast,
    overload,
)

from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ExcelWriterIfSheetExists,
        FilePath,
        StorageOptions,
        WriteExcelBuffer,
    )

    from pandas.io.formats.excel import ExcelCell


class ODSWriter(ExcelWriter):
    _engine = "odf"
    _supported_extensions = (".ods",)

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format=None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        from odf.opendocument import OpenDocumentSpreadsheet

        if mode == "a":
            raise ValueError("Append mode is not supported with odf!")

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
        self._book = OpenDocumentSpreadsheet(**engine_kwargs)

        super().__init__(
            path,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        self._style_dict: dict[str, str] = {}

    @property
    def book(self):
        """
        Book instance of class odf.opendocument.OpenDocumentSpreadsheet.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        """Mapping of sheet names to sheet objects."""
        from odf.table import Table

        result = {
            sheet.getAttribute("name"): sheet
            for sheet in self.book.getElementsByType(Table)
        }
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        for sheet in self.sheets.values():
            self.book.spreadsheet.addElement(sheet)
        self.book.save(self._handles.handle)

    def _write_cells(
        self,
        cells: list[ExcelCell],
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        """
        Write the frame cells using odf
        """
        from odf.table import (
            Table,
            TableCell,
            TableRow,
        )
        from odf.text import P

        sheet_name = self._get_sheet_name(sheet_name)
        assert sheet_name is not None

        if sheet_name in self.sheets:
            wks = self.sheets[sheet_name]
        else:
            wks = Table(name=sheet_name)
            self.book.spreadsheet.addElement(wks)

        if validate_freeze_panes(freeze_panes):
            freeze_panes = cast(tuple[int, int], freeze_panes)
            self._create_freeze_panes(sheet_name, freeze_panes)

        for _ in range(startrow):
            wks.addElement(TableRow())

        rows: DefaultDict = defaultdict(TableRow)
        col_count: DefaultDict = defaultdict(int)

        for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)):
            # only add empty cells if the row is still empty
            if not col_count[cell.row]:
                for _ in range(startcol):
                    rows[cell.row].addElement(TableCell())

            # fill with empty cells if needed
            for _ in range(cell.col - col_count[cell.row]):
                rows[cell.row].addElement(TableCell())
                col_count[cell.row] += 1

            pvalue, tc = self._make_table_cell(cell)
            rows[cell.row].addElement(tc)
            col_count[cell.row] += 1
            p = P(text=pvalue)
            tc.addElement(p)

        # add all rows to the sheet
        if len(rows) > 0:
            for row_nr in range(max(rows.keys()) + 1):
                wks.addElement(rows[row_nr])

    def _make_table_cell_attributes(self, cell) -> dict[str, int | str]:
        """Convert cell attributes to OpenDocument attributes

        Parameters
        ----------
        cell : ExcelCell
            Spreadsheet cell data

        Returns
        -------
        attributes : Dict[str, Union[int, str]]
            Dictionary with attributes and attribute values
        """
        attributes: dict[str, int | str] = {}
        style_name = self._process_style(cell.style)
        if style_name is not None:
            attributes["stylename"] = style_name
        if cell.mergestart is not None and cell.mergeend is not None:
            attributes["numberrowsspanned"] = max(1, cell.mergestart)
            attributes["numbercolumnsspanned"] = cell.mergeend
        return attributes

    def _make_table_cell(self, cell) -> tuple[object, Any]:
        """Convert cell data to an OpenDocument spreadsheet cell

        Parameters
        ----------
        cell : ExcelCell
            Spreadsheet cell data

        Returns
        -------
        pvalue, cell : Tuple[str, TableCell]
            Display value, Cell value
        """
        from odf.table import TableCell

        attributes = self._make_table_cell_attributes(cell)
        val, fmt = self._value_with_fmt(cell.val)
        pvalue = value = val
        if isinstance(val, bool):
            value = str(val).lower()
            pvalue = str(val).upper()
            return (
                pvalue,
                TableCell(
                    valuetype="boolean",
                    booleanvalue=value,
                    attributes=attributes,
                ),
            )
        elif isinstance(val, datetime.datetime):
            # Fast formatting
            value = val.isoformat()
            # Slow but locale-dependent
            pvalue = val.strftime("%c")
            return (
                pvalue,
                TableCell(valuetype="date", datevalue=value, attributes=attributes),
            )
        elif isinstance(val, datetime.date):
            # Fast formatting
            value = f"{val.year}-{val.month:02d}-{val.day:02d}"
            # Slow but locale-dependent
            pvalue = val.strftime("%x")
            return (
                pvalue,
                TableCell(valuetype="date", datevalue=value, attributes=attributes),
            )
        elif isinstance(val, str):
            return (
                pvalue,
                TableCell(
                    valuetype="string",
                    stringvalue=value,
                    attributes=attributes,
                ),
            )
        else:
            return (
                pvalue,
                TableCell(
                    valuetype="float",
                    value=value,
                    attributes=attributes,
                ),
            )

    @overload
    def _process_style(self, style: dict[str, Any]) -> str:
        ...

    @overload
    def _process_style(self, style: None) -> None:
        ...

    def _process_style(self, style: dict[str, Any] | None) -> str | None:
        """Convert a style dictionary to a OpenDocument style sheet

        Parameters
        ----------
        style : Dict
            Style dictionary

        Returns
        -------
        style_key : str
            Unique style key for later reference in sheet
        """
        from odf.style import (
            ParagraphProperties,
            Style,
            TableCellProperties,
            TextProperties,
        )

        if style is None:
            return None
        style_key = json.dumps(style)
        if style_key in self._style_dict:
            return self._style_dict[style_key]
        name = f"pd{len(self._style_dict)+1}"
        self._style_dict[style_key] = name
        odf_style = Style(name=name, family="table-cell")
        if "font" in style:
            font = style["font"]
            if font.get("bold", False):
                odf_style.addElement(TextProperties(fontweight="bold"))
        if "borders" in style:
            borders = style["borders"]
            for side, thickness in borders.items():
                thickness_translation = {"thin": "0.75pt solid #000000"}
                odf_style.addElement(
                    TableCellProperties(
                        attributes={f"border{side}": thickness_translation[thickness]}
                    )
                )
        if "alignment" in style:
            alignment = style["alignment"]
            horizontal = alignment.get("horizontal")
            if horizontal:
                odf_style.addElement(ParagraphProperties(textalign=horizontal))
            vertical = alignment.get("vertical")
            if vertical:
                odf_style.addElement(TableCellProperties(verticalalign=vertical))
        self.book.styles.addElement(odf_style)
        return name

    def _create_freeze_panes(
        self, sheet_name: str, freeze_panes: tuple[int, int]
    ) -> None:
        """
        Create freeze panes in the sheet.

        Parameters
        ----------
        sheet_name : str
            Name of the spreadsheet
        freeze_panes : tuple of (int, int)
            Freeze pane location x and y
        """
        from odf.config import (
            ConfigItem,
            ConfigItemMapEntry,
            ConfigItemMapIndexed,
            ConfigItemMapNamed,
            ConfigItemSet,
        )

        config_item_set = ConfigItemSet(name="ooo:view-settings")
        self.book.settings.addElement(config_item_set)

        config_item_map_indexed = ConfigItemMapIndexed(name="Views")
        config_item_set.addElement(config_item_map_indexed)

        config_item_map_entry = ConfigItemMapEntry()
        config_item_map_indexed.addElement(config_item_map_entry)

        config_item_map_named = ConfigItemMapNamed(name="Tables")
        config_item_map_entry.addElement(config_item_map_named)

        config_item_map_entry = ConfigItemMapEntry(name=sheet_name)
        config_item_map_named.addElement(config_item_map_entry)

        config_item_map_entry.addElement(
            ConfigItem(name="HorizontalSplitMode", type="short", text="2")
        )
        config_item_map_entry.addElement(
            ConfigItem(name="VerticalSplitMode", type="short", text="2")
        )
        config_item_map_entry.addElement(
            ConfigItem(
                name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0])
            )
        )
        config_item_map_entry.addElement(
            ConfigItem(
                name="VerticalSplitPosition", type="int", text=str(freeze_panes[1])
            )
        )
        config_item_map_entry.addElement(
            ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0]))
        )
        config_item_map_entry.addElement(
            ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1]))
        )
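
ODSWriter is selected whenever DataFrame.to_excel targets an .ods path. A minimal usage sketch, assuming a hypothetical output path and an installed odfpy; note that mode="a" raises ValueError for this engine, as in __init__ above:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [True, False]})
# engine="odf" routes the write through ODSWriter._write_cells.
df.to_excel("report.ods", engine="odf", sheet_name="data", index=False)
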
venv/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py
ADDED
@@ -0,0 +1,639 @@
from __future__ import annotations

import mmap
from typing import (
    TYPE_CHECKING,
    Any,
    cast,
)

import numpy as np

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import (
    BaseExcelReader,
    ExcelWriter,
)
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from openpyxl import Workbook
    from openpyxl.descriptors.serialisable import Serialisable

    from pandas._typing import (
        ExcelWriterIfSheetExists,
        FilePath,
        ReadBuffer,
        Scalar,
        StorageOptions,
        WriteExcelBuffer,
    )


class OpenpyxlWriter(ExcelWriter):
    _engine = "openpyxl"
    _supported_extensions = (".xlsx", ".xlsm")

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # Use the openpyxl module as the Excel writer.
        from openpyxl.workbook import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        super().__init__(
            path,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from
        # the file and later write to it
        if "r+" in self._mode:  # Load from existing workbook
            from openpyxl import load_workbook

            try:
                self._book = load_workbook(self._handles.handle, **engine_kwargs)
            except TypeError:
                self._handles.handle.close()
                raise
            self._handles.handle.seek(0)
        else:
            # Create workbook object with default optimized_write=True.
            try:
                self._book = Workbook(**engine_kwargs)
            except TypeError:
                self._handles.handle.close()
                raise

            if self.book.worksheets:
                self.book.remove(self.book.worksheets[0])

    @property
    def book(self) -> Workbook:
        """
        Book instance of class openpyxl.workbook.Workbook.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        """Mapping of sheet names to sheet objects."""
        result = {name: self.book[name] for name in self.book.sheetnames}
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        self.book.save(self._handles.handle)
        if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap):
            # truncate file to the written content
            self._handles.handle.truncate()

    @classmethod
    def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:
        """
        Convert a style_dict to a set of kwargs suitable for initializing
        or updating-on-copy an openpyxl v2 style object.

        Parameters
        ----------
        style_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'font'
                'fill'
                'border' ('borders')
                'alignment'
                'number_format'
                'protection'

        Returns
        -------
        style_kwargs : dict
            A dict with the same, normalized keys as ``style_dict`` but each
            value has been replaced with a native openpyxl style object of the
            appropriate class.
        """
        _style_key_map = {"borders": "border"}

        style_kwargs: dict[str, Serialisable] = {}
        for k, v in style_dict.items():
            k = _style_key_map.get(k, k)
            _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
            new_v = _conv_to_x(v)
            if new_v:
                style_kwargs[k] = new_v

        return style_kwargs

    @classmethod
    def _convert_to_color(cls, color_spec):
        """
        Convert ``color_spec`` to an openpyxl v2 Color object.

        Parameters
        ----------
        color_spec : str, dict
            A 32-bit ARGB hex string, or a dict with zero or more of the
            following keys.
                'rgb'
                'indexed'
                'auto'
                'theme'
                'tint'
                'index'
                'type'

        Returns
        -------
        color : openpyxl.styles.Color
        """
        from openpyxl.styles import Color

        if isinstance(color_spec, str):
            return Color(color_spec)
        else:
            return Color(**color_spec)

    @classmethod
    def _convert_to_font(cls, font_dict):
        """
        Convert ``font_dict`` to an openpyxl v2 Font object.

        Parameters
        ----------
        font_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'name'
                'size' ('sz')
                'bold' ('b')
                'italic' ('i')
                'underline' ('u')
                'strikethrough' ('strike')
                'color'
                'vertAlign' ('vertalign')
                'charset'
                'scheme'
                'family'
                'outline'
                'shadow'
                'condense'

        Returns
        -------
        font : openpyxl.styles.Font
        """
        from openpyxl.styles import Font

        _font_key_map = {
            "sz": "size",
            "b": "bold",
            "i": "italic",
            "u": "underline",
            "strike": "strikethrough",
            "vertalign": "vertAlign",
        }

        font_kwargs = {}
        for k, v in font_dict.items():
            k = _font_key_map.get(k, k)
            if k == "color":
                v = cls._convert_to_color(v)
            font_kwargs[k] = v

        return Font(**font_kwargs)

    @classmethod
    def _convert_to_stop(cls, stop_seq):
        """
        Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
        suitable for initializing the ``GradientFill`` ``stop`` parameter.

        Parameters
        ----------
        stop_seq : iterable
            An iterable that yields objects suitable for consumption by
            ``_convert_to_color``.

        Returns
        -------
        stop : list of openpyxl.styles.Color
        """
        return map(cls._convert_to_color, stop_seq)

    @classmethod
    def _convert_to_fill(cls, fill_dict: dict[str, Any]):
        """
        Convert ``fill_dict`` to an openpyxl v2 Fill object.

        Parameters
        ----------
        fill_dict : dict
            A dict with one or more of the following keys (or their synonyms),
                'fill_type' ('patternType', 'patterntype')
                'start_color' ('fgColor', 'fgcolor')
                'end_color' ('bgColor', 'bgcolor')
            or one or more of the following keys (or their synonyms).
                'type' ('fill_type')
                'degree'
                'left'
                'right'
                'top'
                'bottom'
                'stop'

        Returns
        -------
        fill : openpyxl.styles.Fill
        """
        from openpyxl.styles import (
            GradientFill,
            PatternFill,
        )

        _pattern_fill_key_map = {
            "patternType": "fill_type",
            "patterntype": "fill_type",
            "fgColor": "start_color",
            "fgcolor": "start_color",
            "bgColor": "end_color",
            "bgcolor": "end_color",
        }

        _gradient_fill_key_map = {"fill_type": "type"}

        pfill_kwargs = {}
        gfill_kwargs = {}
        for k, v in fill_dict.items():
            pk = _pattern_fill_key_map.get(k)
            gk = _gradient_fill_key_map.get(k)
            if pk in ["start_color", "end_color"]:
                v = cls._convert_to_color(v)
            if gk == "stop":
                v = cls._convert_to_stop(v)
            if pk:
                pfill_kwargs[pk] = v
            elif gk:
                gfill_kwargs[gk] = v
            else:
                pfill_kwargs[k] = v
                gfill_kwargs[k] = v

        try:
            return PatternFill(**pfill_kwargs)
        except TypeError:
            return GradientFill(**gfill_kwargs)

    @classmethod
    def _convert_to_side(cls, side_spec):
        """
        Convert ``side_spec`` to an openpyxl v2 Side object.

        Parameters
        ----------
        side_spec : str, dict
            A string specifying the border style, or a dict with zero or more
            of the following keys (or their synonyms).
                'style' ('border_style')
                'color'

        Returns
        -------
        side : openpyxl.styles.Side
        """
        from openpyxl.styles import Side

        _side_key_map = {"border_style": "style"}

        if isinstance(side_spec, str):
            return Side(style=side_spec)

        side_kwargs = {}
        for k, v in side_spec.items():
            k = _side_key_map.get(k, k)
            if k == "color":
                v = cls._convert_to_color(v)
            side_kwargs[k] = v

        return Side(**side_kwargs)

    @classmethod
    def _convert_to_border(cls, border_dict):
        """
        Convert ``border_dict`` to an openpyxl v2 Border object.

        Parameters
        ----------
        border_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'left'
                'right'
                'top'
                'bottom'
                'diagonal'
                'diagonal_direction'
                'vertical'
                'horizontal'
                'diagonalUp' ('diagonalup')
                'diagonalDown' ('diagonaldown')
                'outline'

        Returns
        -------
        border : openpyxl.styles.Border
        """
        from openpyxl.styles import Border

        _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}

        border_kwargs = {}
        for k, v in border_dict.items():
            k = _border_key_map.get(k, k)
            if k == "color":
                v = cls._convert_to_color(v)
            if k in ["left", "right", "top", "bottom", "diagonal"]:
                v = cls._convert_to_side(v)
            border_kwargs[k] = v

        return Border(**border_kwargs)

    @classmethod
    def _convert_to_alignment(cls, alignment_dict):
        """
        Convert ``alignment_dict`` to an openpyxl v2 Alignment object.

        Parameters
        ----------
        alignment_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'horizontal'
                'vertical'
                'text_rotation'
                'wrap_text'
                'shrink_to_fit'
                'indent'
        Returns
        -------
        alignment : openpyxl.styles.Alignment
        """
        from openpyxl.styles import Alignment

        return Alignment(**alignment_dict)

    @classmethod
    def _convert_to_number_format(cls, number_format_dict):
        """
        Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
        initializer.

        Parameters
        ----------
        number_format_dict : dict
            A dict with zero or more of the following keys.
                'format_code' : str

        Returns
        -------
        number_format : str
        """
        return number_format_dict["format_code"]

    @classmethod
    def _convert_to_protection(cls, protection_dict):
        """
        Convert ``protection_dict`` to an openpyxl v2 Protection object.

        Parameters
        ----------
        protection_dict : dict
            A dict with zero or more of the following keys.
                'locked'
                'hidden'

        Returns
        -------
        """
        from openpyxl.styles import Protection

        return Protection(**protection_dict)

    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Write the frame cells using openpyxl.
        sheet_name = self._get_sheet_name(sheet_name)

        _style_cache: dict[str, dict[str, Serialisable]] = {}

        if sheet_name in self.sheets and self._if_sheet_exists != "new":
            if "r+" in self._mode:
                if self._if_sheet_exists == "replace":
                    old_wks = self.sheets[sheet_name]
                    target_index = self.book.index(old_wks)
                    del self.book[sheet_name]
                    wks = self.book.create_sheet(sheet_name, target_index)
                elif self._if_sheet_exists == "error":
                    raise ValueError(
                        f"Sheet '{sheet_name}' already exists and "
                        f"if_sheet_exists is set to 'error'."
                    )
                elif self._if_sheet_exists == "overlay":
                    wks = self.sheets[sheet_name]
                else:
                    raise ValueError(
                        f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. "
                        "Valid options are 'error', 'new', 'replace' and 'overlay'."
                    )
            else:
                wks = self.sheets[sheet_name]
        else:
            wks = self.book.create_sheet()
            wks.title = sheet_name

        if validate_freeze_panes(freeze_panes):
            freeze_panes = cast(tuple[int, int], freeze_panes)
            wks.freeze_panes = wks.cell(
                row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
            )

        for cell in cells:
            xcell = wks.cell(
                row=startrow + cell.row + 1, column=startcol + cell.col + 1
            )
            xcell.value, fmt = self._value_with_fmt(cell.val)
            if fmt:
                xcell.number_format = fmt

            style_kwargs: dict[str, Serialisable] | None = {}
            if cell.style:
                key = str(cell.style)
                style_kwargs = _style_cache.get(key)
                if style_kwargs is None:
                    style_kwargs = self._convert_to_style_kwargs(cell.style)
                    _style_cache[key] = style_kwargs

            if style_kwargs:
                for k, v in style_kwargs.items():
                    setattr(xcell, k, v)

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_cells(
                    start_row=startrow + cell.row + 1,
                    start_column=startcol + cell.col + 1,
                    end_column=startcol + cell.mergeend + 1,
                    end_row=startrow + cell.mergestart + 1,
                )

                # When cells are merged only the top-left cell is preserved
                # The behaviour of the other cells in a merged range is
                # undefined
                if style_kwargs:
                    first_row = startrow + cell.row + 1
                    last_row = startrow + cell.mergestart + 1
                    first_col = startcol + cell.col + 1
                    last_col = startcol + cell.mergeend + 1

                    for row in range(first_row, last_row + 1):
                        for col in range(first_col, last_col + 1):
                            if row == first_row and col == first_col:
                                # Ignore first cell. It is already handled.
                                continue
                            xcell = wks.cell(column=col, row=row)
                            for k, v in style_kwargs.items():
                                setattr(xcell, k, v)


class OpenpyxlReader(BaseExcelReader["Workbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using openpyxl engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("openpyxl")
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Workbook]:
        from openpyxl import Workbook

        return Workbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
    ) -> Workbook:
        from openpyxl import load_workbook

        default_kwargs = {"read_only": True, "data_only": True, "keep_links": False}

        return load_workbook(
            filepath_or_buffer,
            **(default_kwargs | engine_kwargs),
        )

    @property
    def sheet_names(self) -> list[str]:
        return [sheet.title for sheet in self.book.worksheets]

    def get_sheet_by_name(self, name: str):
        self.raise_if_bad_sheet_by_name(name)
        return self.book[name]

    def get_sheet_by_index(self, index: int):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.worksheets[index]

    def _convert_cell(self, cell) -> Scalar:
        from openpyxl.cell.cell import (
            TYPE_ERROR,
            TYPE_NUMERIC,
        )

        if cell.value is None:
            return ""  # compat with xlrd
        elif cell.data_type == TYPE_ERROR:
            return np.nan
        elif cell.data_type == TYPE_NUMERIC:
            val = int(cell.value)
            if val == cell.value:
                return val
            return float(cell.value)

        return cell.value

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar]]:
        if self.book.read_only:
            sheet.reset_dimensions()

        data: list[list[Scalar]] = []
        last_row_with_data = -1
        for row_number, row in enumerate(sheet.rows):
            converted_row = [self._convert_cell(cell) for cell in row]
            while converted_row and converted_row[-1] == "":
                # trim trailing empty elements
                converted_row.pop()
            if converted_row:
                last_row_with_data = row_number
            data.append(converted_row)
            if file_rows_needed is not None and len(data) >= file_rows_needed:
                break

        # Trim trailing empty rows
        data = data[: last_row_with_data + 1]

        if len(data) > 0:
            # extend rows to max width
            max_width = max(len(data_row) for data_row in data)
            if min(len(data_row) for data_row in data) < max_width:
                empty_cell: list[Scalar] = [""]
                data = [
                    data_row + (max_width - len(data_row)) * empty_cell
                    for data_row in data
                ]

        return data
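
The if_sheet_exists branches in OpenpyxlWriter._write_cells above are exercised through the public ExcelWriter API. A minimal usage sketch, assuming a hypothetical book.xlsx path and an installed openpyxl:

import pandas as pd

df = pd.DataFrame({"x": range(3)})

# First write creates the workbook via OpenpyxlWriter.
df.to_excel("book.xlsx", engine="openpyxl", sheet_name="run1", index=False)

# Reopening with mode="a" loads the existing workbook ("a" is handled as "r+"
# internally); if_sheet_exists selects the branch taken in _write_cells above.
with pd.ExcelWriter(
    "book.xlsx", engine="openpyxl", mode="a", if_sheet_exists="replace"
) as writer:
    df.to_excel(writer, sheet_name="run1", index=False)
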
venv/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py
ADDED
@@ -0,0 +1,127 @@
# pyright: reportMissingImports=false
from __future__ import annotations

from typing import TYPE_CHECKING

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from pyxlsb import Workbook

    from pandas._typing import (
        FilePath,
        ReadBuffer,
        Scalar,
        StorageOptions,
    )


class PyxlsbReader(BaseExcelReader["Workbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using pyxlsb engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object, or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("pyxlsb")
        # This will call load_workbook on the filepath or buffer
        # And set the result to the book-attribute
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Workbook]:
        from pyxlsb import Workbook

        return Workbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
    ) -> Workbook:
        from pyxlsb import open_workbook

        # TODO: hack in buffer capability
        # This might need some modifications to the Pyxlsb library
        # Actual work for opening it is in xlsbpackage.py, line 20-ish

        return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self) -> list[str]:
        return self.book.sheets

    def get_sheet_by_name(self, name: str):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.get_sheet(name)

    def get_sheet_by_index(self, index: int):
        self.raise_if_bad_sheet_by_index(index)
        # pyxlsb sheets are indexed from 1 onwards
        # There's a fix for this in the source, but the pypi package doesn't have it
        return self.book.get_sheet(index + 1)

    def _convert_cell(self, cell) -> Scalar:
        # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
        # This means that there is no way to read datetime types from an xlsb file yet
        if cell.v is None:
            return ""  # Prevents non-named columns from not showing up as Unnamed: i
        if isinstance(cell.v, float):
            val = int(cell.v)
            if val == cell.v:
                return val
            else:
                return float(cell.v)

        return cell.v

    def get_sheet_data(
        self,
        sheet,
        file_rows_needed: int | None = None,
    ) -> list[list[Scalar]]:
        data: list[list[Scalar]] = []
        previous_row_number = -1
        # When sparse=True the rows can have different lengths and empty rows are
        # not returned. The cells are namedtuples of row, col, value (r, c, v).
        for row in sheet.rows(sparse=True):
            row_number = row[0].r
            converted_row = [self._convert_cell(cell) for cell in row]
            while converted_row and converted_row[-1] == "":
                # trim trailing empty elements
                converted_row.pop()
            if converted_row:
                data.extend([[]] * (row_number - previous_row_number - 1))
                data.append(converted_row)
                previous_row_number = row_number
            if file_rows_needed is not None and len(data) >= file_rows_needed:
                break
        if data:
            # extend rows to max_width
            max_width = max(len(data_row) for data_row in data)
            if min(len(data_row) for data_row in data) < max_width:
                empty_cell: list[Scalar] = [""]
                data = [
                    data_row + (max_width - len(data_row)) * empty_cell
                    for data_row in data
                ]
        return data
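
Binary .xlsb workbooks are routed to PyxlsbReader above. A minimal usage sketch, assuming a hypothetical workbook.xlsb path and an installed pyxlsb; per the TODO in _convert_cell, datetime cells come back as Excel serial floats rather than timestamps:

import pandas as pd

df = pd.read_excel("workbook.xlsb", engine="pyxlsb")
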
venv/lib/python3.10/site-packages/pandas/io/excel/_util.py
ADDED
@@ -0,0 +1,334 @@
from __future__ import annotations

from collections.abc import (
    Hashable,
    Iterable,
    MutableMapping,
    Sequence,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    TypeVar,
    overload,
)

from pandas.compat._optional import import_optional_dependency

from pandas.core.dtypes.common import (
    is_integer,
    is_list_like,
)

if TYPE_CHECKING:
    from pandas.io.excel._base import ExcelWriter

    ExcelWriter_t = type[ExcelWriter]
    usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])

_writers: MutableMapping[str, ExcelWriter_t] = {}


def register_writer(klass: ExcelWriter_t) -> None:
    """
    Add engine to the excel writer registry.io.excel.

    You must use this method to integrate with ``to_excel``.

    Parameters
    ----------
    klass : ExcelWriter
    """
    if not callable(klass):
        raise ValueError("Can only register callables as engines")
    engine_name = klass._engine
    _writers[engine_name] = klass


def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:
    """
    Return the default reader/writer for the given extension.

    Parameters
    ----------
    ext : str
        The excel file extension for which to get the default engine.
    mode : str {'reader', 'writer'}
        Whether to get the default engine for reading or writing.
        Either 'reader' or 'writer'

    Returns
    -------
    str
        The default engine for the extension.
    """
    _default_readers = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "xls": "xlrd",
        "ods": "odf",
    }
    _default_writers = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "ods": "odf",
    }
    assert mode in ["reader", "writer"]
    if mode == "writer":
        # Prefer xlsxwriter over openpyxl if installed
        xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")
        if xlsxwriter:
            _default_writers["xlsx"] = "xlsxwriter"
        return _default_writers[ext]
    else:
        return _default_readers[ext]


def get_writer(engine_name: str) -> ExcelWriter_t:
    try:
        return _writers[engine_name]
    except KeyError as err:
        raise ValueError(f"No Excel writer '{engine_name}'") from err


def _excel2num(x: str) -> int:
    """
    Convert Excel column name like 'AB' to 0-based column index.

    Parameters
    ----------
    x : str
        The Excel column name to convert to a 0-based column index.

    Returns
    -------
    num : int
        The column index corresponding to the name.

    Raises
    ------
    ValueError
        Part of the Excel column name was invalid.
    """
    index = 0

    for c in x.upper().strip():
        cp = ord(c)

        if cp < ord("A") or cp > ord("Z"):
            raise ValueError(f"Invalid column name: {x}")

        index = index * 26 + cp - ord("A") + 1

    return index - 1


def _range2cols(areas: str) -> list[int]:
    """
    Convert comma separated list of column names and ranges to indices.

    Parameters
    ----------
    areas : str
        A string containing a sequence of column ranges (or areas).

    Returns
    -------
    cols : list
        A list of 0-based column indices.

    Examples
    --------
    >>> _range2cols('A:E')
    [0, 1, 2, 3, 4]
    >>> _range2cols('A,C,Z:AB')
    [0, 2, 25, 26, 27]
    """
    cols: list[int] = []

    for rng in areas.split(","):
        if ":" in rng:
            rngs = rng.split(":")
            cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))
        else:
            cols.append(_excel2num(rng))

    return cols


@overload
def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
    ...


@overload
def maybe_convert_usecols(usecols: list[str]) -> list[str]:
    ...


@overload
def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
    ...


@overload
def maybe_convert_usecols(usecols: None) -> None:
    ...


def maybe_convert_usecols(
    usecols: str | list[int] | list[str] | usecols_func | None,
) -> None | list[int] | list[str] | usecols_func:
    """
    Convert `usecols` into a compatible format for parsing in `parsers.py`.

    Parameters
    ----------
    usecols : object
        The use-columns object to potentially convert.

    Returns
    -------
    converted : object
        The compatible format of `usecols`.
    """
    if usecols is None:
        return usecols

    if is_integer(usecols):
        raise ValueError(
            "Passing an integer for `usecols` is no longer supported. "
            "Please pass in a list of int from 0 to `usecols` inclusive instead."
        )

    if isinstance(usecols, str):
        return _range2cols(usecols)

    return usecols


@overload
def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
    ...


@overload
def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
    ...


def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
    if freeze_panes is not None:
        if len(freeze_panes) == 2 and all(
            isinstance(item, int) for item in freeze_panes
        ):
            return True

        raise ValueError(
            "freeze_panes must be of form (row, column) "
            "where row and column are integers"
        )

    # freeze_panes wasn't specified, return False so it won't be applied
    # to output sheet
    return False


def fill_mi_header(
    row: list[Hashable], control_row: list[bool]
) -> tuple[list[Hashable], list[bool]]:
    """
    Forward fill blank entries in row but only inside the same parent index.

    Used for creating headers in Multiindex.

    Parameters
    ----------
    row : list
        List of items in a single row.
    control_row : list of bool
        Helps to determine if particular column is in same parent index as the
        previous value. Used to stop propagation of empty cells between
        different indexes.

    Returns
    -------
    Returns changed row and control_row
    """
    last = row[0]
    for i in range(1, len(row)):
        if not control_row[i]:
            last = row[i]

        if row[i] == "" or row[i] is None:
            row[i] = last
        else:
            control_row[i] = False
            last = row[i]

    return row, control_row


def pop_header_name(
    row: list[Hashable], index_col: int | Sequence[int]
) -> tuple[Hashable | None, list[Hashable]]:
    """
    Pop the header name for MultiIndex parsing.

    Parameters
    ----------
    row : list
        The data row to parse for the header name.
    index_col : int, list
        The index columns for our data. Assumed to be non-null.

    Returns
    -------
    header_name : str
        The extracted header name.
    trimmed_row : list
        The original data row with the header name removed.
    """
    # Pop out header name and fill w/blank.
    if is_list_like(index_col):
        assert isinstance(index_col, Iterable)
        i = max(index_col)
    else:
        assert not isinstance(index_col, Iterable)
        i = index_col

    header_name = row[i]
    header_name = None if header_name == "" else header_name

    return header_name, row[:i] + [""] + row[i + 1 :]


def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
    """
    Used to combine two sources of kwargs for the backend engine.

    Use of kwargs is deprecated, this function is solely for use in 1.3 and should
    be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs
    or kwargs must be None or empty respectively.

    Parameters
    ----------
    engine_kwargs: dict
        kwargs to be passed through to the engine.
    kwargs: dict
        kwargs to be passed through to the engine (deprecated)

    Returns
    -------
    engine_kwargs combined with kwargs
    """
    if engine_kwargs is None:
        result = {}
    else:
        result = engine_kwargs.copy()
    result.update(kwargs)
    return result
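
The column-letter helpers above are what expand a read_excel usecols string into 0-based indices. A minimal sketch calling these private helpers directly (private API, so the import path may change between pandas versions):

from pandas.io.excel._util import _excel2num, _range2cols

# Excel-style letters map to 0-based positions.
print(_excel2num("AB"))      # 27
print(_range2cols("A,C:E"))  # [0, 2, 3, 4]

# The same string form is what read_excel(usecols="A,C:E") feeds through
# maybe_convert_usecols before parsing.
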
venv/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py
ADDED
@@ -0,0 +1,143 @@
from __future__ import annotations

from datetime import time
import math
from typing import TYPE_CHECKING

import numpy as np

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from xlrd import Book

    from pandas._typing import (
        Scalar,
        StorageOptions,
    )


class XlrdReader(BaseExcelReader["Book"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using xlrd engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
        import_optional_dependency("xlrd", extra=err_msg)
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Book]:
        from xlrd import Book

        return Book

    def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book:
        from xlrd import open_workbook

        if hasattr(filepath_or_buffer, "read"):
            data = filepath_or_buffer.read()
            return open_workbook(file_contents=data, **engine_kwargs)
        else:
            return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self):
        return self.book.sheet_names()

    def get_sheet_by_name(self, name):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.sheet_by_name(name)

    def get_sheet_by_index(self, index):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.sheet_by_index(index)

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar]]:
        from xlrd import (
            XL_CELL_BOOLEAN,
            XL_CELL_DATE,
            XL_CELL_ERROR,
            XL_CELL_NUMBER,
            xldate,
        )

        epoch1904 = self.book.datemode

        def _parse_cell(cell_contents, cell_typ):
            """
            converts the contents of the cell into a pandas appropriate object
            """
            if cell_typ == XL_CELL_DATE:
                # Use the newer xlrd datetime handling.
                try:
                    cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
                except OverflowError:
                    return cell_contents

                # Excel doesn't distinguish between dates and time,
                # so we treat dates on the epoch as times only.
                # Also, Excel supports 1900 and 1904 epochs.
                year = (cell_contents.timetuple())[0:3]
                if (not epoch1904 and year == (1899, 12, 31)) or (
                    epoch1904 and year == (1904, 1, 1)
                ):
                    cell_contents = time(
                        cell_contents.hour,
                        cell_contents.minute,
                        cell_contents.second,
                        cell_contents.microsecond,
                    )

            elif cell_typ == XL_CELL_ERROR:
                cell_contents = np.nan
            elif cell_typ == XL_CELL_BOOLEAN:
                cell_contents = bool(cell_contents)
            elif cell_typ == XL_CELL_NUMBER:
                # GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
                if math.isfinite(cell_contents):
                    # GH54564 - don't attempt to convert NaN/Inf
                    val = int(cell_contents)
                    if val == cell_contents:
                        cell_contents = val
            return cell_contents

        data = []

        nrows = sheet.nrows
        if file_rows_needed is not None:
            nrows = min(nrows, file_rows_needed)
        for i in range(nrows):
            row = [
                _parse_cell(value, typ)
                for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
            ]
            data.append(row)

        return data
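A usage sketch for the reader above (the file name is a placeholder): requesting engine="xlrd" through the public read_excel API dispatches to XlrdReader, which requires xlrd >= 2.0.1 and is used for the legacy .xls format.

import pandas as pd

# Hypothetical legacy workbook; xlrd >= 2.0.1 must be installed.
df = pd.read_excel("legacy_report.xls", engine="xlrd", sheet_name=0)
print(df.head())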
venv/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py
ADDED
@@ -0,0 +1,284 @@
from __future__ import annotations

import json
from typing import (
    TYPE_CHECKING,
    Any,
)

from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ExcelWriterIfSheetExists,
        FilePath,
        StorageOptions,
        WriteExcelBuffer,
    )


class _XlsxStyler:
    # Map from openpyxl-oriented styles to flatter xlsxwriter representation
    # Ordering necessary for both determinism and because some are keyed by
    # prefixes of others.
    STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
        "font": [
            (("name",), "font_name"),
            (("sz",), "font_size"),
            (("size",), "font_size"),
            (("color", "rgb"), "font_color"),
            (("color",), "font_color"),
            (("b",), "bold"),
            (("bold",), "bold"),
            (("i",), "italic"),
            (("italic",), "italic"),
            (("u",), "underline"),
            (("underline",), "underline"),
            (("strike",), "font_strikeout"),
            (("vertAlign",), "font_script"),
            (("vertalign",), "font_script"),
        ],
        "number_format": [(("format_code",), "num_format"), ((), "num_format")],
        "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
        "alignment": [
            (("horizontal",), "align"),
            (("vertical",), "valign"),
            (("text_rotation",), "rotation"),
            (("wrap_text",), "text_wrap"),
            (("indent",), "indent"),
            (("shrink_to_fit",), "shrink"),
        ],
        "fill": [
            (("patternType",), "pattern"),
            (("patterntype",), "pattern"),
            (("fill_type",), "pattern"),
            (("start_color", "rgb"), "fg_color"),
            (("fgColor", "rgb"), "fg_color"),
            (("fgcolor", "rgb"), "fg_color"),
            (("start_color",), "fg_color"),
            (("fgColor",), "fg_color"),
            (("fgcolor",), "fg_color"),
            (("end_color", "rgb"), "bg_color"),
            (("bgColor", "rgb"), "bg_color"),
            (("bgcolor", "rgb"), "bg_color"),
            (("end_color",), "bg_color"),
            (("bgColor",), "bg_color"),
            (("bgcolor",), "bg_color"),
        ],
        "border": [
            (("color", "rgb"), "border_color"),
            (("color",), "border_color"),
            (("style",), "border"),
            (("top", "color", "rgb"), "top_color"),
            (("top", "color"), "top_color"),
            (("top", "style"), "top"),
            (("top",), "top"),
            (("right", "color", "rgb"), "right_color"),
            (("right", "color"), "right_color"),
            (("right", "style"), "right"),
            (("right",), "right"),
            (("bottom", "color", "rgb"), "bottom_color"),
            (("bottom", "color"), "bottom_color"),
            (("bottom", "style"), "bottom"),
            (("bottom",), "bottom"),
            (("left", "color", "rgb"), "left_color"),
            (("left", "color"), "left_color"),
            (("left", "style"), "left"),
            (("left",), "left"),
        ],
    }

    @classmethod
    def convert(cls, style_dict, num_format_str=None):
        """
        converts a style_dict to an xlsxwriter format dict

        Parameters
        ----------
        style_dict : style dictionary to convert
        num_format_str : optional number format string
        """
        # Create a XlsxWriter format object.
        props = {}

        if num_format_str is not None:
            props["num_format"] = num_format_str

        if style_dict is None:
            return props

        if "borders" in style_dict:
            style_dict = style_dict.copy()
            style_dict["border"] = style_dict.pop("borders")

        for style_group_key, style_group in style_dict.items():
            for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
                # src is a sequence of keys into a nested dict
                # dst is a flat key
                if dst in props:
                    continue
                v = style_group
                for k in src:
                    try:
                        v = v[k]
                    except (KeyError, TypeError):
                        break
                else:
                    props[dst] = v

        if isinstance(props.get("pattern"), str):
            # TODO: support other fill patterns
            props["pattern"] = 0 if props["pattern"] == "none" else 1

        for k in ["border", "top", "right", "bottom", "left"]:
            if isinstance(props.get(k), str):
                try:
                    props[k] = [
                        "none",
                        "thin",
                        "medium",
                        "dashed",
                        "dotted",
                        "thick",
                        "double",
                        "hair",
                        "mediumDashed",
                        "dashDot",
                        "mediumDashDot",
                        "dashDotDot",
                        "mediumDashDotDot",
                        "slantDashDot",
                    ].index(props[k])
                except ValueError:
                    props[k] = 2

        if isinstance(props.get("font_script"), str):
            props["font_script"] = ["baseline", "superscript", "subscript"].index(
                props["font_script"]
            )

        if isinstance(props.get("underline"), str):
            props["underline"] = {
                "none": 0,
                "single": 1,
                "double": 2,
                "singleAccounting": 33,
                "doubleAccounting": 34,
            }[props["underline"]]

        # GH 30107 - xlsxwriter uses different name
        if props.get("valign") == "center":
            props["valign"] = "vcenter"

        return props


class XlsxWriter(ExcelWriter):
    _engine = "xlsxwriter"
    _supported_extensions = (".xlsx",)

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        if mode == "a":
            raise ValueError("Append mode is not supported with xlsxwriter!")

        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        try:
            self._book = Workbook(self._handles.handle, **engine_kwargs)
        except TypeError:
            self._handles.handle.close()
            raise

    @property
    def book(self):
        """
        Book instance of class xlsxwriter.Workbook.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        result = self.book.sheetnames
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        self.book.close()

    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        wks = self.book.get_worksheet_by_name(sheet_name)
        if wks is None:
            wks = self.book.add_worksheet(sheet_name)

        style_dict = {"null": None}

        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)
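A usage sketch for the writer above (the output path is a placeholder): engine_kwargs are forwarded to xlsxwriter.Workbook, and mode="a" raises because append is unsupported, as the __init__ shows.

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})

# engine_kwargs is passed straight to xlsxwriter.Workbook; the "options"
# dict is one of xlsxwriter's constructor options, shown only as an example.
with pd.ExcelWriter(
    "out.xlsx",
    engine="xlsxwriter",
    engine_kwargs={"options": {"nan_inf_to_errors": True}},
) as writer:
    df.to_excel(writer, sheet_name="Sheet1", index=False)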
venv/lib/python3.10/site-packages/pandas/io/json/__init__.py
ADDED
@@ -0,0 +1,15 @@
from pandas.io.json._json import (
    read_json,
    to_json,
    ujson_dumps,
    ujson_loads,
)
from pandas.io.json._table_schema import build_table_schema

__all__ = [
    "ujson_dumps",
    "ujson_loads",
    "read_json",
    "to_json",
    "build_table_schema",
]
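A short sketch of the re-exported JSON helpers (the frame contents are made up): to_json/read_json round-trip a DataFrame, and build_table_schema produces a Table Schema description of its columns.

from io import StringIO

import pandas as pd
from pandas.io.json import build_table_schema

df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})

# Round-trip through JSON text.
payload = df.to_json(orient="records")
restored = pd.read_json(StringIO(payload), orient="records")

# Table Schema view of the frame (index + columns with inferred types).
schema = build_table_schema(df)
print(schema["fields"])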
venv/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (415 Bytes)
venv/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc
ADDED
Binary file (39 kB)