Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
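The upload-large-folder tool named in the commit message is the resumable bulk uploader from huggingface_hub. A minimal sketch of the kind of call that produces a commit like this (assuming a recent huggingface_hub; the repo id and local path below are hypothetical):

from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="user/llmeval-env",   # hypothetical target repository
    folder_path="./llmeval-env",  # local folder whose files appear in this diff
    repo_type="dataset",          # assumption: a dataset repo
)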
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__init__.py +13 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/api.py +65 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/common.py +1267 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__init__.py +19 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_calamine.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_base.py +1659 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_calamine.py +121 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py +253 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py +357 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py +639 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py +127 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_util.py +334 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py +143 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py +284 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/gbq.py +255 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/json/__init__.py +15 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_normalize.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_table_schema.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc
ADDED: Binary file (1.42 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc
ADDED: Binary file (2.31 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc
ADDED: Binary file (78.4 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc
ADDED: Binary file (159 kB)

llmeval-env/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc
ADDED: Binary file (9.75 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# ruff: noqa: TCH004
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    # import modules that have public classes/functions
+    from pandas.io import (
+        formats,
+        json,
+        stata,
+    )
+
+    # mark only those modules as public
+    __all__ = ["formats", "json", "stata"]
llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc
ADDED: Binary file (329 Bytes)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc
ADDED: Binary file (1.2 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc
ADDED: Binary file (1.32 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc
ADDED: Binary file (5.25 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc
ADDED: Binary file (27.8 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc
ADDED: Binary file (4.24 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc
ADDED: Binary file (8.8 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc
ADDED: Binary file (36.5 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc
ADDED: Binary file (7.66 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc
ADDED: Binary file (18.7 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc
ADDED: Binary file (5.84 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-310.pyc
ADDED: Binary file (138 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc
ADDED: Binary file (2.31 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc
ADDED: Binary file (79.2 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc
ADDED: Binary file (103 kB)

llmeval-env/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc
ADDED: Binary file (34 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/api.py
ADDED
@@ -0,0 +1,65 @@
+"""
+Data IO api
+"""
+
+from pandas.io.clipboards import read_clipboard
+from pandas.io.excel import (
+    ExcelFile,
+    ExcelWriter,
+    read_excel,
+)
+from pandas.io.feather_format import read_feather
+from pandas.io.gbq import read_gbq
+from pandas.io.html import read_html
+from pandas.io.json import read_json
+from pandas.io.orc import read_orc
+from pandas.io.parquet import read_parquet
+from pandas.io.parsers import (
+    read_csv,
+    read_fwf,
+    read_table,
+)
+from pandas.io.pickle import (
+    read_pickle,
+    to_pickle,
+)
+from pandas.io.pytables import (
+    HDFStore,
+    read_hdf,
+)
+from pandas.io.sas import read_sas
+from pandas.io.spss import read_spss
+from pandas.io.sql import (
+    read_sql,
+    read_sql_query,
+    read_sql_table,
+)
+from pandas.io.stata import read_stata
+from pandas.io.xml import read_xml
+
+__all__ = [
+    "ExcelFile",
+    "ExcelWriter",
+    "HDFStore",
+    "read_clipboard",
+    "read_csv",
+    "read_excel",
+    "read_feather",
+    "read_fwf",
+    "read_gbq",
+    "read_hdf",
+    "read_html",
+    "read_json",
+    "read_orc",
+    "read_parquet",
+    "read_pickle",
+    "read_sas",
+    "read_spss",
+    "read_sql",
+    "read_sql_query",
+    "read_sql_table",
+    "read_stata",
+    "read_table",
+    "read_xml",
+    "to_pickle",
+]
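For context, the names re-exported by pandas/io/api.py above are the same ones pandas exposes at its top level (pandas/__init__.py imports from pandas.io.api). A quick sanity check, assuming a standard pandas install:

import pandas as pd
import pandas.io.api as io_api

# the top-level reader is the very object re-exported in pandas.io.api
assert pd.read_csv is io_api.read_csv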
llmeval-env/lib/python3.10/site-packages/pandas/io/common.py
ADDED
@@ -0,0 +1,1267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Common IO api utilities"""
|
2 |
+
from __future__ import annotations
|
3 |
+
|
4 |
+
from abc import (
|
5 |
+
ABC,
|
6 |
+
abstractmethod,
|
7 |
+
)
|
8 |
+
import codecs
|
9 |
+
from collections import defaultdict
|
10 |
+
from collections.abc import (
|
11 |
+
Hashable,
|
12 |
+
Mapping,
|
13 |
+
Sequence,
|
14 |
+
)
|
15 |
+
import dataclasses
|
16 |
+
import functools
|
17 |
+
import gzip
|
18 |
+
from io import (
|
19 |
+
BufferedIOBase,
|
20 |
+
BytesIO,
|
21 |
+
RawIOBase,
|
22 |
+
StringIO,
|
23 |
+
TextIOBase,
|
24 |
+
TextIOWrapper,
|
25 |
+
)
|
26 |
+
import mmap
|
27 |
+
import os
|
28 |
+
from pathlib import Path
|
29 |
+
import re
|
30 |
+
import tarfile
|
31 |
+
from typing import (
|
32 |
+
IO,
|
33 |
+
TYPE_CHECKING,
|
34 |
+
Any,
|
35 |
+
AnyStr,
|
36 |
+
DefaultDict,
|
37 |
+
Generic,
|
38 |
+
Literal,
|
39 |
+
TypeVar,
|
40 |
+
cast,
|
41 |
+
overload,
|
42 |
+
)
|
43 |
+
from urllib.parse import (
|
44 |
+
urljoin,
|
45 |
+
urlparse as parse_url,
|
46 |
+
uses_netloc,
|
47 |
+
uses_params,
|
48 |
+
uses_relative,
|
49 |
+
)
|
50 |
+
import warnings
|
51 |
+
import zipfile
|
52 |
+
|
53 |
+
from pandas._typing import (
|
54 |
+
BaseBuffer,
|
55 |
+
ReadCsvBuffer,
|
56 |
+
)
|
57 |
+
from pandas.compat import (
|
58 |
+
get_bz2_file,
|
59 |
+
get_lzma_file,
|
60 |
+
)
|
61 |
+
from pandas.compat._optional import import_optional_dependency
|
62 |
+
from pandas.util._decorators import doc
|
63 |
+
from pandas.util._exceptions import find_stack_level
|
64 |
+
|
65 |
+
from pandas.core.dtypes.common import (
|
66 |
+
is_bool,
|
67 |
+
is_file_like,
|
68 |
+
is_integer,
|
69 |
+
is_list_like,
|
70 |
+
)
|
71 |
+
from pandas.core.dtypes.generic import ABCMultiIndex
|
72 |
+
|
73 |
+
from pandas.core.shared_docs import _shared_docs
|
74 |
+
|
75 |
+
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
|
76 |
+
_VALID_URLS.discard("")
|
77 |
+
_RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://")
|
78 |
+
|
79 |
+
BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer)
|
80 |
+
|
81 |
+
|
82 |
+
if TYPE_CHECKING:
|
83 |
+
from types import TracebackType
|
84 |
+
|
85 |
+
from pandas._typing import (
|
86 |
+
CompressionDict,
|
87 |
+
CompressionOptions,
|
88 |
+
FilePath,
|
89 |
+
ReadBuffer,
|
90 |
+
StorageOptions,
|
91 |
+
WriteBuffer,
|
92 |
+
)
|
93 |
+
|
94 |
+
from pandas import MultiIndex
|
95 |
+
|
96 |
+
|
97 |
+
@dataclasses.dataclass
|
98 |
+
class IOArgs:
|
99 |
+
"""
|
100 |
+
Return value of io/common.py:_get_filepath_or_buffer.
|
101 |
+
"""
|
102 |
+
|
103 |
+
filepath_or_buffer: str | BaseBuffer
|
104 |
+
encoding: str
|
105 |
+
mode: str
|
106 |
+
compression: CompressionDict
|
107 |
+
should_close: bool = False
|
108 |
+
|
109 |
+
|
110 |
+
@dataclasses.dataclass
|
111 |
+
class IOHandles(Generic[AnyStr]):
|
112 |
+
"""
|
113 |
+
Return value of io/common.py:get_handle
|
114 |
+
|
115 |
+
Can be used as a context manager.
|
116 |
+
|
117 |
+
This is used to easily close created buffers and to handle corner cases when
|
118 |
+
TextIOWrapper is inserted.
|
119 |
+
|
120 |
+
handle: The file handle to be used.
|
121 |
+
created_handles: All file handles that are created by get_handle
|
122 |
+
is_wrapped: Whether a TextIOWrapper needs to be detached.
|
123 |
+
"""
|
124 |
+
|
125 |
+
# handle might not implement the IO-interface
|
126 |
+
handle: IO[AnyStr]
|
127 |
+
compression: CompressionDict
|
128 |
+
created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)
|
129 |
+
is_wrapped: bool = False
|
130 |
+
|
131 |
+
def close(self) -> None:
|
132 |
+
"""
|
133 |
+
Close all created buffers.
|
134 |
+
|
135 |
+
Note: If a TextIOWrapper was inserted, it is flushed and detached to
|
136 |
+
avoid closing the potentially user-created buffer.
|
137 |
+
"""
|
138 |
+
if self.is_wrapped:
|
139 |
+
assert isinstance(self.handle, TextIOWrapper)
|
140 |
+
self.handle.flush()
|
141 |
+
self.handle.detach()
|
142 |
+
self.created_handles.remove(self.handle)
|
143 |
+
for handle in self.created_handles:
|
144 |
+
handle.close()
|
145 |
+
self.created_handles = []
|
146 |
+
self.is_wrapped = False
|
147 |
+
|
148 |
+
def __enter__(self) -> IOHandles[AnyStr]:
|
149 |
+
return self
|
150 |
+
|
151 |
+
def __exit__(
|
152 |
+
self,
|
153 |
+
exc_type: type[BaseException] | None,
|
154 |
+
exc_value: BaseException | None,
|
155 |
+
traceback: TracebackType | None,
|
156 |
+
) -> None:
|
157 |
+
self.close()
|
158 |
+
|
159 |
+
|
160 |
+
def is_url(url: object) -> bool:
|
161 |
+
"""
|
162 |
+
Check to see if a URL has a valid protocol.
|
163 |
+
|
164 |
+
Parameters
|
165 |
+
----------
|
166 |
+
url : str or unicode
|
167 |
+
|
168 |
+
Returns
|
169 |
+
-------
|
170 |
+
isurl : bool
|
171 |
+
If `url` has a valid protocol return True otherwise False.
|
172 |
+
"""
|
173 |
+
if not isinstance(url, str):
|
174 |
+
return False
|
175 |
+
return parse_url(url).scheme in _VALID_URLS
|
176 |
+
|
177 |
+
|
178 |
+
@overload
|
179 |
+
def _expand_user(filepath_or_buffer: str) -> str:
|
180 |
+
...
|
181 |
+
|
182 |
+
|
183 |
+
@overload
|
184 |
+
def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT:
|
185 |
+
...
|
186 |
+
|
187 |
+
|
188 |
+
def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
|
189 |
+
"""
|
190 |
+
Return the argument with an initial component of ~ or ~user
|
191 |
+
replaced by that user's home directory.
|
192 |
+
|
193 |
+
Parameters
|
194 |
+
----------
|
195 |
+
filepath_or_buffer : object to be converted if possible
|
196 |
+
|
197 |
+
Returns
|
198 |
+
-------
|
199 |
+
expanded_filepath_or_buffer : an expanded filepath or the
|
200 |
+
input if not expandable
|
201 |
+
"""
|
202 |
+
if isinstance(filepath_or_buffer, str):
|
203 |
+
return os.path.expanduser(filepath_or_buffer)
|
204 |
+
return filepath_or_buffer
|
205 |
+
|
206 |
+
|
207 |
+
def validate_header_arg(header: object) -> None:
|
208 |
+
if header is None:
|
209 |
+
return
|
210 |
+
if is_integer(header):
|
211 |
+
header = cast(int, header)
|
212 |
+
if header < 0:
|
213 |
+
# GH 27779
|
214 |
+
raise ValueError(
|
215 |
+
"Passing negative integer to header is invalid. "
|
216 |
+
"For no header, use header=None instead"
|
217 |
+
)
|
218 |
+
return
|
219 |
+
if is_list_like(header, allow_sets=False):
|
220 |
+
header = cast(Sequence, header)
|
221 |
+
if not all(map(is_integer, header)):
|
222 |
+
raise ValueError("header must be integer or list of integers")
|
223 |
+
if any(i < 0 for i in header):
|
224 |
+
raise ValueError("cannot specify multi-index header with negative integers")
|
225 |
+
return
|
226 |
+
if is_bool(header):
|
227 |
+
raise TypeError(
|
228 |
+
"Passing a bool to header is invalid. Use header=None for no header or "
|
229 |
+
"header=int or list-like of ints to specify "
|
230 |
+
"the row(s) making up the column names"
|
231 |
+
)
|
232 |
+
# GH 16338
|
233 |
+
raise ValueError("header must be integer or list of integers")
|
234 |
+
|
235 |
+
|
236 |
+
@overload
|
237 |
+
def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str:
|
238 |
+
...
|
239 |
+
|
240 |
+
|
241 |
+
@overload
|
242 |
+
def stringify_path(
|
243 |
+
filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...
|
244 |
+
) -> BaseBufferT:
|
245 |
+
...
|
246 |
+
|
247 |
+
|
248 |
+
def stringify_path(
|
249 |
+
filepath_or_buffer: FilePath | BaseBufferT,
|
250 |
+
convert_file_like: bool = False,
|
251 |
+
) -> str | BaseBufferT:
|
252 |
+
"""
|
253 |
+
Attempt to convert a path-like object to a string.
|
254 |
+
|
255 |
+
Parameters
|
256 |
+
----------
|
257 |
+
filepath_or_buffer : object to be converted
|
258 |
+
|
259 |
+
Returns
|
260 |
+
-------
|
261 |
+
str_filepath_or_buffer : maybe a string version of the object
|
262 |
+
|
263 |
+
Notes
|
264 |
+
-----
|
265 |
+
Objects supporting the fspath protocol are coerced
|
266 |
+
according to its __fspath__ method.
|
267 |
+
|
268 |
+
Any other object is passed through unchanged, which includes bytes,
|
269 |
+
strings, buffers, or anything else that's not even path-like.
|
270 |
+
"""
|
271 |
+
if not convert_file_like and is_file_like(filepath_or_buffer):
|
272 |
+
# GH 38125: some fsspec objects implement os.PathLike but have already opened a
|
273 |
+
# file. This prevents opening the file a second time. infer_compression calls
|
274 |
+
# this function with convert_file_like=True to infer the compression.
|
275 |
+
return cast(BaseBufferT, filepath_or_buffer)
|
276 |
+
|
277 |
+
if isinstance(filepath_or_buffer, os.PathLike):
|
278 |
+
filepath_or_buffer = filepath_or_buffer.__fspath__()
|
279 |
+
return _expand_user(filepath_or_buffer)
|
280 |
+
|
281 |
+
|
282 |
+
def urlopen(*args, **kwargs):
|
283 |
+
"""
|
284 |
+
Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
|
285 |
+
the stdlib.
|
286 |
+
"""
|
287 |
+
import urllib.request
|
288 |
+
|
289 |
+
return urllib.request.urlopen(*args, **kwargs)
|
290 |
+
|
291 |
+
|
292 |
+
def is_fsspec_url(url: FilePath | BaseBuffer) -> bool:
|
293 |
+
"""
|
294 |
+
Returns true if the given URL looks like
|
295 |
+
something fsspec can handle
|
296 |
+
"""
|
297 |
+
return (
|
298 |
+
isinstance(url, str)
|
299 |
+
and bool(_RFC_3986_PATTERN.match(url))
|
300 |
+
and not url.startswith(("http://", "https://"))
|
301 |
+
)
|
302 |
+
|
303 |
+
|
304 |
+
@doc(
|
305 |
+
storage_options=_shared_docs["storage_options"],
|
306 |
+
compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
|
307 |
+
)
|
308 |
+
def _get_filepath_or_buffer(
|
309 |
+
filepath_or_buffer: FilePath | BaseBuffer,
|
310 |
+
encoding: str = "utf-8",
|
311 |
+
compression: CompressionOptions | None = None,
|
312 |
+
mode: str = "r",
|
313 |
+
storage_options: StorageOptions | None = None,
|
314 |
+
) -> IOArgs:
|
315 |
+
"""
|
316 |
+
If the filepath_or_buffer is a url, translate and return the buffer.
|
317 |
+
Otherwise passthrough.
|
318 |
+
|
319 |
+
Parameters
|
320 |
+
----------
|
321 |
+
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
|
322 |
+
or buffer
|
323 |
+
{compression_options}
|
324 |
+
|
325 |
+
.. versionchanged:: 1.4.0 Zstandard support.
|
326 |
+
|
327 |
+
encoding : the encoding to use to decode bytes, default is 'utf-8'
|
328 |
+
mode : str, optional
|
329 |
+
|
330 |
+
{storage_options}
|
331 |
+
|
332 |
+
|
333 |
+
Returns the dataclass IOArgs.
|
334 |
+
"""
|
335 |
+
filepath_or_buffer = stringify_path(filepath_or_buffer)
|
336 |
+
|
337 |
+
# handle compression dict
|
338 |
+
compression_method, compression = get_compression_method(compression)
|
339 |
+
compression_method = infer_compression(filepath_or_buffer, compression_method)
|
340 |
+
|
341 |
+
# GH21227 internal compression is not used for non-binary handles.
|
342 |
+
if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
|
343 |
+
warnings.warn(
|
344 |
+
"compression has no effect when passing a non-binary object as input.",
|
345 |
+
RuntimeWarning,
|
346 |
+
stacklevel=find_stack_level(),
|
347 |
+
)
|
348 |
+
compression_method = None
|
349 |
+
|
350 |
+
compression = dict(compression, method=compression_method)
|
351 |
+
|
352 |
+
# bz2 and xz do not write the byte order mark for utf-16 and utf-32
|
353 |
+
# print a warning when writing such files
|
354 |
+
if (
|
355 |
+
"w" in mode
|
356 |
+
and compression_method in ["bz2", "xz"]
|
357 |
+
and encoding in ["utf-16", "utf-32"]
|
358 |
+
):
|
359 |
+
warnings.warn(
|
360 |
+
f"{compression} will not write the byte order mark for {encoding}",
|
361 |
+
UnicodeWarning,
|
362 |
+
stacklevel=find_stack_level(),
|
363 |
+
)
|
364 |
+
|
365 |
+
# Use binary mode when converting path-like objects to file-like objects (fsspec)
|
366 |
+
# except when text mode is explicitly requested. The original mode is returned if
|
367 |
+
# fsspec is not used.
|
368 |
+
fsspec_mode = mode
|
369 |
+
if "t" not in fsspec_mode and "b" not in fsspec_mode:
|
370 |
+
fsspec_mode += "b"
|
371 |
+
|
372 |
+
if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
|
373 |
+
# TODO: fsspec can also handle HTTP via requests, but leaving this
|
374 |
+
# unchanged. using fsspec appears to break the ability to infer if the
|
375 |
+
# server responded with gzipped data
|
376 |
+
storage_options = storage_options or {}
|
377 |
+
|
378 |
+
# waiting until now for importing to match intended lazy logic of
|
379 |
+
# urlopen function defined elsewhere in this module
|
380 |
+
import urllib.request
|
381 |
+
|
382 |
+
# assuming storage_options is to be interpreted as headers
|
383 |
+
req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
|
384 |
+
with urlopen(req_info) as req:
|
385 |
+
content_encoding = req.headers.get("Content-Encoding", None)
|
386 |
+
if content_encoding == "gzip":
|
387 |
+
# Override compression based on Content-Encoding header
|
388 |
+
compression = {"method": "gzip"}
|
389 |
+
reader = BytesIO(req.read())
|
390 |
+
return IOArgs(
|
391 |
+
filepath_or_buffer=reader,
|
392 |
+
encoding=encoding,
|
393 |
+
compression=compression,
|
394 |
+
should_close=True,
|
395 |
+
mode=fsspec_mode,
|
396 |
+
)
|
397 |
+
|
398 |
+
if is_fsspec_url(filepath_or_buffer):
|
399 |
+
assert isinstance(
|
400 |
+
filepath_or_buffer, str
|
401 |
+
) # just to appease mypy for this branch
|
402 |
+
# two special-case s3-like protocols; these have special meaning in Hadoop,
|
403 |
+
# but are equivalent to just "s3" from fsspec's point of view
|
404 |
+
# cc #11071
|
405 |
+
if filepath_or_buffer.startswith("s3a://"):
|
406 |
+
filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
|
407 |
+
if filepath_or_buffer.startswith("s3n://"):
|
408 |
+
filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
|
409 |
+
fsspec = import_optional_dependency("fsspec")
|
410 |
+
|
411 |
+
# If botocore is installed we fallback to reading with anon=True
|
412 |
+
# to allow reads from public buckets
|
413 |
+
err_types_to_retry_with_anon: list[Any] = []
|
414 |
+
try:
|
415 |
+
import_optional_dependency("botocore")
|
416 |
+
from botocore.exceptions import (
|
417 |
+
ClientError,
|
418 |
+
NoCredentialsError,
|
419 |
+
)
|
420 |
+
|
421 |
+
err_types_to_retry_with_anon = [
|
422 |
+
ClientError,
|
423 |
+
NoCredentialsError,
|
424 |
+
PermissionError,
|
425 |
+
]
|
426 |
+
except ImportError:
|
427 |
+
pass
|
428 |
+
|
429 |
+
try:
|
430 |
+
file_obj = fsspec.open(
|
431 |
+
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
|
432 |
+
).open()
|
433 |
+
# GH 34626 Reads from Public Buckets without Credentials needs anon=True
|
434 |
+
except tuple(err_types_to_retry_with_anon):
|
435 |
+
if storage_options is None:
|
436 |
+
storage_options = {"anon": True}
|
437 |
+
else:
|
438 |
+
# don't mutate user input.
|
439 |
+
storage_options = dict(storage_options)
|
440 |
+
storage_options["anon"] = True
|
441 |
+
file_obj = fsspec.open(
|
442 |
+
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
|
443 |
+
).open()
|
444 |
+
|
445 |
+
return IOArgs(
|
446 |
+
filepath_or_buffer=file_obj,
|
447 |
+
encoding=encoding,
|
448 |
+
compression=compression,
|
449 |
+
should_close=True,
|
450 |
+
mode=fsspec_mode,
|
451 |
+
)
|
452 |
+
elif storage_options:
|
453 |
+
raise ValueError(
|
454 |
+
"storage_options passed with file object or non-fsspec file path"
|
455 |
+
)
|
456 |
+
|
457 |
+
if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
|
458 |
+
return IOArgs(
|
459 |
+
filepath_or_buffer=_expand_user(filepath_or_buffer),
|
460 |
+
encoding=encoding,
|
461 |
+
compression=compression,
|
462 |
+
should_close=False,
|
463 |
+
mode=mode,
|
464 |
+
)
|
465 |
+
|
466 |
+
# is_file_like requires (read | write) & __iter__ but __iter__ is only
|
467 |
+
# needed for read_csv(engine=python)
|
468 |
+
if not (
|
469 |
+
hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write")
|
470 |
+
):
|
471 |
+
msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
|
472 |
+
raise ValueError(msg)
|
473 |
+
|
474 |
+
return IOArgs(
|
475 |
+
filepath_or_buffer=filepath_or_buffer,
|
476 |
+
encoding=encoding,
|
477 |
+
compression=compression,
|
478 |
+
should_close=False,
|
479 |
+
mode=mode,
|
480 |
+
)
|
481 |
+
|
482 |
+
|
483 |
+
def file_path_to_url(path: str) -> str:
|
484 |
+
"""
|
485 |
+
converts an absolute native path to a FILE URL.
|
486 |
+
|
487 |
+
Parameters
|
488 |
+
----------
|
489 |
+
path : a path in native format
|
490 |
+
|
491 |
+
Returns
|
492 |
+
-------
|
493 |
+
a valid FILE URL
|
494 |
+
"""
|
495 |
+
# lazify expensive import (~30ms)
|
496 |
+
from urllib.request import pathname2url
|
497 |
+
|
498 |
+
return urljoin("file:", pathname2url(path))
|
499 |
+
|
500 |
+
|
501 |
+
extension_to_compression = {
|
502 |
+
".tar": "tar",
|
503 |
+
".tar.gz": "tar",
|
504 |
+
".tar.bz2": "tar",
|
505 |
+
".tar.xz": "tar",
|
506 |
+
".gz": "gzip",
|
507 |
+
".bz2": "bz2",
|
508 |
+
".zip": "zip",
|
509 |
+
".xz": "xz",
|
510 |
+
".zst": "zstd",
|
511 |
+
}
|
512 |
+
_supported_compressions = set(extension_to_compression.values())
|
513 |
+
|
514 |
+
|
515 |
+
def get_compression_method(
|
516 |
+
compression: CompressionOptions,
|
517 |
+
) -> tuple[str | None, CompressionDict]:
|
518 |
+
"""
|
519 |
+
Simplifies a compression argument to a compression method string and
|
520 |
+
a mapping containing additional arguments.
|
521 |
+
|
522 |
+
Parameters
|
523 |
+
----------
|
524 |
+
compression : str or mapping
|
525 |
+
If string, specifies the compression method. If mapping, value at key
|
526 |
+
'method' specifies compression method.
|
527 |
+
|
528 |
+
Returns
|
529 |
+
-------
|
530 |
+
tuple of ({compression method}, Optional[str]
|
531 |
+
{compression arguments}, Dict[str, Any])
|
532 |
+
|
533 |
+
Raises
|
534 |
+
------
|
535 |
+
ValueError on mapping missing 'method' key
|
536 |
+
"""
|
537 |
+
compression_method: str | None
|
538 |
+
if isinstance(compression, Mapping):
|
539 |
+
compression_args = dict(compression)
|
540 |
+
try:
|
541 |
+
compression_method = compression_args.pop("method")
|
542 |
+
except KeyError as err:
|
543 |
+
raise ValueError("If mapping, compression must have key 'method'") from err
|
544 |
+
else:
|
545 |
+
compression_args = {}
|
546 |
+
compression_method = compression
|
547 |
+
return compression_method, compression_args
|
548 |
+
|
549 |
+
|
550 |
+
@doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer")
|
551 |
+
def infer_compression(
|
552 |
+
filepath_or_buffer: FilePath | BaseBuffer, compression: str | None
|
553 |
+
) -> str | None:
|
554 |
+
"""
|
555 |
+
Get the compression method for filepath_or_buffer. If compression='infer',
|
556 |
+
the inferred compression method is returned. Otherwise, the input
|
557 |
+
compression method is returned unchanged, unless it's invalid, in which
|
558 |
+
case an error is raised.
|
559 |
+
|
560 |
+
Parameters
|
561 |
+
----------
|
562 |
+
filepath_or_buffer : str or file handle
|
563 |
+
File path or object.
|
564 |
+
{compression_options}
|
565 |
+
|
566 |
+
.. versionchanged:: 1.4.0 Zstandard support.
|
567 |
+
|
568 |
+
Returns
|
569 |
+
-------
|
570 |
+
string or None
|
571 |
+
|
572 |
+
Raises
|
573 |
+
------
|
574 |
+
ValueError on invalid compression specified.
|
575 |
+
"""
|
576 |
+
if compression is None:
|
577 |
+
return None
|
578 |
+
|
579 |
+
# Infer compression
|
580 |
+
if compression == "infer":
|
581 |
+
# Convert all path types (e.g. pathlib.Path) to strings
|
582 |
+
filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
|
583 |
+
if not isinstance(filepath_or_buffer, str):
|
584 |
+
# Cannot infer compression of a buffer, assume no compression
|
585 |
+
return None
|
586 |
+
|
587 |
+
# Infer compression from the filename/URL extension
|
588 |
+
for extension, compression in extension_to_compression.items():
|
589 |
+
if filepath_or_buffer.lower().endswith(extension):
|
590 |
+
return compression
|
591 |
+
return None
|
592 |
+
|
593 |
+
# Compression has been specified. Check that it's valid
|
594 |
+
if compression in _supported_compressions:
|
595 |
+
return compression
|
596 |
+
|
597 |
+
valid = ["infer", None] + sorted(_supported_compressions)
|
598 |
+
msg = (
|
599 |
+
f"Unrecognized compression type: {compression}\n"
|
600 |
+
f"Valid compression types are {valid}"
|
601 |
+
)
|
602 |
+
raise ValueError(msg)
|
603 |
+
|
604 |
+
|
605 |
+
def check_parent_directory(path: Path | str) -> None:
|
606 |
+
"""
|
607 |
+
Check if parent directory of a file exists, raise OSError if it does not
|
608 |
+
|
609 |
+
Parameters
|
610 |
+
----------
|
611 |
+
path: Path or str
|
612 |
+
Path to check parent directory of
|
613 |
+
"""
|
614 |
+
parent = Path(path).parent
|
615 |
+
if not parent.is_dir():
|
616 |
+
raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'")
|
617 |
+
|
618 |
+
|
619 |
+
@overload
|
620 |
+
def get_handle(
|
621 |
+
path_or_buf: FilePath | BaseBuffer,
|
622 |
+
mode: str,
|
623 |
+
*,
|
624 |
+
encoding: str | None = ...,
|
625 |
+
compression: CompressionOptions = ...,
|
626 |
+
memory_map: bool = ...,
|
627 |
+
is_text: Literal[False],
|
628 |
+
errors: str | None = ...,
|
629 |
+
storage_options: StorageOptions = ...,
|
630 |
+
) -> IOHandles[bytes]:
|
631 |
+
...
|
632 |
+
|
633 |
+
|
634 |
+
@overload
|
635 |
+
def get_handle(
|
636 |
+
path_or_buf: FilePath | BaseBuffer,
|
637 |
+
mode: str,
|
638 |
+
*,
|
639 |
+
encoding: str | None = ...,
|
640 |
+
compression: CompressionOptions = ...,
|
641 |
+
memory_map: bool = ...,
|
642 |
+
is_text: Literal[True] = ...,
|
643 |
+
errors: str | None = ...,
|
644 |
+
storage_options: StorageOptions = ...,
|
645 |
+
) -> IOHandles[str]:
|
646 |
+
...
|
647 |
+
|
648 |
+
|
649 |
+
@overload
|
650 |
+
def get_handle(
|
651 |
+
path_or_buf: FilePath | BaseBuffer,
|
652 |
+
mode: str,
|
653 |
+
*,
|
654 |
+
encoding: str | None = ...,
|
655 |
+
compression: CompressionOptions = ...,
|
656 |
+
memory_map: bool = ...,
|
657 |
+
is_text: bool = ...,
|
658 |
+
errors: str | None = ...,
|
659 |
+
storage_options: StorageOptions = ...,
|
660 |
+
) -> IOHandles[str] | IOHandles[bytes]:
|
661 |
+
...
|
662 |
+
|
663 |
+
|
664 |
+
@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
|
665 |
+
def get_handle(
|
666 |
+
path_or_buf: FilePath | BaseBuffer,
|
667 |
+
mode: str,
|
668 |
+
*,
|
669 |
+
encoding: str | None = None,
|
670 |
+
compression: CompressionOptions | None = None,
|
671 |
+
memory_map: bool = False,
|
672 |
+
is_text: bool = True,
|
673 |
+
errors: str | None = None,
|
674 |
+
storage_options: StorageOptions | None = None,
|
675 |
+
) -> IOHandles[str] | IOHandles[bytes]:
|
676 |
+
"""
|
677 |
+
Get file handle for given path/buffer and mode.
|
678 |
+
|
679 |
+
Parameters
|
680 |
+
----------
|
681 |
+
path_or_buf : str or file handle
|
682 |
+
File path or object.
|
683 |
+
mode : str
|
684 |
+
Mode to open path_or_buf with.
|
685 |
+
encoding : str or None
|
686 |
+
Encoding to use.
|
687 |
+
{compression_options}
|
688 |
+
|
689 |
+
May be a dict with key 'method' as compression mode
|
690 |
+
and other keys as compression options if compression
|
691 |
+
mode is 'zip'.
|
692 |
+
|
693 |
+
Passing compression options as keys in dict is
|
694 |
+
supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.
|
695 |
+
|
696 |
+
.. versionchanged:: 1.4.0 Zstandard support.
|
697 |
+
|
698 |
+
memory_map : bool, default False
|
699 |
+
See parsers._parser_params for more information. Only used by read_csv.
|
700 |
+
is_text : bool, default True
|
701 |
+
Whether the type of the content passed to the file/buffer is string or
|
702 |
+
bytes. This is not the same as `"b" not in mode`. If a string content is
|
703 |
+
passed to a binary file/buffer, a wrapper is inserted.
|
704 |
+
errors : str, default 'strict'
|
705 |
+
Specifies how encoding and decoding errors are to be handled.
|
706 |
+
See the errors argument for :func:`open` for a full list
|
707 |
+
of options.
|
708 |
+
storage_options: StorageOptions = None
|
709 |
+
Passed to _get_filepath_or_buffer
|
710 |
+
|
711 |
+
Returns the dataclass IOHandles
|
712 |
+
"""
|
713 |
+
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
|
714 |
+
encoding = encoding or "utf-8"
|
715 |
+
|
716 |
+
errors = errors or "strict"
|
717 |
+
|
718 |
+
# read_csv does not know whether the buffer is opened in binary/text mode
|
719 |
+
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
|
720 |
+
mode += "b"
|
721 |
+
|
722 |
+
# validate encoding and errors
|
723 |
+
codecs.lookup(encoding)
|
724 |
+
if isinstance(errors, str):
|
725 |
+
codecs.lookup_error(errors)
|
726 |
+
|
727 |
+
# open URLs
|
728 |
+
ioargs = _get_filepath_or_buffer(
|
729 |
+
path_or_buf,
|
730 |
+
encoding=encoding,
|
731 |
+
compression=compression,
|
732 |
+
mode=mode,
|
733 |
+
storage_options=storage_options,
|
734 |
+
)
|
735 |
+
|
736 |
+
handle = ioargs.filepath_or_buffer
|
737 |
+
handles: list[BaseBuffer]
|
738 |
+
|
739 |
+
# memory mapping needs to be the first step
|
740 |
+
# only used for read_csv
|
741 |
+
handle, memory_map, handles = _maybe_memory_map(handle, memory_map)
|
742 |
+
|
743 |
+
is_path = isinstance(handle, str)
|
744 |
+
compression_args = dict(ioargs.compression)
|
745 |
+
compression = compression_args.pop("method")
|
746 |
+
|
747 |
+
# Only for write methods
|
748 |
+
if "r" not in mode and is_path:
|
749 |
+
check_parent_directory(str(handle))
|
750 |
+
|
751 |
+
if compression:
|
752 |
+
if compression != "zstd":
|
753 |
+
# compression libraries do not like an explicit text-mode
|
754 |
+
ioargs.mode = ioargs.mode.replace("t", "")
|
755 |
+
elif compression == "zstd" and "b" not in ioargs.mode:
|
756 |
+
# python-zstandard defaults to text mode, but we always expect
|
757 |
+
# compression libraries to use binary mode.
|
758 |
+
ioargs.mode += "b"
|
759 |
+
|
760 |
+
# GZ Compression
|
761 |
+
if compression == "gzip":
|
762 |
+
if isinstance(handle, str):
|
763 |
+
# error: Incompatible types in assignment (expression has type
|
764 |
+
# "GzipFile", variable has type "Union[str, BaseBuffer]")
|
765 |
+
handle = gzip.GzipFile( # type: ignore[assignment]
|
766 |
+
filename=handle,
|
767 |
+
mode=ioargs.mode,
|
768 |
+
**compression_args,
|
769 |
+
)
|
770 |
+
else:
|
771 |
+
handle = gzip.GzipFile(
|
772 |
+
# No overload variant of "GzipFile" matches argument types
|
773 |
+
# "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
|
774 |
+
fileobj=handle, # type: ignore[call-overload]
|
775 |
+
mode=ioargs.mode,
|
776 |
+
**compression_args,
|
777 |
+
)
|
778 |
+
|
779 |
+
# BZ Compression
|
780 |
+
elif compression == "bz2":
|
781 |
+
# Overload of "BZ2File" to handle pickle protocol 5
|
782 |
+
# "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
|
783 |
+
handle = get_bz2_file()( # type: ignore[call-overload]
|
784 |
+
handle,
|
785 |
+
mode=ioargs.mode,
|
786 |
+
**compression_args,
|
787 |
+
)
|
788 |
+
|
789 |
+
# ZIP Compression
|
790 |
+
elif compression == "zip":
|
791 |
+
# error: Argument 1 to "_BytesZipFile" has incompatible type
|
792 |
+
# "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]],
|
793 |
+
# ReadBuffer[bytes], WriteBuffer[bytes]]"
|
794 |
+
handle = _BytesZipFile(
|
795 |
+
handle, ioargs.mode, **compression_args # type: ignore[arg-type]
|
796 |
+
)
|
797 |
+
if handle.buffer.mode == "r":
|
798 |
+
handles.append(handle)
|
799 |
+
zip_names = handle.buffer.namelist()
|
800 |
+
if len(zip_names) == 1:
|
801 |
+
handle = handle.buffer.open(zip_names.pop())
|
802 |
+
elif not zip_names:
|
803 |
+
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
|
804 |
+
else:
|
805 |
+
raise ValueError(
|
806 |
+
"Multiple files found in ZIP file. "
|
807 |
+
f"Only one file per ZIP: {zip_names}"
|
808 |
+
)
|
809 |
+
|
810 |
+
# TAR Encoding
|
811 |
+
elif compression == "tar":
|
812 |
+
compression_args.setdefault("mode", ioargs.mode)
|
813 |
+
if isinstance(handle, str):
|
814 |
+
handle = _BytesTarFile(name=handle, **compression_args)
|
815 |
+
else:
|
816 |
+
# error: Argument "fileobj" to "_BytesTarFile" has incompatible
|
817 |
+
# type "BaseBuffer"; expected "Union[ReadBuffer[bytes],
|
818 |
+
# WriteBuffer[bytes], None]"
|
819 |
+
handle = _BytesTarFile(
|
820 |
+
fileobj=handle, **compression_args # type: ignore[arg-type]
|
821 |
+
)
|
822 |
+
assert isinstance(handle, _BytesTarFile)
|
823 |
+
if "r" in handle.buffer.mode:
|
824 |
+
handles.append(handle)
|
825 |
+
files = handle.buffer.getnames()
|
826 |
+
if len(files) == 1:
|
827 |
+
file = handle.buffer.extractfile(files[0])
|
828 |
+
assert file is not None
|
829 |
+
handle = file
|
830 |
+
elif not files:
|
831 |
+
raise ValueError(f"Zero files found in TAR archive {path_or_buf}")
|
832 |
+
else:
|
833 |
+
raise ValueError(
|
834 |
+
"Multiple files found in TAR archive. "
|
835 |
+
f"Only one file per TAR archive: {files}"
|
836 |
+
)
|
837 |
+
|
838 |
+
# XZ Compression
|
839 |
+
elif compression == "xz":
|
840 |
+
# error: Argument 1 to "LZMAFile" has incompatible type "Union[str,
|
841 |
+
# BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str],
|
842 |
+
# PathLike[bytes]], IO[bytes]], None]"
|
843 |
+
handle = get_lzma_file()(
|
844 |
+
handle, ioargs.mode, **compression_args # type: ignore[arg-type]
|
845 |
+
)
|
846 |
+
|
847 |
+
# Zstd Compression
|
848 |
+
elif compression == "zstd":
|
849 |
+
zstd = import_optional_dependency("zstandard")
|
850 |
+
if "r" in ioargs.mode:
|
851 |
+
open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)}
|
852 |
+
else:
|
853 |
+
open_args = {"cctx": zstd.ZstdCompressor(**compression_args)}
|
854 |
+
handle = zstd.open(
|
855 |
+
handle,
|
856 |
+
mode=ioargs.mode,
|
857 |
+
**open_args,
|
858 |
+
)
|
859 |
+
|
860 |
+
# Unrecognized Compression
|
861 |
+
else:
|
862 |
+
msg = f"Unrecognized compression type: {compression}"
|
863 |
+
raise ValueError(msg)
|
864 |
+
|
865 |
+
assert not isinstance(handle, str)
|
866 |
+
handles.append(handle)
|
867 |
+
|
868 |
+
elif isinstance(handle, str):
|
869 |
+
# Check whether the filename is to be opened in binary mode.
|
870 |
+
# Binary mode does not support 'encoding' and 'newline'.
|
871 |
+
if ioargs.encoding and "b" not in ioargs.mode:
|
872 |
+
# Encoding
|
873 |
+
handle = open(
|
874 |
+
handle,
|
875 |
+
ioargs.mode,
|
876 |
+
encoding=ioargs.encoding,
|
877 |
+
errors=errors,
|
878 |
+
newline="",
|
879 |
+
)
|
880 |
+
else:
|
881 |
+
# Binary mode
|
882 |
+
handle = open(handle, ioargs.mode)
|
883 |
+
handles.append(handle)
|
884 |
+
|
885 |
+
# Convert BytesIO or file objects passed with an encoding
|
886 |
+
is_wrapped = False
|
887 |
+
if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase):
|
888 |
+
# not added to handles as it does not open/buffer resources
|
889 |
+
handle = _BytesIOWrapper(
|
890 |
+
handle,
|
891 |
+
encoding=ioargs.encoding,
|
892 |
+
)
|
893 |
+
elif is_text and (
|
894 |
+
compression or memory_map or _is_binary_mode(handle, ioargs.mode)
|
895 |
+
):
|
896 |
+
if (
|
897 |
+
not hasattr(handle, "readable")
|
898 |
+
or not hasattr(handle, "writable")
|
899 |
+
or not hasattr(handle, "seekable")
|
900 |
+
):
|
901 |
+
handle = _IOWrapper(handle)
|
902 |
+
# error: Argument 1 to "TextIOWrapper" has incompatible type
|
903 |
+
# "_IOWrapper"; expected "IO[bytes]"
|
904 |
+
handle = TextIOWrapper(
|
905 |
+
handle, # type: ignore[arg-type]
|
906 |
+
encoding=ioargs.encoding,
|
907 |
+
errors=errors,
|
908 |
+
newline="",
|
909 |
+
)
|
910 |
+
handles.append(handle)
|
911 |
+
# only marked as wrapped when the caller provided a handle
|
912 |
+
is_wrapped = not (
|
913 |
+
isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
|
914 |
+
)
|
915 |
+
|
916 |
+
if "r" in ioargs.mode and not hasattr(handle, "read"):
|
917 |
+
raise TypeError(
|
918 |
+
"Expected file path name or file-like object, "
|
919 |
+
f"got {type(ioargs.filepath_or_buffer)} type"
|
920 |
+
)
|
921 |
+
|
922 |
+
handles.reverse() # close the most recently added buffer first
|
923 |
+
if ioargs.should_close:
|
924 |
+
assert not isinstance(ioargs.filepath_or_buffer, str)
|
925 |
+
handles.append(ioargs.filepath_or_buffer)
|
926 |
+
|
927 |
+
return IOHandles(
|
928 |
+
# error: Argument "handle" to "IOHandles" has incompatible type
|
929 |
+
# "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes],
|
930 |
+
# typing.IO[Any]]"; expected "pandas._typing.IO[Any]"
|
931 |
+
handle=handle, # type: ignore[arg-type]
|
932 |
+
# error: Argument "created_handles" to "IOHandles" has incompatible type
|
933 |
+
# "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]"
|
934 |
+
created_handles=handles, # type: ignore[arg-type]
|
935 |
+
is_wrapped=is_wrapped,
|
936 |
+
compression=ioargs.compression,
|
937 |
+
)
|
938 |
+
|
939 |
+
|
940 |
+
# error: Definition of "__enter__" in base class "IOBase" is incompatible
|
941 |
+
# with definition in base class "BinaryIO"
|
942 |
+
class _BufferedWriter(BytesIO, ABC): # type: ignore[misc]
|
943 |
+
"""
|
944 |
+
Some objects do not support multiple .write() calls (TarFile and ZipFile).
|
945 |
+
This wrapper writes to the underlying buffer on close.
|
946 |
+
"""
|
947 |
+
|
948 |
+
buffer = BytesIO()
|
949 |
+
|
950 |
+
@abstractmethod
|
951 |
+
def write_to_buffer(self) -> None:
|
952 |
+
...
|
953 |
+
|
954 |
+
def close(self) -> None:
|
955 |
+
if self.closed:
|
956 |
+
# already closed
|
957 |
+
return
|
958 |
+
if self.getbuffer().nbytes:
|
959 |
+
# write to buffer
|
960 |
+
self.seek(0)
|
961 |
+
with self.buffer:
|
962 |
+
self.write_to_buffer()
|
963 |
+
else:
|
964 |
+
self.buffer.close()
|
965 |
+
super().close()
|
966 |
+
|
967 |
+
|
968 |
+
class _BytesTarFile(_BufferedWriter):
|
969 |
+
def __init__(
|
970 |
+
self,
|
971 |
+
name: str | None = None,
|
972 |
+
mode: Literal["r", "a", "w", "x"] = "r",
|
973 |
+
fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None = None,
|
974 |
+
archive_name: str | None = None,
|
975 |
+
**kwargs,
|
976 |
+
) -> None:
|
977 |
+
super().__init__()
|
978 |
+
self.archive_name = archive_name
|
979 |
+
self.name = name
|
980 |
+
# error: Incompatible types in assignment (expression has type "TarFile",
|
981 |
+
# base class "_BufferedWriter" defined the type as "BytesIO")
|
982 |
+
self.buffer: tarfile.TarFile = tarfile.TarFile.open( # type: ignore[assignment]
|
983 |
+
name=name,
|
984 |
+
mode=self.extend_mode(mode),
|
985 |
+
fileobj=fileobj,
|
986 |
+
**kwargs,
|
987 |
+
)
|
988 |
+
|
989 |
+
def extend_mode(self, mode: str) -> str:
|
990 |
+
mode = mode.replace("b", "")
|
991 |
+
if mode != "w":
|
992 |
+
return mode
|
993 |
+
if self.name is not None:
|
994 |
+
suffix = Path(self.name).suffix
|
995 |
+
if suffix in (".gz", ".xz", ".bz2"):
|
996 |
+
mode = f"{mode}:{suffix[1:]}"
|
997 |
+
return mode
|
998 |
+
|
999 |
+
def infer_filename(self) -> str | None:
|
1000 |
+
"""
|
1001 |
+
If an explicit archive_name is not given, we still want the file inside the zip
|
1002 |
+
file not to be named something.tar, because that causes confusion (GH39465).
|
1003 |
+
"""
|
1004 |
+
if self.name is None:
|
1005 |
+
return None
|
1006 |
+
|
1007 |
+
filename = Path(self.name)
|
1008 |
+
if filename.suffix == ".tar":
|
1009 |
+
return filename.with_suffix("").name
|
1010 |
+
elif filename.suffix in (".tar.gz", ".tar.bz2", ".tar.xz"):
|
1011 |
+
return filename.with_suffix("").with_suffix("").name
|
1012 |
+
return filename.name
|
1013 |
+
|
1014 |
+
def write_to_buffer(self) -> None:
|
1015 |
+
# TarFile needs a non-empty string
|
1016 |
+
archive_name = self.archive_name or self.infer_filename() or "tar"
|
1017 |
+
tarinfo = tarfile.TarInfo(name=archive_name)
|
1018 |
+
tarinfo.size = len(self.getvalue())
|
1019 |
+
self.buffer.addfile(tarinfo, self)
|
1020 |
+
|
1021 |
+
|
1022 |
+
class _BytesZipFile(_BufferedWriter):
|
1023 |
+
def __init__(
|
1024 |
+
self,
|
1025 |
+
file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
|
1026 |
+
mode: str,
|
1027 |
+
archive_name: str | None = None,
|
1028 |
+
**kwargs,
|
1029 |
+
) -> None:
|
1030 |
+
super().__init__()
|
1031 |
+
mode = mode.replace("b", "")
|
1032 |
+
self.archive_name = archive_name
|
1033 |
+
|
1034 |
+
kwargs.setdefault("compression", zipfile.ZIP_DEFLATED)
|
1035 |
+
# error: Incompatible types in assignment (expression has type "ZipFile",
|
1036 |
+
# base class "_BufferedWriter" defined the type as "BytesIO")
|
1037 |
+
self.buffer: zipfile.ZipFile = zipfile.ZipFile( # type: ignore[assignment]
|
1038 |
+
file, mode, **kwargs
|
1039 |
+
)
|
1040 |
+
|
1041 |
+
def infer_filename(self) -> str | None:
|
1042 |
+
"""
|
1043 |
+
If an explicit archive_name is not given, we still want the file inside the zip
|
1044 |
+
file not to be named something.zip, because that causes confusion (GH39465).
|
1045 |
+
"""
|
1046 |
+
if isinstance(self.buffer.filename, (os.PathLike, str)):
|
1047 |
+
filename = Path(self.buffer.filename)
|
1048 |
+
if filename.suffix == ".zip":
|
1049 |
+
return filename.with_suffix("").name
|
1050 |
+
return filename.name
|
1051 |
+
return None
|
1052 |
+
|
1053 |
+
def write_to_buffer(self) -> None:
|
1054 |
+
# ZipFile needs a non-empty string
|
1055 |
+
archive_name = self.archive_name or self.infer_filename() or "zip"
|
1056 |
+
self.buffer.writestr(archive_name, self.getvalue())
|
1057 |
+
|
1058 |
+
|
1059 |
+
class _IOWrapper:
|
1060 |
+
# TextIOWrapper is overly strict: it request that the buffer has seekable, readable,
|
1061 |
+
# and writable. If we have a read-only buffer, we shouldn't need writable and vice
|
1062 |
+
# versa. Some buffers, are seek/read/writ-able but they do not have the "-able"
|
1063 |
+
# methods, e.g., tempfile.SpooledTemporaryFile.
|
1064 |
+
# If a buffer does not have the above "-able" methods, we simple assume they are
|
1065 |
+
# seek/read/writ-able.
|
1066 |
+
def __init__(self, buffer: BaseBuffer) -> None:
|
1067 |
+
self.buffer = buffer
|
1068 |
+
|
1069 |
+
def __getattr__(self, name: str):
|
1070 |
+
        return getattr(self.buffer, name)

    def readable(self) -> bool:
        if hasattr(self.buffer, "readable"):
            return self.buffer.readable()
        return True

    def seekable(self) -> bool:
        if hasattr(self.buffer, "seekable"):
            return self.buffer.seekable()
        return True

    def writable(self) -> bool:
        if hasattr(self.buffer, "writable"):
            return self.buffer.writable()
        return True


class _BytesIOWrapper:
    # Wrapper that wraps a StringIO buffer and reads bytes from it
    # Created for compat with pyarrow read_csv
    def __init__(self, buffer: StringIO | TextIOBase, encoding: str = "utf-8") -> None:
        self.buffer = buffer
        self.encoding = encoding
        # Because a character can be represented by more than 1 byte,
        # it is possible that reading will produce more bytes than n
        # We store the extra bytes in this overflow variable, and append the
        # overflow to the front of the bytestring the next time reading is performed
        self.overflow = b""

    def __getattr__(self, attr: str):
        return getattr(self.buffer, attr)

    def read(self, n: int | None = -1) -> bytes:
        assert self.buffer is not None
        bytestring = self.buffer.read(n).encode(self.encoding)
        # When n=-1/n greater than remaining bytes: Read entire file/rest of file
        combined_bytestring = self.overflow + bytestring
        if n is None or n < 0 or n >= len(combined_bytestring):
            self.overflow = b""
            return combined_bytestring
        else:
            to_return = combined_bytestring[:n]
            self.overflow = combined_bytestring[n:]
            return to_return


def _maybe_memory_map(
    handle: str | BaseBuffer, memory_map: bool
) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
    """Try to memory map file/buffer."""
    handles: list[BaseBuffer] = []
    memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
    if not memory_map:
        return handle, memory_map, handles

    # mmap used by only read_csv
    handle = cast(ReadCsvBuffer, handle)

    # need to open the file first
    if isinstance(handle, str):
        handle = open(handle, "rb")
        handles.append(handle)

    try:
        # open mmap and adds *-able
        # error: Argument 1 to "_IOWrapper" has incompatible type "mmap";
        # expected "BaseBuffer"
        wrapped = _IOWrapper(
            mmap.mmap(
                handle.fileno(), 0, access=mmap.ACCESS_READ  # type: ignore[arg-type]
            )
        )
    finally:
        for handle in reversed(handles):
            # error: "BaseBuffer" has no attribute "close"
            handle.close()  # type: ignore[attr-defined]

    return wrapped, memory_map, [wrapped]


def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool:
    """Test whether file exists."""
    exists = False
    filepath_or_buffer = stringify_path(filepath_or_buffer)
    if not isinstance(filepath_or_buffer, str):
        return exists
    try:
        exists = os.path.exists(filepath_or_buffer)
        # gh-5874: if the filepath is too long will raise here
    except (TypeError, ValueError):
        pass
    return exists


def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:
    """Whether the handle is opened in binary mode"""
    # specified by user
    if "t" in mode or "b" in mode:
        return "b" in mode

    # exceptions
    text_classes = (
        # classes that expect string but have 'b' in mode
        codecs.StreamWriter,
        codecs.StreamReader,
        codecs.StreamReaderWriter,
    )
    if issubclass(type(handle), text_classes):
        return False

    return isinstance(handle, _get_binary_io_classes()) or "b" in getattr(
        handle, "mode", mode
    )


@functools.lru_cache
def _get_binary_io_classes() -> tuple[type, ...]:
    """IO classes that expect bytes"""
    binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase)

    # python-zstandard doesn't use any of the builtin base classes; instead we
    # have to use the `zstd.ZstdDecompressionReader` class for isinstance checks.
    # Unfortunately `zstd.ZstdDecompressionReader` isn't exposed by python-zstandard
    # so we have to get it from a `zstd.ZstdDecompressor` instance.
    # See also https://github.com/indygreg/python-zstandard/pull/165.
    zstd = import_optional_dependency("zstandard", errors="ignore")
    if zstd is not None:
        with zstd.ZstdDecompressor().stream_reader(b"") as reader:
            binary_classes += (type(reader),)

    return binary_classes


def is_potential_multi_index(
    columns: Sequence[Hashable] | MultiIndex,
    index_col: bool | Sequence[int] | None = None,
) -> bool:
    """
    Check whether or not the `columns` parameter
    could be converted into a MultiIndex.

    Parameters
    ----------
    columns : array-like
        Object which may or may not be convertible into a MultiIndex
    index_col : None, bool or list, optional
        Column or columns to use as the (possibly hierarchical) index

    Returns
    -------
    bool : Whether or not columns could become a MultiIndex
    """
    if index_col is None or isinstance(index_col, bool):
        index_col = []

    return bool(
        len(columns)
        and not isinstance(columns, ABCMultiIndex)
        and all(isinstance(c, tuple) for c in columns if c not in list(index_col))
    )


def dedup_names(
    names: Sequence[Hashable], is_potential_multiindex: bool
) -> Sequence[Hashable]:
    """
    Rename column names if duplicates exist.

    Currently the renaming is done by appending a period and an autonumeric,
    but a custom pattern may be supported in the future.

    Examples
    --------
    >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False)
    ['x', 'y', 'x.1', 'x.2']
    """
    names = list(names)  # so we can index
    counts: DefaultDict[Hashable, int] = defaultdict(int)

    for i, col in enumerate(names):
        cur_count = counts[col]

        while cur_count > 0:
            counts[col] = cur_count + 1

            if is_potential_multiindex:
                # for mypy
                assert isinstance(col, tuple)
                col = col[:-1] + (f"{col[-1]}.{cur_count}",)
            else:
                col = f"{col}.{cur_count}"
            cur_count = counts[col]

        names[i] = col
        counts[col] = cur_count + 1

    return names
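
The two helpers above are small but load-bearing for the parsers: is_potential_multi_index decides whether all-tuple column labels should become a MultiIndex, and dedup_names mangles duplicate labels the same way read_csv does. A minimal sketch of their behavior (illustrative only; both live in the private pandas.io.common module, so the import path can change between pandas versions):

from pandas.io.common import dedup_names, is_potential_multi_index

# Duplicate flat labels get ".1", ".2", ... suffixes:
dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False)
# -> ['x', 'y', 'x.1', 'x.2']

# For would-be MultiIndex columns (tuples), only the last level is suffixed:
dedup_names([("a", "x"), ("a", "x")], is_potential_multiindex=True)
# -> [('a', 'x'), ('a', 'x.1')]

# All-tuple labels are a MultiIndex candidate; mixed labels are not:
is_potential_multi_index([("a", 1), ("a", 2)])  # True
is_potential_multi_index(["a", ("b", 1)])       # False
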
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__init__.py
ADDED
@@ -0,0 +1,19 @@
from pandas.io.excel._base import (
    ExcelFile,
    ExcelWriter,
    read_excel,
)
from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
from pandas.io.excel._util import register_writer
from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter

__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]


register_writer(_OpenpyxlWriter)

register_writer(_XlsxWriter)


register_writer(_ODSWriter)
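
This __init__ module is also where the writer registry is seeded: the three register_writer calls above map each engine onto the dispatch that pd.ExcelWriter uses when no engine is given. A minimal sketch of how the resolved default can be inspected and pinned (illustrative only; which engine wins for .xlsx depends on whether xlsxwriter is installed):

import pandas as pd

df = pd.DataFrame({"a": [1, 2]})

# engine=None resolves through the "io.excel.<ext>.writer" option ("auto"
# by default), then through the writers registered above:
with pd.ExcelWriter("out.xlsx") as writer:
    df.to_excel(writer)
    print(writer.engine)  # e.g. "xlsxwriter", or "openpyxl" as a fallback

# Pin the default for .xlsx explicitly:
with pd.option_context("io.excel.xlsx.writer", "openpyxl"):
    with pd.ExcelWriter("out.xlsx") as writer:
        df.to_excel(writer)
        print(writer.engine)  # "openpyxl"
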
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (641 Bytes)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc
ADDED
Binary file (48.1 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_calamine.cpython-310.pyc
ADDED
Binary file (4.35 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc
ADDED
Binary file (7.2 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc
ADDED
Binary file (8.79 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc
ADDED
Binary file (17.8 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc
ADDED
Binary file (4.17 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc
ADDED
Binary file (8.41 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc
ADDED
Binary file (4.24 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc
ADDED
Binary file (6.14 kB)
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_base.py
ADDED
@@ -0,0 +1,1659 @@
from __future__ import annotations

from collections.abc import (
    Hashable,
    Iterable,
    Mapping,
    Sequence,
)
import datetime
from functools import partial
from io import BytesIO
import os
from textwrap import fill
from typing import (
    IO,
    TYPE_CHECKING,
    Any,
    Callable,
    Generic,
    Literal,
    TypeVar,
    Union,
    cast,
    overload,
)
import warnings
import zipfile

from pandas._config import config

from pandas._libs import lib
from pandas._libs.parsers import STR_NA_VALUES
from pandas.compat._optional import (
    get_version,
    import_optional_dependency,
)
from pandas.errors import EmptyDataError
from pandas.util._decorators import (
    Appender,
    doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import check_dtype_backend

from pandas.core.dtypes.common import (
    is_bool,
    is_float,
    is_integer,
    is_list_like,
)

from pandas.core.frame import DataFrame
from pandas.core.shared_docs import _shared_docs
from pandas.util.version import Version

from pandas.io.common import (
    IOHandles,
    get_handle,
    stringify_path,
    validate_header_arg,
)
from pandas.io.excel._util import (
    fill_mi_header,
    get_default_engine,
    get_writer,
    maybe_convert_usecols,
    pop_header_name,
)
from pandas.io.parsers import TextParser
from pandas.io.parsers.readers import validate_integer

if TYPE_CHECKING:
    from types import TracebackType

    from pandas._typing import (
        DtypeArg,
        DtypeBackend,
        ExcelWriterIfSheetExists,
        FilePath,
        IntStrT,
        ReadBuffer,
        Self,
        SequenceNotStr,
        StorageOptions,
        WriteExcelBuffer,
    )

_read_excel_doc = (
    """
Read an Excel file into a ``pandas`` ``DataFrame``.

Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
read from a local filesystem or URL. Supports an option to read
a single sheet or a list of sheets.

Parameters
----------
io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
    Any valid string path is acceptable. The string could be a URL. Valid
    URL schemes include http, ftp, s3, and file. For file URLs, a host is
    expected. A local file could be: ``file://localhost/path/to/table.xlsx``.

    If you want to pass in a path object, pandas accepts any ``os.PathLike``.

    By file-like object, we refer to objects with a ``read()`` method,
    such as a file handle (e.g. via builtin ``open`` function)
    or ``StringIO``.

    .. deprecated:: 2.1.0
        Passing byte strings is deprecated. To read from a
        byte string, wrap it in a ``BytesIO`` object.
sheet_name : str, int, list, or None, default 0
    Strings are used for sheet names. Integers are used in zero-indexed
    sheet positions (chart sheets do not count as a sheet position).
    Lists of strings/integers are used to request multiple sheets.
    Specify ``None`` to get all worksheets.

    Available cases:

    * Defaults to ``0``: 1st sheet as a `DataFrame`
    * ``1``: 2nd sheet as a `DataFrame`
    * ``"Sheet1"``: Load sheet with name "Sheet1"
    * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
      as a dict of `DataFrame`
    * ``None``: All worksheets.

header : int, list of int, default 0
    Row (0-indexed) to use for the column labels of the parsed
    DataFrame. If a list of integers is passed those row positions will
    be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
    List of column names to use. If file contains no header row,
    then you should explicitly pass header=None.
index_col : int, str, list of int, default None
    Column (0-indexed) to use as the row labels of the DataFrame.
    Pass None if there is no such column. If a list is passed,
    those columns will be combined into a ``MultiIndex``. If a
    subset of data is selected with ``usecols``, index_col
    is based on the subset.

    Missing values will be forward filled to allow roundtripping with
    ``to_excel`` for ``merged_cells=True``. To avoid forward filling the
    missing values use ``set_index`` after reading the data instead of
    ``index_col``.
usecols : str, list-like, or callable, default None
    * If None, then parse all columns.
    * If str, then indicates comma separated list of Excel column letters
      and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
      both sides.
    * If list of int, then indicates list of column numbers to be parsed
      (0-indexed).
    * If list of string, then indicates list of column names to be parsed.
    * If callable, then evaluate each column name against it and parse the
      column if the callable returns ``True``.

    Returns a subset of the columns according to behavior above.
dtype : Type name or dict of column -> type, default None
    Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}}
    Use ``object`` to preserve data as stored in Excel and not interpret dtype,
    which will necessarily result in ``object`` dtype.
    If converters are specified, they will be applied INSTEAD
    of dtype conversion.
    If you use ``None``, it will infer the dtype of each column based on the data.
engine : {{'openpyxl', 'calamine', 'odf', 'pyxlsb', 'xlrd'}}, default None
    If io is not a buffer or path, this must be set to identify io.
    Engine compatibility:

    - ``openpyxl`` supports newer Excel file formats.
    - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
      and OpenDocument (.ods) file formats.
    - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
    - ``pyxlsb`` supports Binary Excel files.
    - ``xlrd`` supports old-style Excel files (.xls).

    When ``engine=None``, the following logic will be used to determine the engine:

    - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
      then `odf <https://pypi.org/project/odfpy/>`_ will be used.
    - Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used.
    - Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used.
    - Otherwise ``openpyxl`` will be used.
converters : dict, default None
    Dict of functions for converting values in certain columns. Keys can
    either be integers or column labels, values are functions that take one
    input argument, the Excel cell content, and return the transformed
    content.
true_values : list, default None
    Values to consider as True.
false_values : list, default None
    Values to consider as False.
skiprows : list-like, int, or callable, optional
    Line numbers to skip (0-indexed) or number of lines to skip (int) at the
    start of the file. If callable, the callable function will be evaluated
    against the row indices, returning True if the row should be skipped and
    False otherwise. An example of a valid callable argument would be ``lambda
    x: x in [0, 2]``.
nrows : int, default None
    Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
    Additional strings to recognize as NA/NaN. If dict passed, specific
    per-column NA values. By default the following values are interpreted
    as NaN: '"""
    + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent="    ")
    + """'.
keep_default_na : bool, default True
    Whether or not to include the default NaN values when parsing the data.
    Depending on whether ``na_values`` is passed in, the behavior is as follows:

    * If ``keep_default_na`` is True, and ``na_values`` are specified,
      ``na_values`` is appended to the default NaN values used for parsing.
    * If ``keep_default_na`` is True, and ``na_values`` are not specified, only
      the default NaN values are used for parsing.
    * If ``keep_default_na`` is False, and ``na_values`` are specified, only
      the NaN values specified ``na_values`` are used for parsing.
    * If ``keep_default_na`` is False, and ``na_values`` are not specified, no
      strings will be parsed as NaN.

    Note that if `na_filter` is passed in as False, the ``keep_default_na`` and
    ``na_values`` parameters will be ignored.
na_filter : bool, default True
    Detect missing value markers (empty strings and the value of na_values). In
    data without any NAs, passing ``na_filter=False`` can improve the
    performance of reading a large file.
verbose : bool, default False
    Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
    The behavior is as follows:

    * ``bool``. If True -> try parsing the index.
    * ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
      each as a separate date column.
    * ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
      a single date column.
    * ``dict``, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
      result 'foo'

    If a column or index contains an unparsable date, the entire column or
    index will be returned unaltered as an object data type. If you don't want to
    parse some cells as date just change their type in Excel to "Text".
    For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.

    Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
    Function to use for converting a sequence of string columns to an array of
    datetime instances. The default uses ``dateutil.parser.parser`` to do the
    conversion. Pandas will try to call `date_parser` in three different ways,
    advancing to the next if an exception occurs: 1) Pass one or more arrays
    (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
    string values from the columns defined by `parse_dates` into a single array
    and pass that; and 3) call `date_parser` once for each row using one or
    more strings (corresponding to the columns defined by `parse_dates`) as
    arguments.

    .. deprecated:: 2.0.0
        Use ``date_format`` instead, or read in as ``object`` and then apply
        :func:`to_datetime` as-needed.
date_format : str or dict of column -> format, default ``None``
    If used in conjunction with ``parse_dates``, will parse dates according to this
    format. For anything more complex,
    please read in as ``object`` and then apply :func:`to_datetime` as-needed.

    .. versionadded:: 2.0.0
thousands : str, default None
    Thousands separator for parsing string columns to numeric. Note that
    this parameter is only necessary for columns stored as TEXT in Excel,
    any numeric columns will automatically be parsed, regardless of display
    format.
decimal : str, default '.'
    Character to recognize as decimal point for parsing string columns to numeric.
    Note that this parameter is only necessary for columns stored as TEXT in Excel,
    any numeric columns will automatically be parsed, regardless of display
    format (e.g. use ',' for European data).

    .. versionadded:: 1.4.0

comment : str, default None
    Comments out remainder of line. Pass a character or characters to this
    argument to indicate comments in the input file. Any data between the
    comment string and the end of the current line is ignored.
skipfooter : int, default 0
    Rows at the end to skip (0-indexed).
{storage_options}

dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
    Back-end data type applied to the resultant :class:`DataFrame`
    (still experimental). Behaviour is as follows:

    * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
      (default).
    * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
      DataFrame.

    .. versionadded:: 2.0

engine_kwargs : dict, optional
    Arbitrary keyword arguments passed to excel engine.

Returns
-------
DataFrame or dict of DataFrames
    DataFrame from the passed in Excel file. See notes in sheet_name
    argument for more information on when a dict of DataFrames is returned.

See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.

Notes
-----
For specific information on the methods used for each Excel engine, refer to the pandas
:ref:`user guide <io.excel_reader>`

Examples
--------
The file can be read using the file name as string or an open file object:

>>> pd.read_excel('tmp.xlsx', index_col=0)  # doctest: +SKIP
       Name  Value
0   string1      1
1   string2      2
2  #Comment      3

>>> pd.read_excel(open('tmp.xlsx', 'rb'),
...               sheet_name='Sheet3')  # doctest: +SKIP
   Unnamed: 0      Name  Value
0           0   string1      1
1           1   string2      2
2           2  #Comment      3

Index and header can be specified via the `index_col` and `header` arguments

>>> pd.read_excel('tmp.xlsx', index_col=None, header=None)  # doctest: +SKIP
     0         1      2
0  NaN      Name  Value
1  0.0   string1      1
2  1.0   string2      2
3  2.0  #Comment      3

Column types are inferred but can be explicitly specified

>>> pd.read_excel('tmp.xlsx', index_col=0,
...               dtype={{'Name': str, 'Value': float}})  # doctest: +SKIP
       Name  Value
0   string1    1.0
1   string2    2.0
2  #Comment    3.0

True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!

>>> pd.read_excel('tmp.xlsx', index_col=0,
...               na_values=['string1', 'string2'])  # doctest: +SKIP
       Name  Value
0       NaN      1
1       NaN      2
2  #Comment      3

Comment lines in the excel input file can be skipped using the
``comment`` kwarg.

>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#')  # doctest: +SKIP
      Name  Value
0  string1    1.0
1  string2    2.0
2     None    NaN
"""
)

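# Editor's note (not pandas source): the template above is attached to
# read_excel below via ``@Appender(_read_excel_doc)``; the outer ``@doc``
# decorator then format-substitutes ``{storage_options}`` from
# ``_shared_docs``, which is why literal braces in the template are doubled
# as ``{{...}}`` -- they collapse to single braces in the rendered docstring.
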
@overload
def read_excel(
    io,
    # sheet name is str or int -> DataFrame
    sheet_name: str | int = ...,
    *,
    header: int | Sequence[int] | None = ...,
    names: SequenceNotStr[Hashable] | range | None = ...,
    index_col: int | str | Sequence[int] | None = ...,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = ...,
    dtype: DtypeArg | None = ...,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
    converters: dict[str, Callable] | dict[int, Callable] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: list | dict | bool = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame:
    ...


@overload
def read_excel(
    io,
    # sheet name is list or None -> dict[IntStrT, DataFrame]
    sheet_name: list[IntStrT] | None,
    *,
    header: int | Sequence[int] | None = ...,
    names: SequenceNotStr[Hashable] | range | None = ...,
    index_col: int | str | Sequence[int] | None = ...,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = ...,
    dtype: DtypeArg | None = ...,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
    converters: dict[str, Callable] | dict[int, Callable] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: list | dict | bool = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> dict[IntStrT, DataFrame]:
    ...

@doc(storage_options=_shared_docs["storage_options"])
@Appender(_read_excel_doc)
def read_excel(
    io,
    sheet_name: str | int | list[IntStrT] | None = 0,
    *,
    header: int | Sequence[int] | None = 0,
    names: SequenceNotStr[Hashable] | range | None = None,
    index_col: int | str | Sequence[int] | None = None,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = None,
    dtype: DtypeArg | None = None,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None,
    converters: dict[str, Callable] | dict[int, Callable] | None = None,
    true_values: Iterable[Hashable] | None = None,
    false_values: Iterable[Hashable] | None = None,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
    nrows: int | None = None,
    na_values=None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool = False,
    parse_dates: list | dict | bool = False,
    date_parser: Callable | lib.NoDefault = lib.no_default,
    date_format: dict[Hashable, str] | str | None = None,
    thousands: str | None = None,
    decimal: str = ".",
    comment: str | None = None,
    skipfooter: int = 0,
    storage_options: StorageOptions | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    engine_kwargs: dict | None = None,
) -> DataFrame | dict[IntStrT, DataFrame]:
    check_dtype_backend(dtype_backend)
    should_close = False
    if engine_kwargs is None:
        engine_kwargs = {}

    if not isinstance(io, ExcelFile):
        should_close = True
        io = ExcelFile(
            io,
            storage_options=storage_options,
            engine=engine,
            engine_kwargs=engine_kwargs,
        )
    elif engine and engine != io.engine:
        raise ValueError(
            "Engine should not be specified when passing "
            "an ExcelFile - ExcelFile already has the engine set"
        )

    try:
        data = io.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            dtype=dtype,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            keep_default_na=keep_default_na,
            na_filter=na_filter,
            verbose=verbose,
            parse_dates=parse_dates,
            date_parser=date_parser,
            date_format=date_format,
            thousands=thousands,
            decimal=decimal,
            comment=comment,
            skipfooter=skipfooter,
            dtype_backend=dtype_backend,
        )
    finally:
        # make sure to close opened file handles
        if should_close:
            io.close()
    return data

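# Editor's note -- an illustrative usage sketch, not part of the pandas
# source ("report.xlsx" is a hypothetical multi-sheet workbook). Because
# read_excel above wraps a plain path in a fresh ExcelFile and closes it in
# the ``finally`` block, repeated calls reopen the file; reusing one
# ExcelFile parses many sheets from a single open handle:
#
#     import pandas as pd
#
#     with pd.ExcelFile("report.xlsx") as xls:
#         frames = {name: xls.parse(sheet_name=name) for name in xls.sheet_names}
#
# Passing engine= alongside an ExcelFile whose engine differs raises the
# ValueError seen above.
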
_WorkbookT = TypeVar("_WorkbookT")


class BaseExcelReader(Generic[_WorkbookT]):
    book: _WorkbookT

    def __init__(
        self,
        filepath_or_buffer,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        if engine_kwargs is None:
            engine_kwargs = {}

        # First argument can also be bytes, so create a buffer
        if isinstance(filepath_or_buffer, bytes):
            filepath_or_buffer = BytesIO(filepath_or_buffer)

        self.handles = IOHandles(
            handle=filepath_or_buffer, compression={"method": None}
        )
        if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
            self.handles = get_handle(
                filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
            )

        if isinstance(self.handles.handle, self._workbook_class):
            self.book = self.handles.handle
        elif hasattr(self.handles.handle, "read"):
            # N.B. xlrd.Book has a read attribute too
            self.handles.handle.seek(0)
            try:
                self.book = self.load_workbook(self.handles.handle, engine_kwargs)
            except Exception:
                self.close()
                raise
        else:
            raise ValueError(
                "Must explicitly set engine if not passing in buffer or path for io."
            )

    @property
    def _workbook_class(self) -> type[_WorkbookT]:
        raise NotImplementedError

    def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT:
        raise NotImplementedError

    def close(self) -> None:
        if hasattr(self, "book"):
            if hasattr(self.book, "close"):
                # pyxlsb: opens a TemporaryFile
                # openpyxl: https://stackoverflow.com/questions/31416842/
                # openpyxl-does-not-close-excel-workbook-in-read-only-mode
                self.book.close()
            elif hasattr(self.book, "release_resources"):
                # xlrd
                # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548
                self.book.release_resources()
        self.handles.close()

    @property
    def sheet_names(self) -> list[str]:
        raise NotImplementedError

    def get_sheet_by_name(self, name: str):
        raise NotImplementedError

    def get_sheet_by_index(self, index: int):
        raise NotImplementedError

    def get_sheet_data(self, sheet, rows: int | None = None):
        raise NotImplementedError

    def raise_if_bad_sheet_by_index(self, index: int) -> None:
        n_sheets = len(self.sheet_names)
        if index >= n_sheets:
            raise ValueError(
                f"Worksheet index {index} is invalid, {n_sheets} worksheets found"
            )

    def raise_if_bad_sheet_by_name(self, name: str) -> None:
        if name not in self.sheet_names:
            raise ValueError(f"Worksheet named '{name}' not found")

    def _check_skiprows_func(
        self,
        skiprows: Callable,
        rows_to_use: int,
    ) -> int:
        """
        Determine how many file rows are required to obtain `nrows` data
        rows when `skiprows` is a function.

        Parameters
        ----------
        skiprows : function
            The function passed to read_excel by the user.
        rows_to_use : int
            The number of rows that will be needed for the header and
            the data.

        Returns
        -------
        int
        """
        i = 0
        rows_used_so_far = 0
        while rows_used_so_far < rows_to_use:
            if not skiprows(i):
                rows_used_so_far += 1
            i += 1
        return i

    def _calc_rows(
        self,
        header: int | Sequence[int] | None,
        index_col: int | Sequence[int] | None,
        skiprows: Sequence[int] | int | Callable[[int], object] | None,
        nrows: int | None,
    ) -> int | None:
        """
        If nrows specified, find the number of rows needed from the
        file, otherwise return None.


        Parameters
        ----------
        header : int, list of int, or None
            See read_excel docstring.
        index_col : int, str, list of int, or None
            See read_excel docstring.
        skiprows : list-like, int, callable, or None
            See read_excel docstring.
        nrows : int or None
            See read_excel docstring.

        Returns
        -------
        int or None
        """
        if nrows is None:
            return None
        if header is None:
            header_rows = 1
        elif is_integer(header):
            header = cast(int, header)
            header_rows = 1 + header
        else:
            header = cast(Sequence, header)
            header_rows = 1 + header[-1]
        # If there is a MultiIndex header and an index then there is also
        # a row containing just the index name(s)
        if is_list_like(header) and index_col is not None:
            header = cast(Sequence, header)
            if len(header) > 1:
                header_rows += 1
        if skiprows is None:
            return header_rows + nrows
        if is_integer(skiprows):
            skiprows = cast(int, skiprows)
            return header_rows + nrows + skiprows
        if is_list_like(skiprows):

            def f(skiprows: Sequence, x: int) -> bool:
                return x in skiprows

            skiprows = cast(Sequence, skiprows)
            return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)
        if callable(skiprows):
            return self._check_skiprows_func(
                skiprows,
                header_rows + nrows,
            )
        # else unexpected skiprows type: read_excel will not optimize
        # the number of rows read from file
        return None

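    # Editor's note (not pandas source) -- a worked example of the two
    # helpers above: for nrows=3, header=0 and skiprows=lambda x: x in [0, 2],
    # _calc_rows asks _check_skiprows_func for 1 + 3 = 4 usable rows; the scan
    # skips file rows 0 and 2 and keeps rows 1, 3, 4, 5, so it returns 6:
    # six file rows must be read to yield one header row plus three data rows.
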
    def parse(
        self,
        sheet_name: str | int | list[int] | list[str] | None = 0,
        header: int | Sequence[int] | None = 0,
        names: SequenceNotStr[Hashable] | range | None = None,
        index_col: int | Sequence[int] | None = None,
        usecols=None,
        dtype: DtypeArg | None = None,
        true_values: Iterable[Hashable] | None = None,
        false_values: Iterable[Hashable] | None = None,
        skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
        nrows: int | None = None,
        na_values=None,
        verbose: bool = False,
        parse_dates: list | dict | bool = False,
        date_parser: Callable | lib.NoDefault = lib.no_default,
        date_format: dict[Hashable, str] | str | None = None,
        thousands: str | None = None,
        decimal: str = ".",
        comment: str | None = None,
        skipfooter: int = 0,
        dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
        **kwds,
    ):
        validate_header_arg(header)
        validate_integer("nrows", nrows)

        ret_dict = False

        # Keep sheetname to maintain backwards compatibility.
        sheets: list[int] | list[str]
        if isinstance(sheet_name, list):
            sheets = sheet_name
            ret_dict = True
        elif sheet_name is None:
            sheets = self.sheet_names
            ret_dict = True
        elif isinstance(sheet_name, str):
            sheets = [sheet_name]
        else:
            sheets = [sheet_name]

        # handle same-type duplicates.
        sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys()))

        output = {}

        last_sheetname = None
        for asheetname in sheets:
            last_sheetname = asheetname
            if verbose:
                print(f"Reading sheet {asheetname}")

            if isinstance(asheetname, str):
                sheet = self.get_sheet_by_name(asheetname)
            else:  # assume an integer if not a string
                sheet = self.get_sheet_by_index(asheetname)

            file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
            data = self.get_sheet_data(sheet, file_rows_needed)
            if hasattr(sheet, "close"):
                # pyxlsb opens two TemporaryFiles
                sheet.close()
            usecols = maybe_convert_usecols(usecols)

            if not data:
                output[asheetname] = DataFrame()
                continue

            is_list_header = False
            is_len_one_list_header = False
            if is_list_like(header):
                assert isinstance(header, Sequence)
                is_list_header = True
                if len(header) == 1:
                    is_len_one_list_header = True

            if is_len_one_list_header:
                header = cast(Sequence[int], header)[0]

            # forward fill and pull out names for MultiIndex column
            header_names = None
            if header is not None and is_list_like(header):
                assert isinstance(header, Sequence)

                header_names = []
                control_row = [True] * len(data[0])

                for row in header:
                    if is_integer(skiprows):
                        assert isinstance(skiprows, int)
                        row += skiprows

                    if row > len(data) - 1:
                        raise ValueError(
                            f"header index {row} exceeds maximum index "
                            f"{len(data) - 1} of data.",
                        )

                    data[row], control_row = fill_mi_header(data[row], control_row)

                    if index_col is not None:
                        header_name, _ = pop_header_name(data[row], index_col)
                        header_names.append(header_name)

            # If there is a MultiIndex header and an index then there is also
            # a row containing just the index name(s)
            has_index_names = False
            if is_list_header and not is_len_one_list_header and index_col is not None:
                index_col_list: Sequence[int]
                if isinstance(index_col, int):
                    index_col_list = [index_col]
                else:
                    assert isinstance(index_col, Sequence)
                    index_col_list = index_col

                # We have to handle mi without names. If any of the entries in the data
                # columns are not empty, this is a regular row
                assert isinstance(header, Sequence)
                if len(header) < len(data):
                    potential_index_names = data[len(header)]
                    potential_data = [
                        x
                        for i, x in enumerate(potential_index_names)
                        if not control_row[i] and i not in index_col_list
                    ]
                    has_index_names = all(x == "" or x is None for x in potential_data)

            if is_list_like(index_col):
                # Forward fill values for MultiIndex index.
                if header is None:
                    offset = 0
                elif isinstance(header, int):
                    offset = 1 + header
                else:
                    offset = 1 + max(header)

                # GH34673: if MultiIndex names present and not defined in the header,
                # offset needs to be incremented so that forward filling starts
                # from the first MI value instead of the name
                if has_index_names:
                    offset += 1

                # Check if we have an empty dataset
                # before trying to collect data.
                if offset < len(data):
                    assert isinstance(index_col, Sequence)

                    for col in index_col:
                        last = data[offset][col]

                        for row in range(offset + 1, len(data)):
                            if data[row][col] == "" or data[row][col] is None:
                                data[row][col] = last
                            else:
                                last = data[row][col]

            # GH 12292 : error when read one empty column from excel file
            try:
                parser = TextParser(
                    data,
                    names=names,
                    header=header,
                    index_col=index_col,
                    has_index_names=has_index_names,
                    dtype=dtype,
                    true_values=true_values,
                    false_values=false_values,
                    skiprows=skiprows,
                    nrows=nrows,
                    na_values=na_values,
                    skip_blank_lines=False,  # GH 39808
                    parse_dates=parse_dates,
                    date_parser=date_parser,
                    date_format=date_format,
                    thousands=thousands,
                    decimal=decimal,
                    comment=comment,
                    skipfooter=skipfooter,
                    usecols=usecols,
                    dtype_backend=dtype_backend,
                    **kwds,
                )

                output[asheetname] = parser.read(nrows=nrows)

                if header_names:
                    output[asheetname].columns = output[asheetname].columns.set_names(
                        header_names
                    )

            except EmptyDataError:
                # No Data, return an empty DataFrame
                output[asheetname] = DataFrame()

            except Exception as err:
                err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:])
                raise err

        if last_sheetname is None:
            raise ValueError("Sheet name is an empty list")

        if ret_dict:
            return output
        else:
            return output[last_sheetname]

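# Editor's note (not pandas source): ``parse`` above mirrors the
# ``read_excel`` contract -- ``sheet_name`` given as a list or as ``None``
# returns a dict keyed by sheet name/index, while any scalar ``sheet_name``
# returns the single DataFrame for the last (only) sheet parsed.
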
@doc(storage_options=_shared_docs["storage_options"])
|
928 |
+
class ExcelWriter(Generic[_WorkbookT]):
|
929 |
+
"""
|
930 |
+
Class for writing DataFrame objects into excel sheets.
|
931 |
+
|
932 |
+
Default is to use:
|
933 |
+
|
934 |
+
* `xlsxwriter <https://pypi.org/project/XlsxWriter/>`__ for xlsx files if xlsxwriter
|
935 |
+
is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__
|
936 |
+
* `odswriter <https://pypi.org/project/odswriter/>`__ for ods files
|
937 |
+
|
938 |
+
See ``DataFrame.to_excel`` for typical usage.
|
939 |
+
|
940 |
+
The writer should be used as a context manager. Otherwise, call `close()` to save
|
941 |
+
and close any opened file handles.
|
942 |
+
|
943 |
+
Parameters
|
944 |
+
----------
|
945 |
+
path : str or typing.BinaryIO
|
946 |
+
Path to xls or xlsx or ods file.
|
947 |
+
engine : str (optional)
|
948 |
+
Engine to use for writing. If None, defaults to
|
949 |
+
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
|
950 |
+
argument.
|
951 |
+
date_format : str, default None
|
952 |
+
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
|
953 |
+
datetime_format : str, default None
|
954 |
+
Format string for datetime objects written into Excel files.
|
955 |
+
(e.g. 'YYYY-MM-DD HH:MM:SS').
|
956 |
+
mode : {{'w', 'a'}}, default 'w'
|
957 |
+
File mode to use (write or append). Append does not work with fsspec URLs.
|
958 |
+
{storage_options}
|
959 |
+
|
960 |
+
if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'
|
961 |
+
How to behave when trying to write to a sheet that already
|
962 |
+
exists (append mode only).
|
963 |
+
|
964 |
+
* error: raise a ValueError.
|
965 |
+
* new: Create a new sheet, with a name determined by the engine.
|
966 |
+
* replace: Delete the contents of the sheet before writing to it.
|
967 |
+
* overlay: Write contents to the existing sheet without first removing,
|
968 |
+
but possibly over top of, the existing contents.
|
969 |
+
|
970 |
+
.. versionadded:: 1.3.0
|
971 |
+
|
972 |
+
.. versionchanged:: 1.4.0
|
973 |
+
|
974 |
+
Added ``overlay`` option
|
975 |
+
|
976 |
+
engine_kwargs : dict, optional
|
977 |
+
Keyword arguments to be passed into the engine. These will be passed to
|
978 |
+
the following functions of the respective engines:
|
979 |
+
|
980 |
+
* xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
|
981 |
+
* openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
|
982 |
+
* openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
|
983 |
+
* odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
|
984 |
+
|
985 |
+
.. versionadded:: 1.3.0
|
986 |
+
|
987 |
+
Notes
|
988 |
+
-----
|
989 |
+
For compatibility with CSV writers, ExcelWriter serializes lists
|
990 |
+
and dicts to strings before writing.
|
991 |
+
|
992 |
+
Examples
|
993 |
+
--------
|
994 |
+
Default usage:
|
995 |
+
|
996 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
997 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
|
998 |
+
... df.to_excel(writer) # doctest: +SKIP
|
999 |
+
|
1000 |
+
To write to separate sheets in a single file:
|
1001 |
+
|
1002 |
+
>>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP
|
1003 |
+
>>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
1004 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
|
1005 |
+
... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
|
1006 |
+
... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
|
1007 |
+
|
1008 |
+
You can set the date format or datetime format:
|
1009 |
+
|
1010 |
+
>>> from datetime import date, datetime # doctest: +SKIP
|
1011 |
+
>>> df = pd.DataFrame(
|
1012 |
+
... [
|
1013 |
+
... [date(2014, 1, 31), date(1999, 9, 24)],
|
1014 |
+
... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
|
1015 |
+
... ],
|
1016 |
+
... index=["Date", "Datetime"],
|
1017 |
+
... columns=["X", "Y"],
|
1018 |
+
... ) # doctest: +SKIP
|
1019 |
+
>>> with pd.ExcelWriter(
|
1020 |
+
... "path_to_file.xlsx",
|
1021 |
+
... date_format="YYYY-MM-DD",
|
1022 |
+
... datetime_format="YYYY-MM-DD HH:MM:SS"
|
1023 |
+
... ) as writer:
|
1024 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1025 |
+
|
1026 |
+
You can also append to an existing Excel file:
|
1027 |
+
|
1028 |
+
>>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
|
1029 |
+
... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP
|
1030 |
+
|
1031 |
+
Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
|
1032 |
+
already exists:
|
1033 |
+
|
1034 |
+
>>> with ExcelWriter(
|
1035 |
+
... "path_to_file.xlsx",
|
1036 |
+
... mode="a",
|
1037 |
+
... engine="openpyxl",
|
1038 |
+
... if_sheet_exists="replace",
|
1039 |
+
... ) as writer:
|
1040 |
+
... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
|
1041 |
+
|
1042 |
+
You can also write multiple DataFrames to a single sheet. Note that the
|
1043 |
+
``if_sheet_exists`` parameter needs to be set to ``overlay``:
|
1044 |
+
|
1045 |
+
>>> with ExcelWriter("path_to_file.xlsx",
|
1046 |
+
... mode="a",
|
1047 |
+
... engine="openpyxl",
|
1048 |
+
... if_sheet_exists="overlay",
|
1049 |
+
... ) as writer:
|
1050 |
+
... df1.to_excel(writer, sheet_name="Sheet1")
|
1051 |
+
... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP
|
1052 |
+
|
1053 |
+
You can store Excel file in RAM:
|
1054 |
+
|
1055 |
+
>>> import io
|
1056 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
|
1057 |
+
>>> buffer = io.BytesIO()
|
1058 |
+
>>> with pd.ExcelWriter(buffer) as writer:
|
1059 |
+
... df.to_excel(writer)
|
1060 |
+
|
1061 |
+
You can pack Excel file into zip archive:
|
1062 |
+
|
1063 |
+
>>> import zipfile # doctest: +SKIP
|
1064 |
+
>>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
|
1065 |
+
>>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
|
1066 |
+
... with zf.open("filename.xlsx", "w") as buffer:
|
1067 |
+
... with pd.ExcelWriter(buffer) as writer:
|
1068 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1069 |
+
|
1070 |
+
You can specify additional arguments to the underlying engine:
|
1071 |
+
|
1072 |
+
>>> with pd.ExcelWriter(
|
1073 |
+
... "path_to_file.xlsx",
|
1074 |
+
... engine="xlsxwriter",
|
1075 |
+
... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
|
1076 |
+
... ) as writer:
|
1077 |
+
... df.to_excel(writer) # doctest: +SKIP
|
1078 |
+
|
1079 |
+
In append mode, ``engine_kwargs`` are passed through to
|
1080 |
+
openpyxl's ``load_workbook``:
|
1081 |
+
|
1082 |
+
>>> with pd.ExcelWriter(
|
1083 |
+
... "path_to_file.xlsx",
|
1084 |
+
... engine="openpyxl",
|
1085 |
+
... mode="a",
|
1086 |
+
... engine_kwargs={{"keep_vba": True}}
|
1087 |
+
... ) as writer:
|
1088 |
+
... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
|
1089 |
+
"""
|
1090 |
+
|
    # Defining an ExcelWriter implementation (see abstract methods for more...)

    # - Mandatory
    #   - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
    #     --> called to write additional DataFrames to disk
    #   - ``_supported_extensions`` (tuple of supported extensions), used to
    #     check that engine supports the given extension.
    #   - ``_engine`` - string that gives the engine name. Necessary to
    #     instantiate class directly and bypass ``ExcelWriterMeta`` engine
    #     lookup.
    #   - ``save(self)`` --> called to save file to disk
    # - Mostly mandatory (i.e. should at least exist)
    #   - book, cur_sheet, path

    # - Optional:
    #   - ``__init__(self, path, engine=None, **kwargs)`` --> always called
    #     with path as first argument.

    # You also need to register the class with ``register_writer()``.
    # Technically, ExcelWriter implementations don't need to subclass
    # ExcelWriter.

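    # As a minimal sketch (hypothetical names, not part of pandas), a
    # conforming third-party writer would look roughly like:
    #
    #     from pandas.io.excel import ExcelWriter, register_writer
    #
    #     class DemoWriter(ExcelWriter):
    #         _engine = "demo"
    #         _supported_extensions = (".xlsx",)
    #         # ...plus book, sheets, _write_cells(...) and _save()
    #
    #     register_writer(DemoWriter)
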
    _engine: str
    _supported_extensions: tuple[str, ...]

    def __new__(
        cls,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict | None = None,
    ) -> Self:
        # only switch class if generic(ExcelWriter)
        if cls is ExcelWriter:
            if engine is None or (isinstance(engine, str) and engine == "auto"):
                if isinstance(path, str):
                    ext = os.path.splitext(path)[-1][1:]
                else:
                    ext = "xlsx"

                try:
                    engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
                    if engine == "auto":
                        engine = get_default_engine(ext, mode="writer")
                except KeyError as err:
                    raise ValueError(f"No engine for filetype: '{ext}'") from err

            # for mypy
            assert engine is not None
            # error: Incompatible types in assignment (expression has type
            # "type[ExcelWriter[Any]]", variable has type "type[Self]")
            cls = get_writer(engine)  # type: ignore[assignment]

        return object.__new__(cls)

    # declare external properties you can count on
    _path = None

    @property
    def supported_extensions(self) -> tuple[str, ...]:
        """Extensions that writer engine supports."""
        return self._supported_extensions

    @property
    def engine(self) -> str:
        """Name of engine."""
        return self._engine

    @property
    def sheets(self) -> dict[str, Any]:
        """Mapping of sheet names to sheet objects."""
        raise NotImplementedError

    @property
    def book(self) -> _WorkbookT:
        """
        Book instance. Class type will depend on the engine used.

        This attribute can be used to access engine-specific features.
        """
        raise NotImplementedError

    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        """
        Write given formatted cells into an Excel sheet.

        Parameters
        ----------
        cells : generator
            cell of formatted data to save to Excel sheet
        sheet_name : str, default None
            Name of Excel sheet, if None, then use self.cur_sheet
        startrow : upper left cell row to dump data frame
        startcol : upper left cell column to dump data frame
        freeze_panes : int tuple of length 2
            contains the bottom-most row and right-most column to freeze
        """
        raise NotImplementedError

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        raise NotImplementedError

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
    ) -> None:
        # validate that this engine can handle the extension
        if isinstance(path, str):
            ext = os.path.splitext(path)[-1]
            self.check_extension(ext)

        # use mode to open the file
        if "b" not in mode:
            mode += "b"
        # use "a" for the user to append data to excel but internally use "r+" to let
        # the excel backend first read the existing file and then write any data to it
        mode = mode.replace("a", "r+")

        if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
            raise ValueError(
                f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
                "Valid options are 'error', 'new', 'replace' and 'overlay'."
            )
        if if_sheet_exists and "r+" not in mode:
            raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
        if if_sheet_exists is None:
            if_sheet_exists = "error"
        self._if_sheet_exists = if_sheet_exists

        # cast ExcelWriter to avoid adding 'if self._handles is not None'
        self._handles = IOHandles(
            cast(IO[bytes], path), compression={"compression": None}
        )
        if not isinstance(path, ExcelWriter):
            self._handles = get_handle(
                path, mode, storage_options=storage_options, is_text=False
            )
        self._cur_sheet = None

        if date_format is None:
            self._date_format = "YYYY-MM-DD"
        else:
            self._date_format = date_format
        if datetime_format is None:
            self._datetime_format = "YYYY-MM-DD HH:MM:SS"
        else:
            self._datetime_format = datetime_format

        self._mode = mode

    @property
    def date_format(self) -> str:
        """
        Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
        """
        return self._date_format

    @property
    def datetime_format(self) -> str:
        """
        Format string for datetimes written into Excel files
        (e.g. 'YYYY-MM-DD HH:MM:SS').
        """
        return self._datetime_format

    @property
    def if_sheet_exists(self) -> str:
        """
        How to behave when writing to a sheet that already exists in append mode.
        """
        return self._if_sheet_exists

    def __fspath__(self) -> str:
        return getattr(self._handles.handle, "name", "")

    def _get_sheet_name(self, sheet_name: str | None) -> str:
        if sheet_name is None:
            sheet_name = self._cur_sheet
        if sheet_name is None:  # pragma: no cover
            raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
        return sheet_name

    def _value_with_fmt(
        self, val
    ) -> tuple[
        int | float | bool | str | datetime.datetime | datetime.date, str | None
    ]:
        """
        Convert numpy types to Python types for the Excel writers.

        Parameters
        ----------
        val : object
            Value to be written into cells

        Returns
        -------
        Tuple with the first element being the converted value and the second
        being an optional format
        """
        fmt = None

        if is_integer(val):
            val = int(val)
        elif is_float(val):
            val = float(val)
        elif is_bool(val):
            val = bool(val)
        elif isinstance(val, datetime.datetime):
            fmt = self._datetime_format
        elif isinstance(val, datetime.date):
            fmt = self._date_format
        elif isinstance(val, datetime.timedelta):
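            # Excel has no timedelta type; store the duration as a
            # fractional number of days (86400 seconds per day)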
            val = val.total_seconds() / 86400
            fmt = "0"
        else:
            val = str(val)

        return val, fmt

    @classmethod
    def check_extension(cls, ext: str) -> Literal[True]:
        """
        Check the path's extension against the Writer's supported
        extensions. If it isn't supported, raise ValueError.
        """
        if ext.startswith("."):
            ext = ext[1:]
        if not any(ext in extension for extension in cls._supported_extensions):
            raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
        return True

    # Allow use as a contextmanager
    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        self.close()

    def close(self) -> None:
        """synonym for save, to make it more file-like"""
        self._save()
        self._handles.close()


XLS_SIGNATURES = (
    b"\x09\x00\x04\x00\x07\x00\x10\x00",  # BIFF2
    b"\x09\x02\x06\x00\x00\x00\x10\x00",  # BIFF3
    b"\x09\x04\x06\x00\x00\x00\x10\x00",  # BIFF4
    b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1",  # Compound File Binary
)
ZIP_SIGNATURE = b"PK\x03\x04"
PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))


@doc(storage_options=_shared_docs["storage_options"])
def inspect_excel_format(
    content_or_path: FilePath | ReadBuffer[bytes],
    storage_options: StorageOptions | None = None,
) -> str | None:
    """
    Inspect the path or content of an excel file and get its format.

    Adapted from xlrd: https://github.com/python-excel/xlrd.

    Parameters
    ----------
    content_or_path : str or file-like object
        Path to file or content of file to inspect. May be a URL.
    {storage_options}

    Returns
    -------
    str or None
        Format of file if it can be determined.

    Raises
    ------
    ValueError
        If resulting stream is empty.
    BadZipFile
        If resulting stream does not have an XLS signature and is not a valid zipfile.
    """
    if isinstance(content_or_path, bytes):
        content_or_path = BytesIO(content_or_path)

    with get_handle(
        content_or_path, "rb", storage_options=storage_options, is_text=False
    ) as handle:
        stream = handle.handle
        stream.seek(0)
        buf = stream.read(PEEK_SIZE)
        if buf is None:
            raise ValueError("stream is empty")
        assert isinstance(buf, bytes)
        peek = buf
        stream.seek(0)

        if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
            return "xls"
        elif not peek.startswith(ZIP_SIGNATURE):
            return None

        with zipfile.ZipFile(stream) as zf:
            # Workaround for some third party files that use forward slashes and
            # lower case names.
            component_names = [
                name.replace("\\", "/").lower() for name in zf.namelist()
            ]

            if "xl/workbook.xml" in component_names:
                return "xlsx"
            if "xl/workbook.bin" in component_names:
                return "xlsb"
            if "content.xml" in component_names:
                return "ods"
            return "zip"

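# Illustrative outcomes (hypothetical inputs): a zip archive containing
# xl/workbook.xml maps to "xlsx", xl/workbook.bin to "xlsb", content.xml to
# "ods"; a stream opening with one of the BIFF signatures maps to "xls".

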
class ExcelFile:
    """
    Class for parsing tabular Excel sheets into DataFrame objects.

    See read_excel for more documentation.

    Parameters
    ----------
    path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath)
        A file-like object, xlrd workbook or openpyxl workbook.
        If a string or path object, expected to be a path to a
        .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
    engine : str, default None
        If io is not a buffer or path, this must be set to identify io.
        Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``
        Engine compatibility:

        - ``xlrd`` supports old-style Excel files (.xls).
        - ``openpyxl`` supports newer Excel file formats.
        - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
        - ``pyxlsb`` supports Binary Excel files.
        - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
          and OpenDocument (.ods) file formats.

        .. versionchanged:: 1.2.0

           The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
           now only supports old-style ``.xls`` files.
           When ``engine=None``, the following logic will be
           used to determine the engine:

           - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
             then `odf <https://pypi.org/project/odfpy/>`_ will be used.
           - Otherwise if ``path_or_buffer`` is an xls format,
             ``xlrd`` will be used.
           - Otherwise if ``path_or_buffer`` is in xlsb format,
             `pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.

           .. versionadded:: 1.3.0

           - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
             then ``openpyxl`` will be used.
           - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.

           .. warning::

              Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
              This is not supported, switch to using ``openpyxl`` instead.
    engine_kwargs : dict, optional
        Arbitrary keyword arguments passed to excel engine.

    Examples
    --------
    >>> file = pd.ExcelFile('myfile.xlsx')  # doctest: +SKIP
    >>> with pd.ExcelFile("myfile.xls") as xls:  # doctest: +SKIP
    ...     df1 = pd.read_excel(xls, "Sheet1")  # doctest: +SKIP
    """

    from pandas.io.excel._calamine import CalamineReader
    from pandas.io.excel._odfreader import ODFReader
    from pandas.io.excel._openpyxl import OpenpyxlReader
    from pandas.io.excel._pyxlsb import PyxlsbReader
    from pandas.io.excel._xlrd import XlrdReader

    _engines: Mapping[str, Any] = {
        "xlrd": XlrdReader,
        "openpyxl": OpenpyxlReader,
        "odf": ODFReader,
        "pyxlsb": PyxlsbReader,
        "calamine": CalamineReader,
    }

    def __init__(
        self,
        path_or_buffer,
        engine: str | None = None,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        if engine_kwargs is None:
            engine_kwargs = {}

        if engine is not None and engine not in self._engines:
            raise ValueError(f"Unknown engine: {engine}")

        # First argument can also be bytes, so create a buffer
        if isinstance(path_or_buffer, bytes):
            path_or_buffer = BytesIO(path_or_buffer)
            warnings.warn(
                "Passing bytes to 'read_excel' is deprecated and "
                "will be removed in a future version. To read from a "
                "byte string, wrap it in a `BytesIO` object.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        # Could be a str, ExcelFile, Book, etc.
        self.io = path_or_buffer
        # Always a string
        self._io = stringify_path(path_or_buffer)

        # Determine xlrd version if installed
        if import_optional_dependency("xlrd", errors="ignore") is None:
            xlrd_version = None
        else:
            import xlrd

            xlrd_version = Version(get_version(xlrd))

        if engine is None:
            # Only determine ext if it is needed
            ext: str | None
            if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
                ext = "xls"
            else:
                ext = inspect_excel_format(
                    content_or_path=path_or_buffer, storage_options=storage_options
                )
                if ext is None:
                    raise ValueError(
                        "Excel file format cannot be determined, you must specify "
                        "an engine manually."
                    )

            engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
            if engine == "auto":
                engine = get_default_engine(ext, mode="reader")

        assert engine is not None
        self.engine = engine
        self.storage_options = storage_options

        self._reader = self._engines[engine](
            self._io,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    def __fspath__(self):
        return self._io

    def parse(
        self,
        sheet_name: str | int | list[int] | list[str] | None = 0,
        header: int | Sequence[int] | None = 0,
        names: SequenceNotStr[Hashable] | range | None = None,
        index_col: int | Sequence[int] | None = None,
        usecols=None,
        converters=None,
        true_values: Iterable[Hashable] | None = None,
        false_values: Iterable[Hashable] | None = None,
        skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
        nrows: int | None = None,
        na_values=None,
        parse_dates: list | dict | bool = False,
        date_parser: Callable | lib.NoDefault = lib.no_default,
        date_format: str | dict[Hashable, str] | None = None,
        thousands: str | None = None,
        comment: str | None = None,
        skipfooter: int = 0,
        dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
        **kwds,
    ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
        """
        Parse specified sheet(s) into a DataFrame.

        Equivalent to read_excel(ExcelFile, ...). See the read_excel
        docstring for more info on accepted parameters.

        Returns
        -------
        DataFrame or dict of DataFrames
            DataFrame from the passed in Excel file.

        Examples
        --------
        >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
        >>> df.to_excel('myfile.xlsx')  # doctest: +SKIP
        >>> file = pd.ExcelFile('myfile.xlsx')  # doctest: +SKIP
        >>> file.parse()  # doctest: +SKIP
        """
        return self._reader.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            parse_dates=parse_dates,
            date_parser=date_parser,
            date_format=date_format,
            thousands=thousands,
            comment=comment,
            skipfooter=skipfooter,
            dtype_backend=dtype_backend,
            **kwds,
        )

    @property
    def book(self):
        return self._reader.book

    @property
    def sheet_names(self):
        return self._reader.sheet_names

    def close(self) -> None:
        """close io if necessary"""
        self._reader.close()

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        self.close()
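
A short usage sketch for the reader side (file and sheet names hypothetical):
opening one ExcelFile and parsing several sheets through it avoids re-detecting
the format and re-opening the workbook for every sheet:

    import pandas as pd

    with pd.ExcelFile("report.xlsx") as xls:
        frames = {name: xls.parse(sheet_name=name) for name in xls.sheet_names}
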
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_calamine.py
ADDED
@@ -0,0 +1,121 @@
from __future__ import annotations

from datetime import (
    date,
    datetime,
    time,
    timedelta,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Union,
)

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

import pandas as pd
from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from python_calamine import (
        CalamineSheet,
        CalamineWorkbook,
    )

    from pandas._typing import (
        FilePath,
        NaTType,
        ReadBuffer,
        Scalar,
        StorageOptions,
    )

_CellValue = Union[int, float, str, bool, time, date, datetime, timedelta]


class CalamineReader(BaseExcelReader["CalamineWorkbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using calamine engine (xlsx/xls/xlsb/ods).

        Parameters
        ----------
        filepath_or_buffer : str, path to be parsed or
            an open readable stream.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("python_calamine")
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[CalamineWorkbook]:
        from python_calamine import CalamineWorkbook

        return CalamineWorkbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any
    ) -> CalamineWorkbook:
        from python_calamine import load_workbook

        return load_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self) -> list[str]:
        from python_calamine import SheetTypeEnum

        return [
            sheet.name
            for sheet in self.book.sheets_metadata
            if sheet.typ == SheetTypeEnum.WorkSheet
        ]

    def get_sheet_by_name(self, name: str) -> CalamineSheet:
        self.raise_if_bad_sheet_by_name(name)
        return self.book.get_sheet_by_name(name)

    def get_sheet_by_index(self, index: int) -> CalamineSheet:
        self.raise_if_bad_sheet_by_index(index)
        return self.book.get_sheet_by_index(index)

    def get_sheet_data(
        self, sheet: CalamineSheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar | NaTType | time]]:
        def _convert_cell(value: _CellValue) -> Scalar | NaTType | time:
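            # calamine may report numeric cells as float; narrow integral
            # floats back to int so values like 1.0 round-trip as 1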
            if isinstance(value, float):
                val = int(value)
                if val == value:
                    return val
                else:
                    return value
            elif isinstance(value, date):
                return pd.Timestamp(value)
            elif isinstance(value, timedelta):
                return pd.Timedelta(value)
            elif isinstance(value, time):
                return value

            return value

        rows: list[list[_CellValue]] = sheet.to_python(
            skip_empty_area=False, nrows=file_rows_needed
        )
        data = [[_convert_cell(cell) for cell in row] for row in rows]

        return data
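
A minimal read sketch for this engine (hypothetical file name; requires the
optional python_calamine dependency):

    import pandas as pd

    df = pd.read_excel("book.xlsx", engine="calamine")
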
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py
ADDED
@@ -0,0 +1,253 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    cast,
)

import numpy as np

from pandas._typing import (
    FilePath,
    ReadBuffer,
    Scalar,
    StorageOptions,
)
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

import pandas as pd
from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from odf.opendocument import OpenDocument

    from pandas._libs.tslibs.nattype import NaTType


@doc(storage_options=_shared_docs["storage_options"])
class ODFReader(BaseExcelReader["OpenDocument"]):
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Read tables out of OpenDocument formatted files.

        Parameters
        ----------
        filepath_or_buffer : str, path to be parsed or
            an open readable stream.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("odf")
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[OpenDocument]:
        from odf.opendocument import OpenDocument

        return OpenDocument

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
    ) -> OpenDocument:
        from odf.opendocument import load

        return load(filepath_or_buffer, **engine_kwargs)

    @property
    def empty_value(self) -> str:
        """Property for compat with other readers."""
        return ""

    @property
    def sheet_names(self) -> list[str]:
        """Return a list of sheet names present in the document"""
        from odf.table import Table

        tables = self.book.getElementsByType(Table)
        return [t.getAttribute("name") for t in tables]

    def get_sheet_by_index(self, index: int):
        from odf.table import Table

        self.raise_if_bad_sheet_by_index(index)
        tables = self.book.getElementsByType(Table)
        return tables[index]

    def get_sheet_by_name(self, name: str):
        from odf.table import Table

        self.raise_if_bad_sheet_by_name(name)
        tables = self.book.getElementsByType(Table)

        for table in tables:
            if table.getAttribute("name") == name:
                return table

        self.close()
        raise ValueError(f"sheet {name} not found")

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar | NaTType]]:
        """
        Parse an ODF Table into a list of lists
        """
        from odf.table import (
            CoveredTableCell,
            TableCell,
            TableRow,
        )

        covered_cell_name = CoveredTableCell().qname
        table_cell_name = TableCell().qname
        cell_names = {covered_cell_name, table_cell_name}

        sheet_rows = sheet.getElementsByType(TableRow)
        empty_rows = 0
        max_row_len = 0

        table: list[list[Scalar | NaTType]] = []

        for sheet_row in sheet_rows:
            sheet_cells = [
                x
                for x in sheet_row.childNodes
                if hasattr(x, "qname") and x.qname in cell_names
            ]
            empty_cells = 0
            table_row: list[Scalar | NaTType] = []

            for sheet_cell in sheet_cells:
                if sheet_cell.qname == table_cell_name:
                    value = self._get_cell_value(sheet_cell)
                else:
                    value = self.empty_value

                column_repeat = self._get_column_repeat(sheet_cell)

                # Queue up empty values, writing only if content succeeds them
                if value == self.empty_value:
                    empty_cells += column_repeat
                else:
                    table_row.extend([self.empty_value] * empty_cells)
                    empty_cells = 0
                    table_row.extend([value] * column_repeat)

            if max_row_len < len(table_row):
                max_row_len = len(table_row)

            row_repeat = self._get_row_repeat(sheet_row)
            if len(table_row) == 0:
                empty_rows += row_repeat
            else:
                # add blank rows to our table
                table.extend([[self.empty_value]] * empty_rows)
                empty_rows = 0
                table.extend(table_row for _ in range(row_repeat))
            if file_rows_needed is not None and len(table) >= file_rows_needed:
                break

        # Make our table square
        for row in table:
            if len(row) < max_row_len:
                row.extend([self.empty_value] * (max_row_len - len(row)))

        return table

    def _get_row_repeat(self, row) -> int:
        """
        Return number of times this row was repeated
        Repeating an empty row appeared to be a common way
        of representing sparse rows in the table.
        """
        from odf.namespaces import TABLENS

        return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))

    def _get_column_repeat(self, cell) -> int:
        from odf.namespaces import TABLENS

        return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))

    def _get_cell_value(self, cell) -> Scalar | NaTType:
        from odf.namespaces import OFFICENS

        if str(cell) == "#N/A":
            return np.nan

        cell_type = cell.attributes.get((OFFICENS, "value-type"))
        if cell_type == "boolean":
            if str(cell) == "TRUE":
                return True
            return False
        if cell_type is None:
            return self.empty_value
        elif cell_type == "float":
            # GH5394
            cell_value = float(cell.attributes.get((OFFICENS, "value")))
            val = int(cell_value)
            if val == cell_value:
                return val
            return cell_value
        elif cell_type == "percentage":
            cell_value = cell.attributes.get((OFFICENS, "value"))
            return float(cell_value)
        elif cell_type == "string":
            return self._get_cell_string_value(cell)
        elif cell_type == "currency":
            cell_value = cell.attributes.get((OFFICENS, "value"))
            return float(cell_value)
        elif cell_type == "date":
            cell_value = cell.attributes.get((OFFICENS, "date-value"))
            return pd.Timestamp(cell_value)
        elif cell_type == "time":
            stamp = pd.Timestamp(str(cell))
            # cast needed here because Scalar doesn't include datetime.time
            return cast(Scalar, stamp.time())
        else:
            self.close()
            raise ValueError(f"Unrecognized type {cell_type}")

    def _get_cell_string_value(self, cell) -> str:
        """
        Find and decode OpenDocument text:s tags that represent
        a run length encoded sequence of space characters.
        """
        from odf.element import Element
        from odf.namespaces import TEXTNS
        from odf.office import Annotation
        from odf.text import S

        office_annotation = Annotation().qname
        text_s = S().qname

        value = []

        for fragment in cell.childNodes:
            if isinstance(fragment, Element):
                if fragment.qname == text_s:
                    spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
                    value.append(" " * spaces)
                elif fragment.qname == office_annotation:
                    continue
                else:
                    # recursive impl needed in case of nested fragments
                    # with multiple spaces
                    # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704
                    value.append(self._get_cell_string_value(fragment))
            else:
                value.append(str(fragment).strip("\n"))
        return "".join(value)
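
Because the default reader for OpenDocument files resolves to this engine, a
plain call is usually enough (hypothetical file name; requires the optional
odfpy dependency):

    import pandas as pd

    df = pd.read_excel("sheet.ods")
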
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py
ADDED
@@ -0,0 +1,357 @@
from __future__ import annotations

from collections import defaultdict
import datetime
import json
from typing import (
    TYPE_CHECKING,
    Any,
    DefaultDict,
    cast,
    overload,
)

from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ExcelWriterIfSheetExists,
        FilePath,
        StorageOptions,
        WriteExcelBuffer,
    )

    from pandas.io.formats.excel import ExcelCell


class ODSWriter(ExcelWriter):
    _engine = "odf"
    _supported_extensions = (".ods",)

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format=None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        from odf.opendocument import OpenDocumentSpreadsheet

        if mode == "a":
            raise ValueError("Append mode is not supported with odf!")

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
        self._book = OpenDocumentSpreadsheet(**engine_kwargs)

        super().__init__(
            path,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        self._style_dict: dict[str, str] = {}

    @property
    def book(self):
        """
        Book instance of class odf.opendocument.OpenDocumentSpreadsheet.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        """Mapping of sheet names to sheet objects."""
        from odf.table import Table

        result = {
            sheet.getAttribute("name"): sheet
            for sheet in self.book.getElementsByType(Table)
        }
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        for sheet in self.sheets.values():
            self.book.spreadsheet.addElement(sheet)
        self.book.save(self._handles.handle)

    def _write_cells(
        self,
        cells: list[ExcelCell],
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        """
        Write the frame cells using odf
        """
        from odf.table import (
            Table,
            TableCell,
            TableRow,
        )
        from odf.text import P

        sheet_name = self._get_sheet_name(sheet_name)
        assert sheet_name is not None

        if sheet_name in self.sheets:
            wks = self.sheets[sheet_name]
        else:
            wks = Table(name=sheet_name)
            self.book.spreadsheet.addElement(wks)

        if validate_freeze_panes(freeze_panes):
            freeze_panes = cast(tuple[int, int], freeze_panes)
            self._create_freeze_panes(sheet_name, freeze_panes)

        for _ in range(startrow):
            wks.addElement(TableRow())

        rows: DefaultDict = defaultdict(TableRow)
        col_count: DefaultDict = defaultdict(int)

        for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)):
            # only add empty cells if the row is still empty
            if not col_count[cell.row]:
                for _ in range(startcol):
                    rows[cell.row].addElement(TableCell())

            # fill with empty cells if needed
            for _ in range(cell.col - col_count[cell.row]):
                rows[cell.row].addElement(TableCell())
                col_count[cell.row] += 1

            pvalue, tc = self._make_table_cell(cell)
            rows[cell.row].addElement(tc)
            col_count[cell.row] += 1
            p = P(text=pvalue)
            tc.addElement(p)

        # add all rows to the sheet
        if len(rows) > 0:
            for row_nr in range(max(rows.keys()) + 1):
                wks.addElement(rows[row_nr])

    def _make_table_cell_attributes(self, cell) -> dict[str, int | str]:
        """Convert cell attributes to OpenDocument attributes

        Parameters
        ----------
        cell : ExcelCell
            Spreadsheet cell data

        Returns
        -------
        attributes : Dict[str, Union[int, str]]
            Dictionary with attributes and attribute values
        """
        attributes: dict[str, int | str] = {}
        style_name = self._process_style(cell.style)
        if style_name is not None:
            attributes["stylename"] = style_name
        if cell.mergestart is not None and cell.mergeend is not None:
            attributes["numberrowsspanned"] = max(1, cell.mergestart)
            attributes["numbercolumnsspanned"] = cell.mergeend
        return attributes

    def _make_table_cell(self, cell) -> tuple[object, Any]:
        """Convert cell data to an OpenDocument spreadsheet cell

        Parameters
        ----------
        cell : ExcelCell
            Spreadsheet cell data

        Returns
        -------
        pvalue, cell : Tuple[str, TableCell]
            Display value, Cell value
        """
        from odf.table import TableCell

        attributes = self._make_table_cell_attributes(cell)
        val, fmt = self._value_with_fmt(cell.val)
        pvalue = value = val
        if isinstance(val, bool):
            value = str(val).lower()
            pvalue = str(val).upper()
            return (
                pvalue,
                TableCell(
                    valuetype="boolean",
                    booleanvalue=value,
                    attributes=attributes,
                ),
            )
        elif isinstance(val, datetime.datetime):
            # Fast formatting
            value = val.isoformat()
            # Slow but locale-dependent
            pvalue = val.strftime("%c")
            return (
                pvalue,
                TableCell(valuetype="date", datevalue=value, attributes=attributes),
            )
        elif isinstance(val, datetime.date):
            # Fast formatting
            value = f"{val.year}-{val.month:02d}-{val.day:02d}"
            # Slow but locale-dependent
            pvalue = val.strftime("%x")
            return (
                pvalue,
                TableCell(valuetype="date", datevalue=value, attributes=attributes),
            )
        elif isinstance(val, str):
            return (
                pvalue,
                TableCell(
                    valuetype="string",
                    stringvalue=value,
                    attributes=attributes,
                ),
            )
        else:
            return (
                pvalue,
                TableCell(
                    valuetype="float",
                    value=value,
                    attributes=attributes,
                ),
            )

    @overload
    def _process_style(self, style: dict[str, Any]) -> str:
        ...

    @overload
    def _process_style(self, style: None) -> None:
        ...

    def _process_style(self, style: dict[str, Any] | None) -> str | None:
        """Convert a style dictionary to an OpenDocument style sheet

        Parameters
        ----------
        style : Dict
            Style dictionary

        Returns
        -------
        style_key : str
            Unique style key for later reference in sheet
        """
        from odf.style import (
            ParagraphProperties,
            Style,
            TableCellProperties,
            TextProperties,
        )

        if style is None:
            return None
        style_key = json.dumps(style)
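        # styles are deduplicated by their JSON serialization: an identical
        # style dict reuses the previously registered ODF style name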
        if style_key in self._style_dict:
            return self._style_dict[style_key]
        name = f"pd{len(self._style_dict)+1}"
        self._style_dict[style_key] = name
        odf_style = Style(name=name, family="table-cell")
        if "font" in style:
            font = style["font"]
            if font.get("bold", False):
                odf_style.addElement(TextProperties(fontweight="bold"))
        if "borders" in style:
            borders = style["borders"]
            for side, thickness in borders.items():
                thickness_translation = {"thin": "0.75pt solid #000000"}
                odf_style.addElement(
                    TableCellProperties(
                        attributes={f"border{side}": thickness_translation[thickness]}
                    )
                )
        if "alignment" in style:
            alignment = style["alignment"]
            horizontal = alignment.get("horizontal")
            if horizontal:
                odf_style.addElement(ParagraphProperties(textalign=horizontal))
            vertical = alignment.get("vertical")
            if vertical:
                odf_style.addElement(TableCellProperties(verticalalign=vertical))
        self.book.styles.addElement(odf_style)
        return name

    def _create_freeze_panes(
        self, sheet_name: str, freeze_panes: tuple[int, int]
    ) -> None:
        """
        Create freeze panes in the sheet.

        Parameters
        ----------
        sheet_name : str
            Name of the spreadsheet
        freeze_panes : tuple of (int, int)
            Freeze pane location x and y
        """
        from odf.config import (
            ConfigItem,
            ConfigItemMapEntry,
            ConfigItemMapIndexed,
            ConfigItemMapNamed,
            ConfigItemSet,
        )

        config_item_set = ConfigItemSet(name="ooo:view-settings")
        self.book.settings.addElement(config_item_set)

        config_item_map_indexed = ConfigItemMapIndexed(name="Views")
        config_item_set.addElement(config_item_map_indexed)

        config_item_map_entry = ConfigItemMapEntry()
        config_item_map_indexed.addElement(config_item_map_entry)

        config_item_map_named = ConfigItemMapNamed(name="Tables")
        config_item_map_entry.addElement(config_item_map_named)

        config_item_map_entry = ConfigItemMapEntry(name=sheet_name)
        config_item_map_named.addElement(config_item_map_entry)

        config_item_map_entry.addElement(
            ConfigItem(name="HorizontalSplitMode", type="short", text="2")
        )
        config_item_map_entry.addElement(
            ConfigItem(name="VerticalSplitMode", type="short", text="2")
        )
        config_item_map_entry.addElement(
            ConfigItem(
                name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0])
            )
        )
        config_item_map_entry.addElement(
            ConfigItem(
                name="VerticalSplitPosition", type="int", text=str(freeze_panes[1])
            )
        )
        config_item_map_entry.addElement(
            ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0]))
        )
        config_item_map_entry.addElement(
            ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1]))
        )
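
A minimal write sketch that exercises this writer (hypothetical file name;
requires the optional odfpy dependency):

    import pandas as pd

    df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    df.to_excel("out.ods", engine="odf")
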
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py
ADDED
@@ -0,0 +1,639 @@
from __future__ import annotations

import mmap
from typing import (
    TYPE_CHECKING,
    Any,
    cast,
)

import numpy as np

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import (
    BaseExcelReader,
    ExcelWriter,
)
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from openpyxl import Workbook
    from openpyxl.descriptors.serialisable import Serialisable

    from pandas._typing import (
        ExcelWriterIfSheetExists,
        FilePath,
        ReadBuffer,
        Scalar,
        StorageOptions,
        WriteExcelBuffer,
    )


class OpenpyxlWriter(ExcelWriter):
    _engine = "openpyxl"
    _supported_extensions = (".xlsx", ".xlsm")

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # Use the openpyxl module as the Excel writer.
        from openpyxl.workbook import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        super().__init__(
            path,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from
        # the file and later write to it
        if "r+" in self._mode:  # Load from existing workbook
            from openpyxl import load_workbook

            try:
                self._book = load_workbook(self._handles.handle, **engine_kwargs)
            except TypeError:
                self._handles.handle.close()
                raise
            self._handles.handle.seek(0)
        else:
            # Create workbook object with default optimized_write=True.
            try:
                self._book = Workbook(**engine_kwargs)
            except TypeError:
                self._handles.handle.close()
                raise

            if self.book.worksheets:
                self.book.remove(self.book.worksheets[0])

    @property
    def book(self) -> Workbook:
        """
        Book instance of class openpyxl.workbook.Workbook.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        """Mapping of sheet names to sheet objects."""
        result = {name: self.book[name] for name in self.book.sheetnames}
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        self.book.save(self._handles.handle)
        if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap):
            # truncate file to the written content
            self._handles.handle.truncate()

    @classmethod
    def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:
        """
        Convert a style_dict to a set of kwargs suitable for initializing
        or updating-on-copy an openpyxl v2 style object.

        Parameters
        ----------
        style_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'font'
                'fill'
                'border' ('borders')
                'alignment'
                'number_format'
                'protection'

        Returns
        -------
        style_kwargs : dict
            A dict with the same, normalized keys as ``style_dict`` but each
            value has been replaced with a native openpyxl style object of the
            appropriate class.
        """
        _style_key_map = {"borders": "border"}

        style_kwargs: dict[str, Serialisable] = {}
        for k, v in style_dict.items():
            k = _style_key_map.get(k, k)
            _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
            new_v = _conv_to_x(v)
            if new_v:
                style_kwargs[k] = new_v

        return style_kwargs

    @classmethod
    def _convert_to_color(cls, color_spec):
        """
        Convert ``color_spec`` to an openpyxl v2 Color object.

        Parameters
        ----------
        color_spec : str, dict
            A 32-bit ARGB hex string, or a dict with zero or more of the
            following keys.
                'rgb'
                'indexed'
                'auto'
                'theme'
                'tint'
                'index'
                'type'

        Returns
        -------
        color : openpyxl.styles.Color
        """
        from openpyxl.styles import Color

        if isinstance(color_spec, str):
            return Color(color_spec)
        else:
            return Color(**color_spec)

    @classmethod
    def _convert_to_font(cls, font_dict):
        """
        Convert ``font_dict`` to an openpyxl v2 Font object.

        Parameters
        ----------
        font_dict : dict
            A dict with zero or more of the following keys (or their synonyms).
                'name'
                'size' ('sz')
                'bold' ('b')
                'italic' ('i')
                'underline' ('u')
                'strikethrough' ('strike')
                'color'
                'vertAlign' ('vertalign')
                'charset'
                'scheme'
                'family'
                'outline'
                'shadow'
                'condense'

        Returns
        -------
        font : openpyxl.styles.Font
        """
        from openpyxl.styles import Font

        _font_key_map = {
            "sz": "size",
            "b": "bold",
            "i": "italic",
            "u": "underline",
            "strike": "strikethrough",
            "vertalign": "vertAlign",
        }

        font_kwargs = {}
        for k, v in font_dict.items():
            k = _font_key_map.get(k, k)
            if k == "color":
                v = cls._convert_to_color(v)
            font_kwargs[k] = v

        return Font(**font_kwargs)

    @classmethod
    def _convert_to_stop(cls, stop_seq):
        """
        Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
        suitable for initializing the ``GradientFill`` ``stop`` parameter.

        Parameters
        ----------
        stop_seq : iterable
            An iterable that yields objects suitable for consumption by
            ``_convert_to_color``.

        Returns
        -------
        stop : list of openpyxl.styles.Color
        """
        return map(cls._convert_to_color, stop_seq)

    @classmethod
    def _convert_to_fill(cls, fill_dict: dict[str, Any]):
        """
        Convert ``fill_dict`` to an openpyxl v2 Fill object.

        Parameters
        ----------
        fill_dict : dict
            A dict with one or more of the following keys (or their synonyms),
                'fill_type' ('patternType', 'patterntype')
                'start_color' ('fgColor', 'fgcolor')
                'end_color' ('bgColor', 'bgcolor')
            or one or more of the following keys (or their synonyms).
                'type' ('fill_type')
                'degree'
                'left'
                'right'
                'top'
                'bottom'
                'stop'

        Returns
        -------
        fill : openpyxl.styles.Fill
        """
        from openpyxl.styles import (
            GradientFill,
            PatternFill,
        )

        _pattern_fill_key_map = {
            "patternType": "fill_type",
            "patterntype": "fill_type",
            "fgColor": "start_color",
            "fgcolor": "start_color",
            "bgColor": "end_color",
            "bgcolor": "end_color",
        }

        _gradient_fill_key_map = {"fill_type": "type"}

        pfill_kwargs = {}
        gfill_kwargs = {}
        for k, v in fill_dict.items():
            pk = _pattern_fill_key_map.get(k)
            gk = _gradient_fill_key_map.get(k)
            if pk in ["start_color", "end_color"]:
                v = cls._convert_to_color(v)
            if gk == "stop":
                v = cls._convert_to_stop(v)
            if pk:
                pfill_kwargs[pk] = v
            elif gk:
                gfill_kwargs[gk] = v
            else:
                pfill_kwargs[k] = v
                gfill_kwargs[k] = v

        try:
            return PatternFill(**pfill_kwargs)
        except TypeError:
            return GradientFill(**gfill_kwargs)

    @classmethod
    def _convert_to_side(cls, side_spec):
        """
        Convert ``side_spec`` to an openpyxl v2 Side object.
|
313 |
+
|
314 |
+
Parameters
|
315 |
+
----------
|
316 |
+
side_spec : str, dict
|
317 |
+
A string specifying the border style, or a dict with zero or more
|
318 |
+
of the following keys (or their synonyms).
|
319 |
+
'style' ('border_style')
|
320 |
+
'color'
|
321 |
+
|
322 |
+
Returns
|
323 |
+
-------
|
324 |
+
side : openpyxl.styles.Side
|
325 |
+
"""
|
326 |
+
from openpyxl.styles import Side
|
327 |
+
|
328 |
+
_side_key_map = {"border_style": "style"}
|
329 |
+
|
330 |
+
if isinstance(side_spec, str):
|
331 |
+
return Side(style=side_spec)
|
332 |
+
|
333 |
+
side_kwargs = {}
|
334 |
+
for k, v in side_spec.items():
|
335 |
+
k = _side_key_map.get(k, k)
|
336 |
+
if k == "color":
|
337 |
+
v = cls._convert_to_color(v)
|
338 |
+
side_kwargs[k] = v
|
339 |
+
|
340 |
+
return Side(**side_kwargs)
|
341 |
+
|
342 |
+
@classmethod
|
343 |
+
def _convert_to_border(cls, border_dict):
|
344 |
+
"""
|
345 |
+
Convert ``border_dict`` to an openpyxl v2 Border object.
|
346 |
+
|
347 |
+
Parameters
|
348 |
+
----------
|
349 |
+
border_dict : dict
|
350 |
+
A dict with zero or more of the following keys (or their synonyms).
|
351 |
+
'left'
|
352 |
+
'right'
|
353 |
+
'top'
|
354 |
+
'bottom'
|
355 |
+
'diagonal'
|
356 |
+
'diagonal_direction'
|
357 |
+
'vertical'
|
358 |
+
'horizontal'
|
359 |
+
'diagonalUp' ('diagonalup')
|
360 |
+
'diagonalDown' ('diagonaldown')
|
361 |
+
'outline'
|
362 |
+
|
363 |
+
Returns
|
364 |
+
-------
|
365 |
+
border : openpyxl.styles.Border
|
366 |
+
"""
|
367 |
+
from openpyxl.styles import Border
|
368 |
+
|
369 |
+
_border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}
|
370 |
+
|
371 |
+
border_kwargs = {}
|
372 |
+
for k, v in border_dict.items():
|
373 |
+
k = _border_key_map.get(k, k)
|
374 |
+
if k == "color":
|
375 |
+
v = cls._convert_to_color(v)
|
376 |
+
if k in ["left", "right", "top", "bottom", "diagonal"]:
|
377 |
+
v = cls._convert_to_side(v)
|
378 |
+
border_kwargs[k] = v
|
379 |
+
|
380 |
+
return Border(**border_kwargs)
|
381 |
+
|
382 |
+
@classmethod
|
383 |
+
def _convert_to_alignment(cls, alignment_dict):
|
384 |
+
"""
|
385 |
+
Convert ``alignment_dict`` to an openpyxl v2 Alignment object.
|
386 |
+
|
387 |
+
Parameters
|
388 |
+
----------
|
389 |
+
alignment_dict : dict
|
390 |
+
A dict with zero or more of the following keys (or their synonyms).
|
391 |
+
'horizontal'
|
392 |
+
'vertical'
|
393 |
+
'text_rotation'
|
394 |
+
'wrap_text'
|
395 |
+
'shrink_to_fit'
|
396 |
+
'indent'
|
397 |
+
Returns
|
398 |
+
-------
|
399 |
+
alignment : openpyxl.styles.Alignment
|
400 |
+
"""
|
401 |
+
from openpyxl.styles import Alignment
|
402 |
+
|
403 |
+
return Alignment(**alignment_dict)
|
404 |
+
|
405 |
+
@classmethod
|
406 |
+
def _convert_to_number_format(cls, number_format_dict):
|
407 |
+
"""
|
408 |
+
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
|
409 |
+
initializer.
|
410 |
+
|
411 |
+
Parameters
|
412 |
+
----------
|
413 |
+
number_format_dict : dict
|
414 |
+
A dict with zero or more of the following keys.
|
415 |
+
'format_code' : str
|
416 |
+
|
417 |
+
Returns
|
418 |
+
-------
|
419 |
+
number_format : str
|
420 |
+
"""
|
421 |
+
return number_format_dict["format_code"]
|
422 |
+
|
423 |
+
@classmethod
|
424 |
+
def _convert_to_protection(cls, protection_dict):
|
425 |
+
"""
|
426 |
+
Convert ``protection_dict`` to an openpyxl v2 Protection object.
|
427 |
+
|
428 |
+
Parameters
|
429 |
+
----------
|
430 |
+
protection_dict : dict
|
431 |
+
A dict with zero or more of the following keys.
|
432 |
+
'locked'
|
433 |
+
'hidden'
|
434 |
+
|
435 |
+
Returns
|
436 |
+
-------
|
437 |
+
"""
|
438 |
+
from openpyxl.styles import Protection
|
439 |
+
|
440 |
+
return Protection(**protection_dict)
|
441 |
+
|
442 |
+
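
A minimal sketch of the style-dict shape these converters accept, exercised directly against openpyxl rather than through the pandas internals above (the dict contents are illustrative):

from openpyxl.styles import Border, Color, Font, Side

# "borders" is a synonym normalized to "border" by _convert_to_style_kwargs;
# nested color specs may be ARGB hex strings or dicts, per _convert_to_color.
style_dict = {
    "font": {"b": True, "color": "FF0000FF"},  # 'b' normalizes to 'bold'
    "borders": {"bottom": {"style": "thin", "color": {"rgb": "FF000000"}}},
}

# The converters above would produce the equivalent native objects:
font = Font(bold=True, color=Color("FF0000FF"))
border = Border(bottom=Side(style="thin", color=Color(rgb="FF000000")))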
    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Write the frame cells using openpyxl.
        sheet_name = self._get_sheet_name(sheet_name)

        _style_cache: dict[str, dict[str, Serialisable]] = {}

        if sheet_name in self.sheets and self._if_sheet_exists != "new":
            if "r+" in self._mode:
                if self._if_sheet_exists == "replace":
                    old_wks = self.sheets[sheet_name]
                    target_index = self.book.index(old_wks)
                    del self.book[sheet_name]
                    wks = self.book.create_sheet(sheet_name, target_index)
                elif self._if_sheet_exists == "error":
                    raise ValueError(
                        f"Sheet '{sheet_name}' already exists and "
                        f"if_sheet_exists is set to 'error'."
                    )
                elif self._if_sheet_exists == "overlay":
                    wks = self.sheets[sheet_name]
                else:
                    raise ValueError(
                        f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. "
                        "Valid options are 'error', 'new', 'replace' and 'overlay'."
                    )
            else:
                wks = self.sheets[sheet_name]
        else:
            wks = self.book.create_sheet()
            wks.title = sheet_name

        if validate_freeze_panes(freeze_panes):
            freeze_panes = cast(tuple[int, int], freeze_panes)
            wks.freeze_panes = wks.cell(
                row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
            )

        for cell in cells:
            xcell = wks.cell(
                row=startrow + cell.row + 1, column=startcol + cell.col + 1
            )
            xcell.value, fmt = self._value_with_fmt(cell.val)
            if fmt:
                xcell.number_format = fmt

            style_kwargs: dict[str, Serialisable] | None = {}
            if cell.style:
                key = str(cell.style)
                style_kwargs = _style_cache.get(key)
                if style_kwargs is None:
                    style_kwargs = self._convert_to_style_kwargs(cell.style)
                    _style_cache[key] = style_kwargs

            if style_kwargs:
                for k, v in style_kwargs.items():
                    setattr(xcell, k, v)

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_cells(
                    start_row=startrow + cell.row + 1,
                    start_column=startcol + cell.col + 1,
                    end_column=startcol + cell.mergeend + 1,
                    end_row=startrow + cell.mergestart + 1,
                )

                # When cells are merged only the top-left cell is preserved
                # The behaviour of the other cells in a merged range is
                # undefined
                if style_kwargs:
                    first_row = startrow + cell.row + 1
                    last_row = startrow + cell.mergestart + 1
                    first_col = startcol + cell.col + 1
                    last_col = startcol + cell.mergeend + 1

                    for row in range(first_row, last_row + 1):
                        for col in range(first_col, last_col + 1):
                            if row == first_row and col == first_col:
                                # Ignore first cell. It is already handled.
                                continue
                            xcell = wks.cell(column=col, row=row)
                            for k, v in style_kwargs.items():
                                setattr(xcell, k, v)
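
For orientation, a hedged usage sketch of the branches above as they surface in the public API: append mode plus ``if_sheet_exists="overlay"`` writes into an existing sheet in place (the file name is a placeholder, and the workbook is assumed to already exist):

import pandas as pd

# Assumes report.xlsx exists and has a sheet named "Sheet1".
with pd.ExcelWriter(
    "report.xlsx",
    engine="openpyxl",
    mode="a",                   # "r+" internally, so the branches above apply
    if_sheet_exists="overlay",  # or "replace" / "error" / "new"
) as writer:
    pd.DataFrame({"a": [1, 2]}).to_excel(writer, sheet_name="Sheet1")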

class OpenpyxlReader(BaseExcelReader["Workbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using openpyxl engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("openpyxl")
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Workbook]:
        from openpyxl import Workbook

        return Workbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
    ) -> Workbook:
        from openpyxl import load_workbook

        default_kwargs = {"read_only": True, "data_only": True, "keep_links": False}

        return load_workbook(
            filepath_or_buffer,
            **(default_kwargs | engine_kwargs),
        )

    @property
    def sheet_names(self) -> list[str]:
        return [sheet.title for sheet in self.book.worksheets]

    def get_sheet_by_name(self, name: str):
        self.raise_if_bad_sheet_by_name(name)
        return self.book[name]

    def get_sheet_by_index(self, index: int):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.worksheets[index]

    def _convert_cell(self, cell) -> Scalar:
        from openpyxl.cell.cell import (
            TYPE_ERROR,
            TYPE_NUMERIC,
        )

        if cell.value is None:
            return ""  # compat with xlrd
        elif cell.data_type == TYPE_ERROR:
            return np.nan
        elif cell.data_type == TYPE_NUMERIC:
            val = int(cell.value)
            if val == cell.value:
                return val
            return float(cell.value)

        return cell.value

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar]]:
        if self.book.read_only:
            sheet.reset_dimensions()

        data: list[list[Scalar]] = []
        last_row_with_data = -1
        for row_number, row in enumerate(sheet.rows):
            converted_row = [self._convert_cell(cell) for cell in row]
            while converted_row and converted_row[-1] == "":
                # trim trailing empty elements
                converted_row.pop()
            if converted_row:
                last_row_with_data = row_number
            data.append(converted_row)
            if file_rows_needed is not None and len(data) >= file_rows_needed:
                break

        # Trim trailing empty rows
        data = data[: last_row_with_data + 1]

        if len(data) > 0:
            # extend rows to max width
            max_width = max(len(data_row) for data_row in data)
            if min(len(data_row) for data_row in data) < max_width:
                empty_cell: list[Scalar] = [""]
                data = [
                    data_row + (max_width - len(data_row)) * empty_cell
                    for data_row in data
                ]

        return data
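
A hedged usage sketch of the reader: load_workbook above merges user engine_kwargs over {"read_only": True, "data_only": True, "keep_links": False} with a dict union, so one default can be overridden while the rest are kept (the path is a placeholder; ``engine_kwargs`` in read_excel assumes a recent pandas 2.x):

import pandas as pd

df = pd.read_excel(
    "report.xlsx",                       # placeholder path
    engine="openpyxl",
    engine_kwargs={"data_only": False},  # e.g. keep formulas, not cached values
)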
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py
ADDED
@@ -0,0 +1,127 @@
# pyright: reportMissingImports=false
from __future__ import annotations

from typing import TYPE_CHECKING

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from pyxlsb import Workbook

    from pandas._typing import (
        FilePath,
        ReadBuffer,
        Scalar,
        StorageOptions,
    )


class PyxlsbReader(BaseExcelReader["Workbook"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer: FilePath | ReadBuffer[bytes],
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using pyxlsb engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object, or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        import_optional_dependency("pyxlsb")
        # This will call load_workbook on the filepath or buffer
        # And set the result to the book-attribute
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Workbook]:
        from pyxlsb import Workbook

        return Workbook

    def load_workbook(
        self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
    ) -> Workbook:
        from pyxlsb import open_workbook

        # TODO: hack in buffer capability
        # This might need some modifications to the Pyxlsb library
        # Actual work for opening it is in xlsbpackage.py, line 20-ish

        return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self) -> list[str]:
        return self.book.sheets

    def get_sheet_by_name(self, name: str):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.get_sheet(name)

    def get_sheet_by_index(self, index: int):
        self.raise_if_bad_sheet_by_index(index)
        # pyxlsb sheets are indexed from 1 onwards
        # There's a fix for this in the source, but the pypi package doesn't have it
        return self.book.get_sheet(index + 1)

    def _convert_cell(self, cell) -> Scalar:
        # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
        # This means that there is no way to read datetime types from an xlsb file yet
        if cell.v is None:
            return ""  # Prevents non-named columns from not showing up as Unnamed: i
        if isinstance(cell.v, float):
            val = int(cell.v)
            if val == cell.v:
                return val
            else:
                return float(cell.v)

        return cell.v

    def get_sheet_data(
        self,
        sheet,
        file_rows_needed: int | None = None,
    ) -> list[list[Scalar]]:
        data: list[list[Scalar]] = []
        previous_row_number = -1
        # When sparse=True the rows can have different lengths and empty rows are
        # not returned. The cells are namedtuples of row, col, value (r, c, v).
        for row in sheet.rows(sparse=True):
            row_number = row[0].r
            converted_row = [self._convert_cell(cell) for cell in row]
            while converted_row and converted_row[-1] == "":
                # trim trailing empty elements
                converted_row.pop()
            if converted_row:
                data.extend([[]] * (row_number - previous_row_number - 1))
                data.append(converted_row)
                previous_row_number = row_number
            if file_rows_needed is not None and len(data) >= file_rows_needed:
                break
        if data:
            # extend rows to max_width
            max_width = max(len(data_row) for data_row in data)
            if min(len(data_row) for data_row in data) < max_width:
                empty_cell: list[Scalar] = [""]
                data = [
                    data_row + (max_width - len(data_row)) * empty_cell
                    for data_row in data
                ]
        return data
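
The sparse-row bookkeeping in get_sheet_data can be checked in isolation; this standalone sketch mimics pyxlsb's (r, c, v) cell tuples without needing the library:

from collections import namedtuple

Cell = namedtuple("Cell", "r c v")
rows = [[Cell(0, 0, "a")], [Cell(3, 0, "b")]]  # rows 1-2 were empty and omitted

data: list[list] = []
previous_row_number = -1
for row in rows:
    converted_row = [cell.v for cell in row]
    # re-insert the skipped empty rows so row numbers stay aligned
    data.extend([[]] * (row[0].r - previous_row_number - 1))
    data.append(converted_row)
    previous_row_number = row[0].r

assert data == [["a"], [], [], ["b"]]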
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_util.py
ADDED
@@ -0,0 +1,334 @@
from __future__ import annotations

from collections.abc import (
    Hashable,
    Iterable,
    MutableMapping,
    Sequence,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    TypeVar,
    overload,
)

from pandas.compat._optional import import_optional_dependency

from pandas.core.dtypes.common import (
    is_integer,
    is_list_like,
)

if TYPE_CHECKING:
    from pandas.io.excel._base import ExcelWriter

    ExcelWriter_t = type[ExcelWriter]
    usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])

_writers: MutableMapping[str, ExcelWriter_t] = {}


def register_writer(klass: ExcelWriter_t) -> None:
    """
    Add engine to the excel writer registry.

    You must use this method to integrate with ``to_excel``.

    Parameters
    ----------
    klass : ExcelWriter
    """
    if not callable(klass):
        raise ValueError("Can only register callables as engines")
    engine_name = klass._engine
    _writers[engine_name] = klass


def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:
    """
    Return the default reader/writer for the given extension.

    Parameters
    ----------
    ext : str
        The excel file extension for which to get the default engine.
    mode : str {'reader', 'writer'}
        Whether to get the default engine for reading or writing.
        Either 'reader' or 'writer'

    Returns
    -------
    str
        The default engine for the extension.
    """
    _default_readers = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "xls": "xlrd",
        "ods": "odf",
    }
    _default_writers = {
        "xlsx": "openpyxl",
        "xlsm": "openpyxl",
        "xlsb": "pyxlsb",
        "ods": "odf",
    }
    assert mode in ["reader", "writer"]
    if mode == "writer":
        # Prefer xlsxwriter over openpyxl if installed
        xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")
        if xlsxwriter:
            _default_writers["xlsx"] = "xlsxwriter"
        return _default_writers[ext]
    else:
        return _default_readers[ext]


def get_writer(engine_name: str) -> ExcelWriter_t:
    try:
        return _writers[engine_name]
    except KeyError as err:
        raise ValueError(f"No Excel writer '{engine_name}'") from err


def _excel2num(x: str) -> int:
    """
    Convert Excel column name like 'AB' to 0-based column index.

    Parameters
    ----------
    x : str
        The Excel column name to convert to a 0-based column index.

    Returns
    -------
    num : int
        The column index corresponding to the name.

    Raises
    ------
    ValueError
        Part of the Excel column name was invalid.
    """
    index = 0

    for c in x.upper().strip():
        cp = ord(c)

        if cp < ord("A") or cp > ord("Z"):
            raise ValueError(f"Invalid column name: {x}")

        index = index * 26 + cp - ord("A") + 1

    return index - 1


def _range2cols(areas: str) -> list[int]:
    """
    Convert comma separated list of column names and ranges to indices.

    Parameters
    ----------
    areas : str
        A string containing a sequence of column ranges (or areas).

    Returns
    -------
    cols : list
        A list of 0-based column indices.

    Examples
    --------
    >>> _range2cols('A:E')
    [0, 1, 2, 3, 4]
    >>> _range2cols('A,C,Z:AB')
    [0, 2, 25, 26, 27]
    """
    cols: list[int] = []

    for rng in areas.split(","):
        if ":" in rng:
            rngs = rng.split(":")
            cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))
        else:
            cols.append(_excel2num(rng))

    return cols
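
The column-name arithmetic is plain base-26 with 'A' counting as 1; a standalone check of the scheme _excel2num uses:

def excel2num(x: str) -> int:
    # same base-26 accumulation as _excel2num above, minus validation
    index = 0
    for c in x.upper().strip():
        index = index * 26 + ord(c) - ord("A") + 1
    return index - 1

assert excel2num("A") == 0
assert excel2num("Z") == 25
assert excel2num("AA") == 26  # 1 * 26 + 1, then - 1
assert excel2num("AB") == 27  # matches the _range2cols doctest above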
@overload
def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
    ...


@overload
def maybe_convert_usecols(usecols: list[str]) -> list[str]:
    ...


@overload
def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
    ...


@overload
def maybe_convert_usecols(usecols: None) -> None:
    ...


def maybe_convert_usecols(
    usecols: str | list[int] | list[str] | usecols_func | None,
) -> None | list[int] | list[str] | usecols_func:
    """
    Convert `usecols` into a compatible format for parsing in `parsers.py`.

    Parameters
    ----------
    usecols : object
        The use-columns object to potentially convert.

    Returns
    -------
    converted : object
        The compatible format of `usecols`.
    """
    if usecols is None:
        return usecols

    if is_integer(usecols):
        raise ValueError(
            "Passing an integer for `usecols` is no longer supported. "
            "Please pass in a list of int from 0 to `usecols` inclusive instead."
        )

    if isinstance(usecols, str):
        return _range2cols(usecols)

    return usecols


@overload
def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
    ...


@overload
def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
    ...


def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
    if freeze_panes is not None:
        if len(freeze_panes) == 2 and all(
            isinstance(item, int) for item in freeze_panes
        ):
            return True

        raise ValueError(
            "freeze_panes must be of form (row, column) "
            "where row and column are integers"
        )

    # freeze_panes wasn't specified, return False so it won't be applied
    # to output sheet
    return False


def fill_mi_header(
    row: list[Hashable], control_row: list[bool]
) -> tuple[list[Hashable], list[bool]]:
    """
    Forward fill blank entries in row but only inside the same parent index.

    Used for creating headers in Multiindex.

    Parameters
    ----------
    row : list
        List of items in a single row.
    control_row : list of bool
        Helps to determine if particular column is in same parent index as the
        previous value. Used to stop propagation of empty cells between
        different indexes.

    Returns
    -------
    Returns changed row and control_row
    """
    last = row[0]
    for i in range(1, len(row)):
        if not control_row[i]:
            last = row[i]

        if row[i] == "" or row[i] is None:
            row[i] = last
        else:
            control_row[i] = False
            last = row[i]

    return row, control_row


def pop_header_name(
    row: list[Hashable], index_col: int | Sequence[int]
) -> tuple[Hashable | None, list[Hashable]]:
    """
    Pop the header name for MultiIndex parsing.

    Parameters
    ----------
    row : list
        The data row to parse for the header name.
    index_col : int, list
        The index columns for our data. Assumed to be non-null.

    Returns
    -------
    header_name : str
        The extracted header name.
    trimmed_row : list
        The original data row with the header name removed.
    """
    # Pop out header name and fill w/blank.
    if is_list_like(index_col):
        assert isinstance(index_col, Iterable)
        i = max(index_col)
    else:
        assert not isinstance(index_col, Iterable)
        i = index_col

    header_name = row[i]
    header_name = None if header_name == "" else header_name

    return header_name, row[:i] + [""] + row[i + 1 :]


def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
    """
    Used to combine two sources of kwargs for the backend engine.

    Use of kwargs is deprecated, this function is solely for use in 1.3 and should
    be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs
    or kwargs must be None or empty respectively.

    Parameters
    ----------
    engine_kwargs: dict
        kwargs to be passed through to the engine.
    kwargs: dict
        kwargs to be passed through to the engine (deprecated)

    Returns
    -------
    engine_kwargs combined with kwargs
    """
    if engine_kwargs is None:
        result = {}
    else:
        result = engine_kwargs.copy()
    result.update(kwargs)
    return result
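
fill_mi_header's forward fill stops at the next non-empty entry and marks where a new parent index began; a quick check against the helper defined above (a private API, so subject to change):

from pandas.io.excel._util import fill_mi_header

row, control = fill_mi_header(["a", "", "b", ""], [True, True, True, True])
assert row == ["a", "a", "b", "b"]           # blanks filled within each parent
assert control == [True, True, False, True]  # "b" started a new parent index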
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py
ADDED
@@ -0,0 +1,143 @@
from __future__ import annotations

from datetime import time
import math
from typing import TYPE_CHECKING

import numpy as np

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from xlrd import Book

    from pandas._typing import (
        Scalar,
        StorageOptions,
    )


class XlrdReader(BaseExcelReader["Book"]):
    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using xlrd engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
        import_optional_dependency("xlrd", extra=err_msg)
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Book]:
        from xlrd import Book

        return Book

    def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book:
        from xlrd import open_workbook

        if hasattr(filepath_or_buffer, "read"):
            data = filepath_or_buffer.read()
            return open_workbook(file_contents=data, **engine_kwargs)
        else:
            return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self):
        return self.book.sheet_names()

    def get_sheet_by_name(self, name):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.sheet_by_name(name)

    def get_sheet_by_index(self, index):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.sheet_by_index(index)

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar]]:
        from xlrd import (
            XL_CELL_BOOLEAN,
            XL_CELL_DATE,
            XL_CELL_ERROR,
            XL_CELL_NUMBER,
            xldate,
        )

        epoch1904 = self.book.datemode

        def _parse_cell(cell_contents, cell_typ):
            """
            converts the contents of the cell into a pandas appropriate object
            """
            if cell_typ == XL_CELL_DATE:
                # Use the newer xlrd datetime handling.
                try:
                    cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
                except OverflowError:
                    return cell_contents

                # Excel doesn't distinguish between dates and time,
                # so we treat dates on the epoch as times only.
                # Also, Excel supports 1900 and 1904 epochs.
                year = (cell_contents.timetuple())[0:3]
                if (not epoch1904 and year == (1899, 12, 31)) or (
                    epoch1904 and year == (1904, 1, 1)
                ):
                    cell_contents = time(
                        cell_contents.hour,
                        cell_contents.minute,
                        cell_contents.second,
                        cell_contents.microsecond,
                    )

            elif cell_typ == XL_CELL_ERROR:
                cell_contents = np.nan
            elif cell_typ == XL_CELL_BOOLEAN:
                cell_contents = bool(cell_contents)
            elif cell_typ == XL_CELL_NUMBER:
                # GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
                if math.isfinite(cell_contents):
                    # GH54564 - don't attempt to convert NaN/Inf
                    val = int(cell_contents)
                    if val == cell_contents:
                        cell_contents = val
            return cell_contents

        data = []

        nrows = sheet.nrows
        if file_rows_needed is not None:
            nrows = min(nrows, file_rows_needed)
        for i in range(nrows):
            row = [
                _parse_cell(value, typ)
                for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
            ]
            data.append(row)

        return data
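
The epoch check in _parse_cell relies on xlrd mapping pure time values (serial < 1) onto a sentinel date; a hedged illustration, assuming xlrd is installed:

from datetime import datetime, time

from xlrd import xldate

# Serial 0.5 in the 1900 system (datemode 0) is 12:00 with no date part;
# xlrd lands it on 1899-12-31, the sentinel _parse_cell looks for.
dt = xldate.xldate_as_datetime(0.5, 0)
assert dt == datetime(1899, 12, 31, 12, 0)

if dt.timetuple()[:3] == (1899, 12, 31):
    value = time(dt.hour, dt.minute, dt.second, dt.microsecond)
assert value == time(12, 0)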
llmeval-env/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py
ADDED
@@ -0,0 +1,284 @@
from __future__ import annotations

import json
from typing import (
    TYPE_CHECKING,
    Any,
)

from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ExcelWriterIfSheetExists,
        FilePath,
        StorageOptions,
        WriteExcelBuffer,
    )


class _XlsxStyler:
    # Map from openpyxl-oriented styles to flatter xlsxwriter representation
    # Ordering necessary for both determinism and because some are keyed by
    # prefixes of others.
    STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
        "font": [
            (("name",), "font_name"),
            (("sz",), "font_size"),
            (("size",), "font_size"),
            (("color", "rgb"), "font_color"),
            (("color",), "font_color"),
            (("b",), "bold"),
            (("bold",), "bold"),
            (("i",), "italic"),
            (("italic",), "italic"),
            (("u",), "underline"),
            (("underline",), "underline"),
            (("strike",), "font_strikeout"),
            (("vertAlign",), "font_script"),
            (("vertalign",), "font_script"),
        ],
        "number_format": [(("format_code",), "num_format"), ((), "num_format")],
        "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
        "alignment": [
            (("horizontal",), "align"),
            (("vertical",), "valign"),
            (("text_rotation",), "rotation"),
            (("wrap_text",), "text_wrap"),
            (("indent",), "indent"),
            (("shrink_to_fit",), "shrink"),
        ],
        "fill": [
            (("patternType",), "pattern"),
            (("patterntype",), "pattern"),
            (("fill_type",), "pattern"),
            (("start_color", "rgb"), "fg_color"),
            (("fgColor", "rgb"), "fg_color"),
            (("fgcolor", "rgb"), "fg_color"),
            (("start_color",), "fg_color"),
            (("fgColor",), "fg_color"),
            (("fgcolor",), "fg_color"),
            (("end_color", "rgb"), "bg_color"),
            (("bgColor", "rgb"), "bg_color"),
            (("bgcolor", "rgb"), "bg_color"),
            (("end_color",), "bg_color"),
            (("bgColor",), "bg_color"),
            (("bgcolor",), "bg_color"),
        ],
        "border": [
            (("color", "rgb"), "border_color"),
            (("color",), "border_color"),
            (("style",), "border"),
            (("top", "color", "rgb"), "top_color"),
            (("top", "color"), "top_color"),
            (("top", "style"), "top"),
            (("top",), "top"),
            (("right", "color", "rgb"), "right_color"),
            (("right", "color"), "right_color"),
            (("right", "style"), "right"),
            (("right",), "right"),
            (("bottom", "color", "rgb"), "bottom_color"),
            (("bottom", "color"), "bottom_color"),
            (("bottom", "style"), "bottom"),
            (("bottom",), "bottom"),
            (("left", "color", "rgb"), "left_color"),
            (("left", "color"), "left_color"),
            (("left", "style"), "left"),
            (("left",), "left"),
        ],
    }

    @classmethod
    def convert(cls, style_dict, num_format_str=None):
        """
        converts a style_dict to an xlsxwriter format dict

        Parameters
        ----------
        style_dict : style dictionary to convert
        num_format_str : optional number format string
        """
        # Create a XlsxWriter format object.
        props = {}

        if num_format_str is not None:
            props["num_format"] = num_format_str

        if style_dict is None:
            return props

        if "borders" in style_dict:
            style_dict = style_dict.copy()
            style_dict["border"] = style_dict.pop("borders")

        for style_group_key, style_group in style_dict.items():
            for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
                # src is a sequence of keys into a nested dict
                # dst is a flat key
                if dst in props:
                    continue
                v = style_group
                for k in src:
                    try:
                        v = v[k]
                    except (KeyError, TypeError):
                        break
                else:
                    props[dst] = v

        if isinstance(props.get("pattern"), str):
            # TODO: support other fill patterns
            props["pattern"] = 0 if props["pattern"] == "none" else 1

        for k in ["border", "top", "right", "bottom", "left"]:
            if isinstance(props.get(k), str):
                try:
                    props[k] = [
                        "none",
                        "thin",
                        "medium",
                        "dashed",
                        "dotted",
                        "thick",
                        "double",
                        "hair",
                        "mediumDashed",
                        "dashDot",
                        "mediumDashDot",
                        "dashDotDot",
                        "mediumDashDotDot",
                        "slantDashDot",
                    ].index(props[k])
                except ValueError:
                    props[k] = 2

        if isinstance(props.get("font_script"), str):
            props["font_script"] = ["baseline", "superscript", "subscript"].index(
                props["font_script"]
            )

        if isinstance(props.get("underline"), str):
            props["underline"] = {
                "none": 0,
                "single": 1,
                "double": 2,
                "singleAccounting": 33,
                "doubleAccounting": 34,
            }[props["underline"]]

        # GH 30107 - xlsxwriter uses different name
        if props.get("valign") == "center":
            props["valign"] = "vcenter"

        return props
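
The for/else walk in convert is the subtle part: each src tuple is tried as a path into the nested style dict, and the first path that resolves wins for its dst key. A standalone sketch of that mechanism:

style_group = {"color": {"rgb": "FF0000FF"}}
mapping = [(("color", "rgb"), "font_color"), (("color",), "font_color")]

props: dict = {}
for src, dst in mapping:
    if dst in props:  # first successful path wins
        continue
    v = style_group
    for k in src:
        try:
            v = v[k]
        except (KeyError, TypeError):
            break  # path failed; try the next mapping
    else:
        props[dst] = v  # only reached when the whole path resolved

assert props == {"font_color": "FF0000FF"}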

class XlsxWriter(ExcelWriter):
    _engine = "xlsxwriter"
    _supported_extensions = (".xlsx",)

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        if mode == "a":
            raise ValueError("Append mode is not supported with xlsxwriter!")

        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        try:
            self._book = Workbook(self._handles.handle, **engine_kwargs)
        except TypeError:
            self._handles.handle.close()
            raise

    @property
    def book(self):
        """
        Book instance of class xlsxwriter.Workbook.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        result = self.book.sheetnames
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        self.book.close()

    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        wks = self.book.get_worksheet_by_name(sheet_name)
        if wks is None:
            wks = self.book.add_worksheet(sheet_name)

        style_dict = {"null": None}

        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)
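
A hedged usage sketch of this writer: it is selected with engine="xlsxwriter", and append mode is rejected by the guard in __init__ above (the output path is a placeholder):

import pandas as pd

pd.DataFrame({"a": [1.5]}).to_excel("out.xlsx", engine="xlsxwriter")

# Append mode raises, per the guard above:
# pd.ExcelWriter("out.xlsx", engine="xlsxwriter", mode="a")  -> ValueError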
llmeval-env/lib/python3.10/site-packages/pandas/io/gbq.py
ADDED
@@ -0,0 +1,255 @@
""" Google BigQuery support """
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Any,
)
import warnings

from pandas.compat._optional import import_optional_dependency
from pandas.util._exceptions import find_stack_level

if TYPE_CHECKING:
    import google.auth

    from pandas import DataFrame


def _try_import():
    # since pandas is a dependency of pandas-gbq
    # we need to import on first use
    msg = (
        "pandas-gbq is required to load data from Google BigQuery. "
        "See the docs: https://pandas-gbq.readthedocs.io."
    )
    pandas_gbq = import_optional_dependency("pandas_gbq", extra=msg)
    return pandas_gbq


def read_gbq(
    query: str,
    project_id: str | None = None,
    index_col: str | None = None,
    col_order: list[str] | None = None,
    reauth: bool = False,
    auth_local_webserver: bool = True,
    dialect: str | None = None,
    location: str | None = None,
    configuration: dict[str, Any] | None = None,
    credentials: google.auth.credentials.Credentials | None = None,
    use_bqstorage_api: bool | None = None,
    max_results: int | None = None,
    progress_bar_type: str | None = None,
) -> DataFrame:
    """
    Load data from Google BigQuery.

    .. deprecated:: 2.2.0

       Please use ``pandas_gbq.read_gbq`` instead.

    This function requires the `pandas-gbq package
    <https://pandas-gbq.readthedocs.io>`__.

    See the `How to authenticate with Google BigQuery
    <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
    guide for authentication instructions.

    Parameters
    ----------
    query : str
        SQL-Like Query to return data values.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    index_col : str, optional
        Name of result column to use for index in results DataFrame.
    col_order : list(str), optional
        List of BigQuery column names in the desired order for results
        DataFrame.
    reauth : bool, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    auth_local_webserver : bool, default True
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        *New in version 0.2.0 of pandas-gbq*.

        .. versionchanged:: 1.5.0
           Default value is changed to ``True``. Google has deprecated the
           ``auth_local_webserver = False`` `"out of band" (copy-paste)
           flow
           <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_.
    dialect : str, default 'legacy'
        Note: The default value is changing to 'standard' in a future version.

        SQL syntax dialect to use. Value can be one of:

        ``'legacy'``
            Use BigQuery's legacy SQL dialect. For more information see
            `BigQuery Legacy SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
        ``'standard'``
            Use BigQuery's standard SQL, which is
            compliant with the SQL 2011 standard. For more information
            see `BigQuery Standard SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
    location : str, optional
        Location where the query job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of any
        datasets used in the query.

        *New in version 0.5.0 of pandas-gbq*.
    configuration : dict, optional
        Query config parameters for job processing.
        For example:

            configuration = {'query': {'useQueryCache': False}}

        For more information see `BigQuery REST API Reference
        <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        *New in version 0.8.0 of pandas-gbq*.
    use_bqstorage_api : bool, default False
        Use the `BigQuery Storage API
        <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
        download query results quickly, but at an increased cost. To use this
        API, first `enable it in the Cloud Console
        <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
        You must also have the `bigquery.readsessions.create
        <https://cloud.google.com/bigquery/docs/access-control#roles>`__
        permission on the project you are billing queries to.

        This feature requires version 0.10.0 or later of the ``pandas-gbq``
        package. It also requires the ``google-cloud-bigquery-storage`` and
        ``fastavro`` packages.

    max_results : int, optional
        If set, limit the maximum number of rows to fetch from the query
        results.

    progress_bar_type : Optional, str
        If set, use the `tqdm <https://tqdm.github.io/>`__ library to
        display a progress bar while the data downloads. Install the
        ``tqdm`` package to use this feature.

        Possible values of ``progress_bar_type`` include:

        ``None``
            No progress bar.
        ``'tqdm'``
            Use the :func:`tqdm.tqdm` function to print a progress bar
            to :data:`sys.stderr`.
        ``'tqdm_notebook'``
            Use the :func:`tqdm.tqdm_notebook` function to display a
            progress bar as a Jupyter notebook widget.
        ``'tqdm_gui'``
            Use the :func:`tqdm.tqdm_gui` function to display a
            progress bar as a graphical dialog box.

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.

    See Also
    --------
    pandas_gbq.read_gbq : This function in the pandas-gbq library.
    DataFrame.to_gbq : Write a DataFrame to Google BigQuery.

    Examples
    --------
    Example taken from `Google BigQuery documentation
    <https://cloud.google.com/bigquery/docs/pandas-gbq-migration>`_

    >>> sql = "SELECT name FROM table_name WHERE state = 'TX' LIMIT 100;"
    >>> df = pd.read_gbq(sql, dialect="standard")  # doctest: +SKIP
    >>> project_id = "your-project-id"  # doctest: +SKIP
    >>> df = pd.read_gbq(sql,
    ...                  project_id=project_id,
    ...                  dialect="standard"
    ...                  )  # doctest: +SKIP
    """
    warnings.warn(
        "read_gbq is deprecated and will be removed in a future version. "
        "Please use pandas_gbq.read_gbq instead: "
        "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.read_gbq",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    pandas_gbq = _try_import()

    kwargs: dict[str, str | bool | int | None] = {}

    # START: new kwargs. Don't populate unless explicitly set.
    if use_bqstorage_api is not None:
        kwargs["use_bqstorage_api"] = use_bqstorage_api
    if max_results is not None:
        kwargs["max_results"] = max_results

    kwargs["progress_bar_type"] = progress_bar_type
    # END: new kwargs

    return pandas_gbq.read_gbq(
        query,
        project_id=project_id,
        index_col=index_col,
        col_order=col_order,
        reauth=reauth,
        auth_local_webserver=auth_local_webserver,
        dialect=dialect,
        location=location,
        configuration=configuration,
        credentials=credentials,
        **kwargs,
    )


def to_gbq(
    dataframe: DataFrame,
    destination_table: str,
    project_id: str | None = None,
    chunksize: int | None = None,
    reauth: bool = False,
    if_exists: str = "fail",
    auth_local_webserver: bool = True,
    table_schema: list[dict[str, str]] | None = None,
    location: str | None = None,
    progress_bar: bool = True,
    credentials: google.auth.credentials.Credentials | None = None,
) -> None:
    warnings.warn(
        "to_gbq is deprecated and will be removed in a future version. "
        "Please use pandas_gbq.to_gbq instead: "
        "https://pandas-gbq.readthedocs.io/en/latest/api.html#pandas_gbq.to_gbq",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    pandas_gbq = _try_import()
    pandas_gbq.to_gbq(
        dataframe,
        destination_table,
        project_id=project_id,
        chunksize=chunksize,
        reauth=reauth,
        if_exists=if_exists,
        auth_local_webserver=auth_local_webserver,
        table_schema=table_schema,
        location=location,
        progress_bar=progress_bar,
        credentials=credentials,
    )
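
Both wrappers just warn and delegate; the replacement path the deprecation warnings point to is direct use of pandas-gbq. A hedged sketch (project, dataset, and table names are placeholders, and valid credentials are assumed):

import pandas_gbq

df = pandas_gbq.read_gbq(
    "SELECT name FROM `my-project.my_dataset.my_table` LIMIT 10",
    project_id="my-project",
)
pandas_gbq.to_gbq(df, "my_dataset.my_table_copy", project_id="my-project")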
llmeval-env/lib/python3.10/site-packages/pandas/io/json/__init__.py
ADDED
@@ -0,0 +1,15 @@
from pandas.io.json._json import (
    read_json,
    to_json,
    ujson_dumps,
    ujson_loads,
)
from pandas.io.json._table_schema import build_table_schema

__all__ = [
    "ujson_dumps",
    "ujson_loads",
    "read_json",
    "to_json",
    "build_table_schema",
]
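
Of the re-exports above, build_table_schema is usable standalone; a quick sketch:

import pandas as pd
from pandas.io.json import build_table_schema

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
schema = build_table_schema(df)
# schema["fields"] describes the index plus both columns with inferred types,
# e.g. {"name": "a", "type": "integer"} for column "a".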
llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (420 Bytes).
llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_json.cpython-310.pyc
ADDED
Binary file (39 kB).
llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_normalize.cpython-310.pyc
ADDED
Binary file (15 kB).
llmeval-env/lib/python3.10/site-packages/pandas/io/json/__pycache__/_table_schema.cpython-310.pyc
ADDED
Binary file (10.3 kB).